max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
Room-DESKTOP-549B209.py | AldoAbdn/Polar-Simulator-2017 | 0 | 6624851 | <filename>Room-DESKTOP-549B209.py
import pygame, sys
from Scene import Scene
from Grid import Grid
from Player import Player
from GridSquare import GridSquare
from GridSquare import GridSquareStar
from Wall import Wall
from Crate import Crate
from SpriteManager import SpriteManager
class Room(object):
"""Main game scene that will be drawn onto another scene that will contain UI elements"""
def __init__(self, imagePath="", width=640, height=480):
#Sets image, if no image generates a blank surface
try:
self.surface = pygame.image.load(imagePath)
except:
self.surface = pygame.Surface([width, height])
#Sets up rect
self.rect = self.surface.get_rect()
self.moves = 0
self.grid = Grid()
self.player = Player()
#Squares
self.tileManager = SpriteManager()
#Star Squares
self.diamondManager = SpriteManager()
#Walls
self.wallManager = SpriteManager()
#Crates
self.crateManager = SpriteManager()
self.startLevel(0)
self.surface.fill(Scene._Colours["red"])
#Getters
def getLevel(self):
return self.level
def getTotalMoves(self):
return self.totalMoves
def getMoves(self):
return self.moves
#Setters
def setLevel(self, value):
self.level = value
def setTotalMoves(self, value):
self.totalMoves = value
def setMoves(self, value):
self.moves = value
#Moves
def incrementMoves(self):
self.totalMoves += 1
self.moves += 1
#Draw
def draw(self, surface):
self.tile
#Setup
def setup(self):
self.grid.reset()
self.player.setCoordinates((2,4))
self.grid.addItem(self.player.getCoordinates(), self.player)
self.tileManager.setup(GridSquare.generate(16), [(0,2),(0,3),(0,4),
(1,2),(1,5),
(2,2),(2,3),(2,4),(2,5),(2,6),
(3,2),(3,4),(3,5),(3,6),
(4,3),(4,4)])
self.diamondManager.setup(GridSquareStar.generate(2),[(0,5),
(4,2)])
self.wallManager.setup(Wall.generate(3),[(1,3),(1,4),
(3,3)])
self.crateManager.setup(Crate.generate(2),[(1,2),
(3,5)])
self.grid.addItemsBySpriteManager(self.tileManager)
self.grid.addItemsBySpriteManager(self.diamondManager)
self.grid.addItemsBySpriteManager(self.wallManager)
self.grid.addItemsBySpriteManager(self.crateManager)
#Event Handlers
def eventManager(self, events):
for event in events:
if event.type == pygame.QUIT:
self.quit()
elif event.type == pygame.KEYDOWN:
self.keyPress(event)
def keyPress(self, event):
if event.key == 119: #W key
self.playerMove("up")
if event.key == 115: #S key
self.playerMove("down")
if event.key == 97: #A key
self.playerMove("left")
if event.key == 100: #D key
self.playerMove("right")
def quit(self):
pygame.quit()
sys.exit()
#Special
def fillSpaceWithWalls(self):
currentGridItems = self.grid.getGridItems()
rows = len(currentGridItems)
collums = len(currentGridItems[0])
for i in range(0,rows):
for j in range(0,collums):
if not currentGridItems[i][j]:
wall = Wall()
self.wallManager.add(wall, (i,j))
#Used to predict where a sprite will be if it were to move a certain direction
def predictCoordinates(self,currentCoordinates, direction):
if direction.lower() == "up":
possibleCoordinates = (currentCoordinates[0] - 1, currentCoordinates[1])
elif direction.lower() == "down":
possibleCoordinates = (currentCoordinates[0] + 1, currentCoordinates[1])
elif direction.lower() == "left":
possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] - 1)
elif direction.lower() == "right":
possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] + 1)
return possibleCoordinates
#Checks if player can move and handles events
def playerMove(self, direction):
currentCoordinates = self.player.getCoordinates() #Gets current coordinates
possibleCoordinates = self.predictCoordinates(currentCoordinates,direction)
isLevelOver = False
try: #Catches out of range exeption if player tries to move out of grid and there is no wall
items = self.grid.getItems(possibleCoordinates[0], possibleCoordinates[1])
print items
if items and possibleCoordinates[0] >= 0 and possibleCoordinates[1] >= 0:
for i in range(0, len(items)):
if items[i].__class__.__name__ == "Wall":
return #Returns if player hits a wall
elif items[i].__class__.__name__ == "Crate":
crateMoved = self.crateMove(items[i], direction)
isLevelOver = self.cratesCheck()
if not crateMoved:
return #returns if crate can't move
elif isLevelOver:
self.levelOver()
elif items[i].__class__.__name__ == "GridSquare":
pass
#Player will only move if none of the above are met
if not isLevelOver:
self.incrementMoves()
self.player.move(self.grid.getPosition(possibleCoordinates),possibleCoordinates)
self.grid.addItem(self.player.getCoordinates(), self.player)
return
except:
print "Exception"
#Checks if crate can move and then returns true if player can push crate
def crateMove(self, crate,direction):
currentCoordinates = crate.getCoordinates()
possibleCoordinates = self.predictCoordinates(currentCoordinates,direction)
try: #Catches out of range exeption if player tries to move out of grid and there is no wall
items = self.grid.getItems(possibleCoordinates[0], possibleCoordinates[1])
if items and possibleCoordinates[0] >= 0 and possibleCoordinates[1] >= 0:
for i in range(0, len(items)):
if items[i].__class__.__name__ == "Wall":
return False
elif items[i].__class__.__name__ == "Crate":
return False
else:
crate.move(self.grid.getPosition(possibleCoordinates),possibleCoordinates)
self.grid.addItem(crate.getCoordinates(), crate)
for i in range(0, len(items)):
if isinstance(items[i], GridSquareStar):
print "Grid star"
crate.image.fill((0,255,0))
return True
else:
crate.image.fill((128,0,0))
return True
return True #return true if it moved
return False
except:
print "Exception"
#Checks if all crates are on star squares
def isGameOver(self):
crates = self.crateManager.getSprites()
items = self.grid.getGridItems()
counter = 0
# print items
for i in range(0,len(crates)):
crateCoord = crates[i].getCoordinates()
tempRow = items[crateCoord[0]]
tempCol = tempRow[crateCoord[1]]
for j in tempCol:
if isinstance(j, GridSquareStar):
counter += 1
if counter == len(crates):
return True
else:
return False
| <filename>Room-DESKTOP-549B209.py
import pygame, sys
from Scene import Scene
from Grid import Grid
from Player import Player
from GridSquare import GridSquare
from GridSquare import GridSquareStar
from Wall import Wall
from Crate import Crate
from SpriteManager import SpriteManager
class Room(object):
"""Main game scene that will be drawn onto another scene that will contain UI elements"""
def __init__(self, imagePath="", width=640, height=480):
#Sets image, if no image generates a blank surface
try:
self.surface = pygame.image.load(imagePath)
except:
self.surface = pygame.Surface([width, height])
#Sets up rect
self.rect = self.surface.get_rect()
self.moves = 0
self.grid = Grid()
self.player = Player()
#Squares
self.tileManager = SpriteManager()
#Star Squares
self.diamondManager = SpriteManager()
#Walls
self.wallManager = SpriteManager()
#Crates
self.crateManager = SpriteManager()
self.startLevel(0)
self.surface.fill(Scene._Colours["red"])
#Getters
def getLevel(self):
return self.level
def getTotalMoves(self):
return self.totalMoves
def getMoves(self):
return self.moves
#Setters
def setLevel(self, value):
self.level = value
def setTotalMoves(self, value):
self.totalMoves = value
def setMoves(self, value):
self.moves = value
#Moves
def incrementMoves(self):
self.totalMoves += 1
self.moves += 1
#Draw
def draw(self, surface):
self.tile
#Setup
def setup(self):
self.grid.reset()
self.player.setCoordinates((2,4))
self.grid.addItem(self.player.getCoordinates(), self.player)
self.tileManager.setup(GridSquare.generate(16), [(0,2),(0,3),(0,4),
(1,2),(1,5),
(2,2),(2,3),(2,4),(2,5),(2,6),
(3,2),(3,4),(3,5),(3,6),
(4,3),(4,4)])
self.diamondManager.setup(GridSquareStar.generate(2),[(0,5),
(4,2)])
self.wallManager.setup(Wall.generate(3),[(1,3),(1,4),
(3,3)])
self.crateManager.setup(Crate.generate(2),[(1,2),
(3,5)])
self.grid.addItemsBySpriteManager(self.tileManager)
self.grid.addItemsBySpriteManager(self.diamondManager)
self.grid.addItemsBySpriteManager(self.wallManager)
self.grid.addItemsBySpriteManager(self.crateManager)
#Event Handlers
def eventManager(self, events):
for event in events:
if event.type == pygame.QUIT:
self.quit()
elif event.type == pygame.KEYDOWN:
self.keyPress(event)
def keyPress(self, event):
if event.key == 119: #W key
self.playerMove("up")
if event.key == 115: #S key
self.playerMove("down")
if event.key == 97: #A key
self.playerMove("left")
if event.key == 100: #D key
self.playerMove("right")
def quit(self):
pygame.quit()
sys.exit()
#Special
def fillSpaceWithWalls(self):
currentGridItems = self.grid.getGridItems()
rows = len(currentGridItems)
collums = len(currentGridItems[0])
for i in range(0,rows):
for j in range(0,collums):
if not currentGridItems[i][j]:
wall = Wall()
self.wallManager.add(wall, (i,j))
#Used to predict where a sprite will be if it were to move a certain direction
def predictCoordinates(self,currentCoordinates, direction):
if direction.lower() == "up":
possibleCoordinates = (currentCoordinates[0] - 1, currentCoordinates[1])
elif direction.lower() == "down":
possibleCoordinates = (currentCoordinates[0] + 1, currentCoordinates[1])
elif direction.lower() == "left":
possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] - 1)
elif direction.lower() == "right":
possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] + 1)
return possibleCoordinates
#Checks if player can move and handles events
def playerMove(self, direction):
currentCoordinates = self.player.getCoordinates() #Gets current coordinates
possibleCoordinates = self.predictCoordinates(currentCoordinates,direction)
isLevelOver = False
try: #Catches out of range exeption if player tries to move out of grid and there is no wall
items = self.grid.getItems(possibleCoordinates[0], possibleCoordinates[1])
print items
if items and possibleCoordinates[0] >= 0 and possibleCoordinates[1] >= 0:
for i in range(0, len(items)):
if items[i].__class__.__name__ == "Wall":
return #Returns if player hits a wall
elif items[i].__class__.__name__ == "Crate":
crateMoved = self.crateMove(items[i], direction)
isLevelOver = self.cratesCheck()
if not crateMoved:
return #returns if crate can't move
elif isLevelOver:
self.levelOver()
elif items[i].__class__.__name__ == "GridSquare":
pass
#Player will only move if none of the above are met
if not isLevelOver:
self.incrementMoves()
self.player.move(self.grid.getPosition(possibleCoordinates),possibleCoordinates)
self.grid.addItem(self.player.getCoordinates(), self.player)
return
except:
print "Exception"
#Checks if crate can move and then returns true if player can push crate
def crateMove(self, crate,direction):
currentCoordinates = crate.getCoordinates()
possibleCoordinates = self.predictCoordinates(currentCoordinates,direction)
try: #Catches out of range exeption if player tries to move out of grid and there is no wall
items = self.grid.getItems(possibleCoordinates[0], possibleCoordinates[1])
if items and possibleCoordinates[0] >= 0 and possibleCoordinates[1] >= 0:
for i in range(0, len(items)):
if items[i].__class__.__name__ == "Wall":
return False
elif items[i].__class__.__name__ == "Crate":
return False
else:
crate.move(self.grid.getPosition(possibleCoordinates),possibleCoordinates)
self.grid.addItem(crate.getCoordinates(), crate)
for i in range(0, len(items)):
if isinstance(items[i], GridSquareStar):
print "Grid star"
crate.image.fill((0,255,0))
return True
else:
crate.image.fill((128,0,0))
return True
return True #return true if it moved
return False
except:
print "Exception"
#Checks if all crates are on star squares
def isGameOver(self):
crates = self.crateManager.getSprites()
items = self.grid.getGridItems()
counter = 0
# print items
for i in range(0,len(crates)):
crateCoord = crates[i].getCoordinates()
tempRow = items[crateCoord[0]]
tempCol = tempRow[crateCoord[1]]
for j in tempCol:
if isinstance(j, GridSquareStar):
counter += 1
if counter == len(crates):
return True
else:
return False
| en | 0.853899 | Main game scene that will be drawn onto another scene that will contain UI elements #Sets image, if no image generates a blank surface #Sets up rect #Squares #Star Squares #Walls #Crates #Getters #Setters #Moves #Draw #Setup #Event Handlers #W key #S key #A key #D key #Special #Used to predict where a sprite will be if it were to move a certain direction #Checks if player can move and handles events #Gets current coordinates #Catches out of range exeption if player tries to move out of grid and there is no wall #Returns if player hits a wall #returns if crate can't move #Player will only move if none of the above are met #Checks if crate can move and then returns true if player can push crate #Catches out of range exeption if player tries to move out of grid and there is no wall #return true if it moved #Checks if all crates are on star squares # print items | 2.762022 | 3 |
symphony/cli/graphql_compiler/gql/renderer_dataclasses.py | remo5000/magma | 1 | 6624852 | <reponame>remo5000/magma<gh_stars>1-10
#!/usr/bin/env python3
from graphql import GraphQLSchema
from .utils_codegen import CodeChunk
from .query_parser import ParsedQuery, ParsedField, ParsedObject, ParsedEnum, \
ParsedOperation, ParsedVariableDefinition
class DataclassesRenderer:
def __init__(self, schema: GraphQLSchema):
self.schema = schema
def render(self, parsed_query: ParsedQuery):
# We sort fragment nodes to be first and operations to be last because
# of dependecies
buffer = CodeChunk()
buffer.write("#!/usr/bin/env python3")
buffer.write("# @" + "generated AUTOGENERATED file. Do not Change!")
buffer.write("")
buffer.write("from dataclasses import dataclass, field")
buffer.write("from datetime import datetime")
buffer.write("from enum import Enum")
buffer.write("from functools import partial")
buffer.write("from typing import Any, Callable, List, Mapping, Optional")
buffer.write("")
buffer.write("from dataclasses_json import dataclass_json")
buffer.write("from marshmallow import fields as marshmallow_fields")
buffer.write("")
buffer.write("from .datetime_utils import fromisoformat")
buffer.write("")
self.__render_datetime_field(buffer)
# Enums
if parsed_query.enums:
buffer.write('')
self.__render_enum_field(buffer)
for enum in parsed_query.enums:
buffer.write('')
self.__render_enum(buffer, enum)
sorted_objects = sorted(parsed_query.objects, key=lambda obj: 1 if
isinstance(obj, ParsedOperation) else 0)
for obj in sorted_objects:
buffer.write('')
if isinstance(obj, ParsedObject):
self.__render_object(parsed_query, buffer, obj)
elif isinstance(obj, ParsedOperation):
self.__render_operation(parsed_query, buffer, obj)
return str(buffer)
@staticmethod
def __render_enum_field(buffer: CodeChunk):
with buffer.write_block('def enum_field(enum_type):'):
with buffer.write_block('def encode_enum(value):'):
buffer.write('return value.value')
buffer.write('')
with buffer.write_block('def decode_enum(t, value):'):
buffer.write('return t(value)')
buffer.write('')
buffer.write("return field(")
buffer.write(" metadata={")
buffer.write(' "dataclasses_json": {')
buffer.write(' "encoder": encode_enum,')
buffer.write(' "decoder": partial(decode_enum, enum_type),')
buffer.write(" }")
buffer.write(" }")
buffer.write(")")
buffer.write('')
@staticmethod
def __render_datetime_field(buffer: CodeChunk):
buffer.write('')
buffer.write("DATETIME_FIELD = field(")
buffer.write(" metadata={")
buffer.write(' "dataclasses_json": {')
buffer.write(' "encoder": datetime.isoformat,')
buffer.write(' "decoder": fromisoformat,')
buffer.write(' "mm_field": marshmallow_fields.DateTime'
+ '(format="iso"),')
buffer.write(" }")
buffer.write(" }")
buffer.write(")")
buffer.write('')
def __render_object(
self, parsed_query: ParsedQuery, buffer: CodeChunk, obj: ParsedObject):
class_parents = '' if not obj.parents else f'({", ".join(obj.parents)})'
buffer.write('@dataclass_json')
buffer.write('@dataclass')
with buffer.write_block(f'class {obj.name}{class_parents}:'):
# render child objects
for child_object in obj.children:
self.__render_object(parsed_query, buffer, child_object)
# render fields
sorted_fields = sorted(obj.fields, key=lambda f: 1 if f.nullable else 0)
for field in sorted_fields:
self.__render_field(parsed_query, buffer, field)
# pass if not children or fields
if not (obj.children or obj.fields):
buffer.write('pass')
buffer.write('')
def __render_operation(
self,
parsed_query: ParsedQuery,
buffer: CodeChunk,
parsed_op: ParsedOperation):
buffer.write('@dataclass_json')
buffer.write('@dataclass')
with buffer.write_block(f'class {parsed_op.name}:'):
buffer.write('__QUERY__ = """')
buffer.write(parsed_query.query)
buffer.write('"""')
buffer.write('')
# Render children
for child_object in parsed_op.children:
self.__render_object(parsed_query, buffer, child_object)
# operation fields
buffer.write(f'data: Optional[{parsed_op.name}Data] = None')
buffer.write('errors: Any = None')
buffer.write('')
# Execution functions
if parsed_op.variables:
vars_args = ', ' + ', '.join([self.__render_variable_definition(var)
for var in parsed_op.variables])
variables_dict = '{' + ', '.join(f'"{var.name}": {var.name}'
for var in parsed_op.variables) + '}'
else:
vars_args = ''
variables_dict = 'None'
buffer.write('@classmethod')
buffer.write('# fmt: off')
with buffer.write_block(f'def execute(cls, client{vars_args}):'):
buffer.write('# fmt: off')
buffer.write(f'variables = {variables_dict}')
buffer.write('response_text = client.call(cls.__QUERY__, '
'variables=variables)')
buffer.write('return cls.from_json(response_text).data')
buffer.write('')
@staticmethod
def __render_variable_definition(var: ParsedVariableDefinition):
var_type = var.type
if var_type == 'DateTime':
var_type = 'datetime'
elif var_type == 'Cursor':
var_type = 'str'
if var.is_list:
return f'{var.name}: List[{var_type}] = []'
if not var.nullable:
return f'{var.name}: {var_type}'
return f'{var.name}: Optional[{var_type}] = {var.default_value or "None"}'
@staticmethod
def __render_field(
parsed_query: ParsedQuery, buffer: CodeChunk, field: ParsedField):
enum_names = [e.name for e in parsed_query.enums]
is_enum = field.type in enum_names
suffix = ''
field_type = field.type
if is_enum:
suffix = f' = enum_field({field.type})'
if field.type == 'DateTime':
suffix = ' = DATETIME_FIELD'
field_type = 'datetime'
if field.nullable:
suffix = f' = {field.default_value}'
buffer.write(f'{field.name}: Optional[{field_type}]{suffix}')
else:
buffer.write(f'{field.name}: {field_type}{suffix}')
@staticmethod
def __render_enum(buffer: CodeChunk, enum: ParsedEnum):
with buffer.write_block(f'class {enum.name}(Enum):'):
for value_name, value in enum.values.items():
if isinstance(value, str):
value = f'"{value}"'
buffer.write(f'{value_name} = {value}')
buffer.write('')
| #!/usr/bin/env python3
from graphql import GraphQLSchema
from .utils_codegen import CodeChunk
from .query_parser import ParsedQuery, ParsedField, ParsedObject, ParsedEnum, \
ParsedOperation, ParsedVariableDefinition
class DataclassesRenderer:
def __init__(self, schema: GraphQLSchema):
self.schema = schema
def render(self, parsed_query: ParsedQuery):
# We sort fragment nodes to be first and operations to be last because
# of dependecies
buffer = CodeChunk()
buffer.write("#!/usr/bin/env python3")
buffer.write("# @" + "generated AUTOGENERATED file. Do not Change!")
buffer.write("")
buffer.write("from dataclasses import dataclass, field")
buffer.write("from datetime import datetime")
buffer.write("from enum import Enum")
buffer.write("from functools import partial")
buffer.write("from typing import Any, Callable, List, Mapping, Optional")
buffer.write("")
buffer.write("from dataclasses_json import dataclass_json")
buffer.write("from marshmallow import fields as marshmallow_fields")
buffer.write("")
buffer.write("from .datetime_utils import fromisoformat")
buffer.write("")
self.__render_datetime_field(buffer)
# Enums
if parsed_query.enums:
buffer.write('')
self.__render_enum_field(buffer)
for enum in parsed_query.enums:
buffer.write('')
self.__render_enum(buffer, enum)
sorted_objects = sorted(parsed_query.objects, key=lambda obj: 1 if
isinstance(obj, ParsedOperation) else 0)
for obj in sorted_objects:
buffer.write('')
if isinstance(obj, ParsedObject):
self.__render_object(parsed_query, buffer, obj)
elif isinstance(obj, ParsedOperation):
self.__render_operation(parsed_query, buffer, obj)
return str(buffer)
@staticmethod
def __render_enum_field(buffer: CodeChunk):
with buffer.write_block('def enum_field(enum_type):'):
with buffer.write_block('def encode_enum(value):'):
buffer.write('return value.value')
buffer.write('')
with buffer.write_block('def decode_enum(t, value):'):
buffer.write('return t(value)')
buffer.write('')
buffer.write("return field(")
buffer.write(" metadata={")
buffer.write(' "dataclasses_json": {')
buffer.write(' "encoder": encode_enum,')
buffer.write(' "decoder": partial(decode_enum, enum_type),')
buffer.write(" }")
buffer.write(" }")
buffer.write(")")
buffer.write('')
@staticmethod
def __render_datetime_field(buffer: CodeChunk):
buffer.write('')
buffer.write("DATETIME_FIELD = field(")
buffer.write(" metadata={")
buffer.write(' "dataclasses_json": {')
buffer.write(' "encoder": datetime.isoformat,')
buffer.write(' "decoder": fromisoformat,')
buffer.write(' "mm_field": marshmallow_fields.DateTime'
+ '(format="iso"),')
buffer.write(" }")
buffer.write(" }")
buffer.write(")")
buffer.write('')
def __render_object(
self, parsed_query: ParsedQuery, buffer: CodeChunk, obj: ParsedObject):
class_parents = '' if not obj.parents else f'({", ".join(obj.parents)})'
buffer.write('@dataclass_json')
buffer.write('@dataclass')
with buffer.write_block(f'class {obj.name}{class_parents}:'):
# render child objects
for child_object in obj.children:
self.__render_object(parsed_query, buffer, child_object)
# render fields
sorted_fields = sorted(obj.fields, key=lambda f: 1 if f.nullable else 0)
for field in sorted_fields:
self.__render_field(parsed_query, buffer, field)
# pass if not children or fields
if not (obj.children or obj.fields):
buffer.write('pass')
buffer.write('')
def __render_operation(
self,
parsed_query: ParsedQuery,
buffer: CodeChunk,
parsed_op: ParsedOperation):
buffer.write('@dataclass_json')
buffer.write('@dataclass')
with buffer.write_block(f'class {parsed_op.name}:'):
buffer.write('__QUERY__ = """')
buffer.write(parsed_query.query)
buffer.write('"""')
buffer.write('')
# Render children
for child_object in parsed_op.children:
self.__render_object(parsed_query, buffer, child_object)
# operation fields
buffer.write(f'data: Optional[{parsed_op.name}Data] = None')
buffer.write('errors: Any = None')
buffer.write('')
# Execution functions
if parsed_op.variables:
vars_args = ', ' + ', '.join([self.__render_variable_definition(var)
for var in parsed_op.variables])
variables_dict = '{' + ', '.join(f'"{var.name}": {var.name}'
for var in parsed_op.variables) + '}'
else:
vars_args = ''
variables_dict = 'None'
buffer.write('@classmethod')
buffer.write('# fmt: off')
with buffer.write_block(f'def execute(cls, client{vars_args}):'):
buffer.write('# fmt: off')
buffer.write(f'variables = {variables_dict}')
buffer.write('response_text = client.call(cls.__QUERY__, '
'variables=variables)')
buffer.write('return cls.from_json(response_text).data')
buffer.write('')
@staticmethod
def __render_variable_definition(var: ParsedVariableDefinition):
var_type = var.type
if var_type == 'DateTime':
var_type = 'datetime'
elif var_type == 'Cursor':
var_type = 'str'
if var.is_list:
return f'{var.name}: List[{var_type}] = []'
if not var.nullable:
return f'{var.name}: {var_type}'
return f'{var.name}: Optional[{var_type}] = {var.default_value or "None"}'
@staticmethod
def __render_field(
parsed_query: ParsedQuery, buffer: CodeChunk, field: ParsedField):
enum_names = [e.name for e in parsed_query.enums]
is_enum = field.type in enum_names
suffix = ''
field_type = field.type
if is_enum:
suffix = f' = enum_field({field.type})'
if field.type == 'DateTime':
suffix = ' = DATETIME_FIELD'
field_type = 'datetime'
if field.nullable:
suffix = f' = {field.default_value}'
buffer.write(f'{field.name}: Optional[{field_type}]{suffix}')
else:
buffer.write(f'{field.name}: {field_type}{suffix}')
@staticmethod
def __render_enum(buffer: CodeChunk, enum: ParsedEnum):
with buffer.write_block(f'class {enum.name}(Enum):'):
for value_name, value in enum.values.items():
if isinstance(value, str):
value = f'"{value}"'
buffer.write(f'{value_name} = {value}')
buffer.write('') | en | 0.560359 | #!/usr/bin/env python3 # We sort fragment nodes to be first and operations to be last because # of dependecies # Enums # render child objects # render fields # pass if not children or fields ') buffer.write(parsed_query.query) buffer.write(' # Render children # operation fields # Execution functions | 2.310962 | 2 |
onnx_tf/handlers/backend/top_k.py | jsigee87/onnx-tensorflow | 18 | 6624853 | <reponame>jsigee87/onnx-tensorflow
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("TopK")
@tf_func(tf.nn.top_k)
class TopK(BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
x_rank = len(x.get_shape())
axes = list(range(x_rank))
axis = node.attrs.get("axis", -1)
axis = axis if axis >= 0 else axis + x_rank
if axis != x_rank - 1:
pre_perm = [a for a in axes if a != axis] + [axis]
post_perm = axes[:axis] + [x_rank - 1] + axes[axis:x_rank - 1]
x = tf.transpose(x, perm=pre_perm)
values, indices = tf.nn.top_k(x, k=node.attrs["k"])
values = tf.transpose(values, perm=post_perm)
return [values, tf.cast(indices, dtype=tf.int64)]
values, indices = tf.nn.top_k(x, k=node.attrs["k"])
return [values, tf.cast(indices, dtype=tf.int64)]
| import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("TopK")
@tf_func(tf.nn.top_k)
class TopK(BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
x_rank = len(x.get_shape())
axes = list(range(x_rank))
axis = node.attrs.get("axis", -1)
axis = axis if axis >= 0 else axis + x_rank
if axis != x_rank - 1:
pre_perm = [a for a in axes if a != axis] + [axis]
post_perm = axes[:axis] + [x_rank - 1] + axes[axis:x_rank - 1]
x = tf.transpose(x, perm=pre_perm)
values, indices = tf.nn.top_k(x, k=node.attrs["k"])
values = tf.transpose(values, perm=post_perm)
return [values, tf.cast(indices, dtype=tf.int64)]
values, indices = tf.nn.top_k(x, k=node.attrs["k"])
return [values, tf.cast(indices, dtype=tf.int64)] | none | 1 | 1.982141 | 2 | |
model/network_imnet_test.py | ruofeidu/mdif | 6 | 6624854 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.vr.perception.volume_compression.mdif.model.network_imnet."""
import tensorflow as tf
from google3.vr.perception.volume_compression.mdif.model import network_imnet
class NetworkImnetTest(tf.test.TestCase):
def test_imnet(self):
batch_size = 2
num_out_channel = 1
layer = network_imnet.ImNet(num_out_channel=num_out_channel)
x = tf.zeros((batch_size, 131), dtype=tf.float32)
output = layer(x)
self.assertSequenceEqual(output.shape, (batch_size, num_out_channel))
if __name__ == '__main__':
tf.test.main()
| #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.vr.perception.volume_compression.mdif.model.network_imnet."""
import tensorflow as tf
from google3.vr.perception.volume_compression.mdif.model import network_imnet
class NetworkImnetTest(tf.test.TestCase):
def test_imnet(self):
batch_size = 2
num_out_channel = 1
layer = network_imnet.ImNet(num_out_channel=num_out_channel)
x = tf.zeros((batch_size, 131), dtype=tf.float32)
output = layer(x)
self.assertSequenceEqual(output.shape, (batch_size, num_out_channel))
if __name__ == '__main__':
tf.test.main()
| en | 0.795502 | #!/usr/bin/python # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for google3.vr.perception.volume_compression.mdif.model.network_imnet. | 1.970894 | 2 |
sdk/python/pulumi_artifactory/remote_puppet_repository.py | pulumi/terraform-provider-artifactory | 4 | 6624855 | <reponame>pulumi/terraform-provider-artifactory
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RemotePuppetRepositoryArgs', 'RemotePuppetRepository']
@pulumi.input_type
class RemotePuppetRepositoryArgs:
    """Typed input arguments for constructing a `RemotePuppetRepository` resource.

    Auto-generated by the Pulumi Terraform Bridge (tfgen); each field mirrors an
    attribute of the underlying Artifactory remote repository configuration.
    """

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 url: pulumi.Input[str],
                 allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                 assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                 blacked_out: Optional[pulumi.Input[bool]] = None,
                 block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                 bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                 client_tls_certificate: Optional[pulumi.Input[str]] = None,
                 content_synchronisation: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                 excludes_pattern: Optional[pulumi.Input[str]] = None,
                 hard_fail: Optional[pulumi.Input[bool]] = None,
                 includes_pattern: Optional[pulumi.Input[str]] = None,
                 list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                 local_address: Optional[pulumi.Input[str]] = None,
                 mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                 missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 offline: Optional[pulumi.Input[bool]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 priority_resolution: Optional[pulumi.Input[bool]] = None,
                 project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project_key: Optional[pulumi.Input[str]] = None,
                 propagate_query_params: Optional[pulumi.Input[bool]] = None,
                 property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 proxy: Optional[pulumi.Input[str]] = None,
                 remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 share_configuration: Optional[pulumi.Input[bool]] = None,
                 socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                 store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                 synchronize_properties: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 xray_index: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a RemotePuppetRepository resource.
        :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
               contain spaces or special characters.
        :param pulumi.Input[str] url: The remote repo URL.
        :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
               any other host.
        :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
               an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
               offline. Default to 300.
        :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
               resolution.
        :param pulumi.Input[bool] block_mismatching_mime_types: If set, artifacts will fail to download if a mismatch is detected between the requested and received mime type,
               according to the list specified in the system properties file under blockedMismatchingMimeTypes. You can override
               this behavior per mime type via 'mismatching_mime_types_override_list'.
        :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
        :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
               artifacts are excluded.
        :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
               communicate with this repository.
        :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
               artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
               the 'Retrieval Cache Period'. Default value is 'false'.
        :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
               multiple network interfaces.
        :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
               "application/json,application/xml". Default value is empty.
        :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
               repository to a project, repository key must be prefixed with project key, separated by a dash.
        :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
        :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
        :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
        :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
        :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
        :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
               operation is considered a retrieval failure.
        :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
               direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
               one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
               servers.
        :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
        :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
               of 0 means automatic cleanup of cached artifacts is disabled.
        :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
               Xray settings.
        """
        # Required fields first; every optional field is only recorded when the
        # caller actually supplied it, so unset fields stay absent from the input map.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "url", url)
        if allow_any_host_auth is not None:
            pulumi.set(__self__, "allow_any_host_auth", allow_any_host_auth)
        if assumed_offline_period_secs is not None:
            pulumi.set(__self__, "assumed_offline_period_secs", assumed_offline_period_secs)
        if blacked_out is not None:
            pulumi.set(__self__, "blacked_out", blacked_out)
        if block_mismatching_mime_types is not None:
            pulumi.set(__self__, "block_mismatching_mime_types", block_mismatching_mime_types)
        if bypass_head_requests is not None:
            pulumi.set(__self__, "bypass_head_requests", bypass_head_requests)
        if client_tls_certificate is not None:
            pulumi.set(__self__, "client_tls_certificate", client_tls_certificate)
        if content_synchronisation is not None:
            pulumi.set(__self__, "content_synchronisation", content_synchronisation)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_cookie_management is not None:
            pulumi.set(__self__, "enable_cookie_management", enable_cookie_management)
        if excludes_pattern is not None:
            pulumi.set(__self__, "excludes_pattern", excludes_pattern)
        if hard_fail is not None:
            pulumi.set(__self__, "hard_fail", hard_fail)
        if includes_pattern is not None:
            pulumi.set(__self__, "includes_pattern", includes_pattern)
        if list_remote_folder_items is not None:
            pulumi.set(__self__, "list_remote_folder_items", list_remote_folder_items)
        if local_address is not None:
            pulumi.set(__self__, "local_address", local_address)
        if mismatching_mime_types_override_list is not None:
            pulumi.set(__self__, "mismatching_mime_types_override_list", mismatching_mime_types_override_list)
        if missed_cache_period_seconds is not None:
            pulumi.set(__self__, "missed_cache_period_seconds", missed_cache_period_seconds)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if offline is not None:
            pulumi.set(__self__, "offline", offline)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if priority_resolution is not None:
            pulumi.set(__self__, "priority_resolution", priority_resolution)
        if project_environments is not None:
            pulumi.set(__self__, "project_environments", project_environments)
        if project_key is not None:
            pulumi.set(__self__, "project_key", project_key)
        if propagate_query_params is not None:
            pulumi.set(__self__, "propagate_query_params", propagate_query_params)
        if property_sets is not None:
            pulumi.set(__self__, "property_sets", property_sets)
        if proxy is not None:
            pulumi.set(__self__, "proxy", proxy)
        if remote_repo_layout_ref is not None:
            pulumi.set(__self__, "remote_repo_layout_ref", remote_repo_layout_ref)
        if repo_layout_ref is not None:
            pulumi.set(__self__, "repo_layout_ref", repo_layout_ref)
        if retrieval_cache_period_seconds is not None:
            pulumi.set(__self__, "retrieval_cache_period_seconds", retrieval_cache_period_seconds)
        if share_configuration is not None:
            pulumi.set(__self__, "share_configuration", share_configuration)
        if socket_timeout_millis is not None:
            pulumi.set(__self__, "socket_timeout_millis", socket_timeout_millis)
        if store_artifacts_locally is not None:
            pulumi.set(__self__, "store_artifacts_locally", store_artifacts_locally)
        if synchronize_properties is not None:
            pulumi.set(__self__, "synchronize_properties", synchronize_properties)
        if unused_artifacts_cleanup_period_enabled is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_enabled", unused_artifacts_cleanup_period_enabled)
        if unused_artifacts_cleanup_period_hours is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_hours", unused_artifacts_cleanup_period_hours)
        if username is not None:
            pulumi.set(__self__, "username", username)
        if xray_index is not None:
            pulumi.set(__self__, "xray_index", xray_index)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        A mandatory identifier for the repository that must be unique. It cannot begin with a number or
        contain spaces or special characters.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def url(self) -> pulumi.Input[str]:
        """
        The remote repo URL.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: pulumi.Input[str]):
        pulumi.set(self, "url", value)

    @property
    @pulumi.getter(name="allowAnyHostAuth")
    def allow_any_host_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
        any other host.
        """
        return pulumi.get(self, "allow_any_host_auth")

    @allow_any_host_auth.setter
    def allow_any_host_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_any_host_auth", value)

    @property
    @pulumi.getter(name="assumedOfflinePeriodSecs")
    def assumed_offline_period_secs(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
        an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
        offline. Default to 300.
        """
        return pulumi.get(self, "assumed_offline_period_secs")

    @assumed_offline_period_secs.setter
    def assumed_offline_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "assumed_offline_period_secs", value)

    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> Optional[pulumi.Input[bool]]:
        """
        (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
        resolution.
        """
        return pulumi.get(self, "blacked_out")

    @blacked_out.setter
    def blacked_out(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "blacked_out", value)

    @property
    @pulumi.getter(name="blockMismatchingMimeTypes")
    def block_mismatching_mime_types(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, artifacts will fail to download if a mismatch is detected between the requested and received mime type,
        according to the list specified in the system properties file under blockedMismatchingMimeTypes. You can override
        this behavior per mime type via 'mismatching_mime_types_override_list'.
        """
        return pulumi.get(self, "block_mismatching_mime_types")

    @block_mismatching_mime_types.setter
    def block_mismatching_mime_types(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "block_mismatching_mime_types", value)

    @property
    @pulumi.getter(name="bypassHeadRequests")
    def bypass_head_requests(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "bypass_head_requests")

    @bypass_head_requests.setter
    def bypass_head_requests(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bypass_head_requests", value)

    @property
    @pulumi.getter(name="clientTlsCertificate")
    def client_tls_certificate(self) -> Optional[pulumi.Input[str]]:
        """
        Client TLS certificate name used when connecting to the remote repository.
        """
        return pulumi.get(self, "client_tls_certificate")

    @client_tls_certificate.setter
    def client_tls_certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_tls_certificate", value)

    @property
    @pulumi.getter(name="contentSynchronisation")
    def content_synchronisation(self) -> Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]:
        """
        Smart remote repository content synchronisation settings.
        """
        return pulumi.get(self, "content_synchronisation")

    @content_synchronisation.setter
    def content_synchronisation(self, value: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]):
        pulumi.set(self, "content_synchronisation", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Public description of the repository.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="enableCookieManagement")
    def enable_cookie_management(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables cookie management if the remote repository uses cookies to manage client state.
        """
        return pulumi.get(self, "enable_cookie_management")

    @enable_cookie_management.setter
    def enable_cookie_management(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_cookie_management", value)

    @property
    @pulumi.getter(name="excludesPattern")
    def excludes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
        artifacts are excluded.
        """
        return pulumi.get(self, "excludes_pattern")

    @excludes_pattern.setter
    def excludes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "excludes_pattern", value)

    @property
    @pulumi.getter(name="hardFail")
    def hard_fail(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
        communicate with this repository.
        """
        return pulumi.get(self, "hard_fail")

    @hard_fail.setter
    def hard_fail(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_fail", value)

    @property
    @pulumi.getter(name="includesPattern")
    def includes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
        artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        """
        return pulumi.get(self, "includes_pattern")

    @includes_pattern.setter
    def includes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "includes_pattern", value)

    @property
    @pulumi.getter(name="listRemoteFolderItems")
    def list_remote_folder_items(self) -> Optional[pulumi.Input[bool]]:
        """
        Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
        the 'Retrieval Cache Period'. Default value is 'false'.
        """
        return pulumi.get(self, "list_remote_folder_items")

    @list_remote_folder_items.setter
    def list_remote_folder_items(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "list_remote_folder_items", value)

    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> Optional[pulumi.Input[str]]:
        """
        The local address to be used when creating connections. Useful for specifying the interface to use on systems with
        multiple network interfaces.
        """
        return pulumi.get(self, "local_address")

    @local_address.setter
    def local_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "local_address", value)

    @property
    @pulumi.getter(name="mismatchingMimeTypesOverrideList")
    def mismatching_mime_types_override_list(self) -> Optional[pulumi.Input[str]]:
        """
        The set of mime types that should override the block_mismatching_mime_types setting. Eg:
        "application/json,application/xml". Default value is empty.
        """
        return pulumi.get(self, "mismatching_mime_types_override_list")

    @mismatching_mime_types_override_list.setter
    def mismatching_mime_types_override_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mismatching_mime_types_override_list", value)

    @property
    @pulumi.getter(name="missedCachePeriodSeconds")
    def missed_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        """
        return pulumi.get(self, "missed_cache_period_seconds")

    @missed_cache_period_seconds.setter
    def missed_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "missed_cache_period_seconds", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        Internal description of the repository.
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)

    @property
    @pulumi.getter
    def offline(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        """
        return pulumi.get(self, "offline")

    @offline.setter
    def offline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "offline", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password used to authenticate against the remote repository.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="priorityResolution")
    def priority_resolution(self) -> Optional[pulumi.Input[bool]]:
        """
        Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        """
        return pulumi.get(self, "priority_resolution")

    @priority_resolution.setter
    def priority_resolution(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "priority_resolution", value)

    @property
    @pulumi.getter(name="projectEnvironments")
    def project_environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        """
        return pulumi.get(self, "project_environments")

    @project_environments.setter
    def project_environments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "project_environments", value)

    @property
    @pulumi.getter(name="projectKey")
    def project_key(self) -> Optional[pulumi.Input[str]]:
        """
        Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
        repository to a project, repository key must be prefixed with project key, separated by a dash.
        """
        return pulumi.get(self, "project_key")

    @project_key.setter
    def project_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_key", value)

    @property
    @pulumi.getter(name="propagateQueryParams")
    def propagate_query_params(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        """
        return pulumi.get(self, "propagate_query_params")

    @propagate_query_params.setter
    def propagate_query_params(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "propagate_query_params", value)

    @property
    @pulumi.getter(name="propertySets")
    def property_sets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of property set names
        """
        return pulumi.get(self, "property_sets")

    @property_sets.setter
    def property_sets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "property_sets", value)

    @property
    @pulumi.getter
    def proxy(self) -> Optional[pulumi.Input[str]]:
        """
        Proxy key from Artifactory Proxies settings
        """
        return pulumi.get(self, "proxy")

    @proxy.setter
    def proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proxy", value)

    @property
    @pulumi.getter(name="remoteRepoLayoutRef")
    def remote_repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the remote layout mapping
        """
        return pulumi.get(self, "remote_repo_layout_ref")

    @remote_repo_layout_ref.setter
    def remote_repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote_repo_layout_ref", value)

    @property
    @pulumi.getter(name="repoLayoutRef")
    def repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the local repository
        """
        return pulumi.get(self, "repo_layout_ref")

    @repo_layout_ref.setter
    def repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_layout_ref", value)

    @property
    @pulumi.getter(name="retrievalCachePeriodSeconds")
    def retrieval_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
        """
        return pulumi.get(self, "retrieval_cache_period_seconds")

    @retrieval_cache_period_seconds.setter
    def retrieval_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retrieval_cache_period_seconds", value)

    @property
    @pulumi.getter(name="shareConfiguration")
    def share_configuration(self) -> Optional[pulumi.Input[bool]]:
        # NOTE(review): undocumented upstream; semantics not visible from this file.
        return pulumi.get(self, "share_configuration")

    @share_configuration.setter
    def share_configuration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "share_configuration", value)

    @property
    @pulumi.getter(name="socketTimeoutMillis")
    def socket_timeout_millis(self) -> Optional[pulumi.Input[int]]:
        """
        Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
        operation is considered a retrieval failure.
        """
        return pulumi.get(self, "socket_timeout_millis")

    @socket_timeout_millis.setter
    def socket_timeout_millis(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "socket_timeout_millis", value)

    @property
    @pulumi.getter(name="storeArtifactsLocally")
    def store_artifacts_locally(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
        direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
        one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
        servers.
        """
        return pulumi.get(self, "store_artifacts_locally")

    @store_artifacts_locally.setter
    def store_artifacts_locally(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "store_artifacts_locally", value)

    @property
    @pulumi.getter(name="synchronizeProperties")
    def synchronize_properties(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, remote artifacts are fetched along with their properties.
        """
        return pulumi.get(self, "synchronize_properties")

    @synchronize_properties.setter
    def synchronize_properties(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "synchronize_properties", value)

    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
    def unused_artifacts_cleanup_period_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables cleanup of unused cached artifacts; see 'unused_artifacts_cleanup_period_hours' for the retention window.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_enabled")

    @unused_artifacts_cleanup_period_enabled.setter
    def unused_artifacts_cleanup_period_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_enabled", value)

    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
    def unused_artifacts_cleanup_period_hours(self) -> Optional[pulumi.Input[int]]:
        """
        The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
        of 0 means automatic cleanup of cached artifacts is disabled.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_hours")

    @unused_artifacts_cleanup_period_hours.setter
    def unused_artifacts_cleanup_period_hours(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_hours", value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Username used to authenticate against the remote repository.
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)

    @property
    @pulumi.getter(name="xrayIndex")
    def xray_index(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
        Xray settings.
        """
        return pulumi.get(self, "xray_index")

    @xray_index.setter
    def xray_index(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "xray_index", value)
# NOTE(review): generated-style Pulumi input type (``@pulumi.input_type`` plus
# ``pulumi.get``/``pulumi.set`` accessor pairs). Manual edits here are likely to be
# overwritten if the provider SDK is regenerated.
@pulumi.input_type
class _RemotePuppetRepositoryState:
    """
    State-input properties for looking up and filtering RemotePuppetRepository resources.
    All fields are optional; ``__init__`` records only the arguments that are not ``None``.
    """
    def __init__(__self__, *,
                 allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                 assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                 blacked_out: Optional[pulumi.Input[bool]] = None,
                 block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                 bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                 client_tls_certificate: Optional[pulumi.Input[str]] = None,
                 content_synchronisation: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                 excludes_pattern: Optional[pulumi.Input[str]] = None,
                 failed_retrieval_cache_period_secs: Optional[pulumi.Input[int]] = None,
                 hard_fail: Optional[pulumi.Input[bool]] = None,
                 includes_pattern: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                 local_address: Optional[pulumi.Input[str]] = None,
                 mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                 missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 offline: Optional[pulumi.Input[bool]] = None,
                 package_type: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 priority_resolution: Optional[pulumi.Input[bool]] = None,
                 project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project_key: Optional[pulumi.Input[str]] = None,
                 propagate_query_params: Optional[pulumi.Input[bool]] = None,
                 property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 proxy: Optional[pulumi.Input[str]] = None,
                 remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 share_configuration: Optional[pulumi.Input[bool]] = None,
                 socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                 store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                 synchronize_properties: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                 url: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 xray_index: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering RemotePuppetRepository resources.
        :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
               any other host.
        :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
               an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
               offline. Default to 300.
        :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
               resolution.
        :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
        :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
               artifacts are excluded.
        :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
               communicate with this repository.
        :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
               artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
               contain spaces or special characters.
        :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
               the 'Retrieval Cache Period'. Default value is 'false'.
        :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
               multiple network interfaces.
        :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
               "application/json,application/xml". Default value is empty.
        :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
               repository to a project, repository key must be prefixed with project key, separated by a dash.
        :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
        :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
        :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
        :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
        :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field is not allowed to be bigger than the retrievalCachePeriodSecs field.
        :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
               operation is considered a retrieval failure.
        :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
               direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
               one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
               servers.
        :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
        :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
               of 0 means automatic cleanup of cached artifacts is disabled.
        :param pulumi.Input[str] url: The remote repo URL.
        :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
               Xray settings.
        """
        if allow_any_host_auth is not None:
            pulumi.set(__self__, "allow_any_host_auth", allow_any_host_auth)
        if assumed_offline_period_secs is not None:
            pulumi.set(__self__, "assumed_offline_period_secs", assumed_offline_period_secs)
        if blacked_out is not None:
            pulumi.set(__self__, "blacked_out", blacked_out)
        if block_mismatching_mime_types is not None:
            pulumi.set(__self__, "block_mismatching_mime_types", block_mismatching_mime_types)
        if bypass_head_requests is not None:
            pulumi.set(__self__, "bypass_head_requests", bypass_head_requests)
        if client_tls_certificate is not None:
            pulumi.set(__self__, "client_tls_certificate", client_tls_certificate)
        if content_synchronisation is not None:
            pulumi.set(__self__, "content_synchronisation", content_synchronisation)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_cookie_management is not None:
            pulumi.set(__self__, "enable_cookie_management", enable_cookie_management)
        if excludes_pattern is not None:
            pulumi.set(__self__, "excludes_pattern", excludes_pattern)
        # Deprecated field: emit the deprecation warning first; the value is still
        # recorded below (hence the intentionally repeated condition) so that it
        # remains available for informational purposes.
        if failed_retrieval_cache_period_secs is not None:
            warnings.warn("""This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function""", DeprecationWarning)
            pulumi.log.warn("""failed_retrieval_cache_period_secs is deprecated: This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function""")
        if failed_retrieval_cache_period_secs is not None:
            pulumi.set(__self__, "failed_retrieval_cache_period_secs", failed_retrieval_cache_period_secs)
        if hard_fail is not None:
            pulumi.set(__self__, "hard_fail", hard_fail)
        if includes_pattern is not None:
            pulumi.set(__self__, "includes_pattern", includes_pattern)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if list_remote_folder_items is not None:
            pulumi.set(__self__, "list_remote_folder_items", list_remote_folder_items)
        if local_address is not None:
            pulumi.set(__self__, "local_address", local_address)
        if mismatching_mime_types_override_list is not None:
            pulumi.set(__self__, "mismatching_mime_types_override_list", mismatching_mime_types_override_list)
        if missed_cache_period_seconds is not None:
            pulumi.set(__self__, "missed_cache_period_seconds", missed_cache_period_seconds)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if offline is not None:
            pulumi.set(__self__, "offline", offline)
        if package_type is not None:
            pulumi.set(__self__, "package_type", package_type)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if priority_resolution is not None:
            pulumi.set(__self__, "priority_resolution", priority_resolution)
        if project_environments is not None:
            pulumi.set(__self__, "project_environments", project_environments)
        if project_key is not None:
            pulumi.set(__self__, "project_key", project_key)
        if propagate_query_params is not None:
            pulumi.set(__self__, "propagate_query_params", propagate_query_params)
        if property_sets is not None:
            pulumi.set(__self__, "property_sets", property_sets)
        if proxy is not None:
            pulumi.set(__self__, "proxy", proxy)
        if remote_repo_layout_ref is not None:
            pulumi.set(__self__, "remote_repo_layout_ref", remote_repo_layout_ref)
        if repo_layout_ref is not None:
            pulumi.set(__self__, "repo_layout_ref", repo_layout_ref)
        if retrieval_cache_period_seconds is not None:
            pulumi.set(__self__, "retrieval_cache_period_seconds", retrieval_cache_period_seconds)
        if share_configuration is not None:
            pulumi.set(__self__, "share_configuration", share_configuration)
        if socket_timeout_millis is not None:
            pulumi.set(__self__, "socket_timeout_millis", socket_timeout_millis)
        if store_artifacts_locally is not None:
            pulumi.set(__self__, "store_artifacts_locally", store_artifacts_locally)
        if synchronize_properties is not None:
            pulumi.set(__self__, "synchronize_properties", synchronize_properties)
        if unused_artifacts_cleanup_period_enabled is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_enabled", unused_artifacts_cleanup_period_enabled)
        if unused_artifacts_cleanup_period_hours is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_hours", unused_artifacts_cleanup_period_hours)
        if url is not None:
            pulumi.set(__self__, "url", url)
        if username is not None:
            pulumi.set(__self__, "username", username)
        if xray_index is not None:
            pulumi.set(__self__, "xray_index", xray_index)
    @property
    @pulumi.getter(name="allowAnyHostAuth")
    def allow_any_host_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
        any other host.
        """
        return pulumi.get(self, "allow_any_host_auth")
    @allow_any_host_auth.setter
    def allow_any_host_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_any_host_auth", value)
    @property
    @pulumi.getter(name="assumedOfflinePeriodSecs")
    def assumed_offline_period_secs(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
        an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
        offline. Default to 300.
        """
        return pulumi.get(self, "assumed_offline_period_secs")
    @assumed_offline_period_secs.setter
    def assumed_offline_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "assumed_offline_period_secs", value)
    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> Optional[pulumi.Input[bool]]:
        """
        (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
        resolution.
        """
        return pulumi.get(self, "blacked_out")
    @blacked_out.setter
    def blacked_out(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "blacked_out", value)
    @property
    @pulumi.getter(name="blockMismatchingMimeTypes")
    def block_mismatching_mime_types(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        NOTE(review): this description appears to be copied from ``bypass_head_requests``;
        the field name suggests it instead blocks mismatching MIME types -- verify upstream.
        """
        return pulumi.get(self, "block_mismatching_mime_types")
    @block_mismatching_mime_types.setter
    def block_mismatching_mime_types(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "block_mismatching_mime_types", value)
    @property
    @pulumi.getter(name="bypassHeadRequests")
    def bypass_head_requests(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "bypass_head_requests")
    @bypass_head_requests.setter
    def bypass_head_requests(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bypass_head_requests", value)
    @property
    @pulumi.getter(name="clientTlsCertificate")
    def client_tls_certificate(self) -> Optional[pulumi.Input[str]]:
        """Value of the ``clientTlsCertificate`` repository property (no upstream description provided)."""
        return pulumi.get(self, "client_tls_certificate")
    @client_tls_certificate.setter
    def client_tls_certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_tls_certificate", value)
    @property
    @pulumi.getter(name="contentSynchronisation")
    def content_synchronisation(self) -> Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]:
        """Nested content-synchronisation settings (``RemotePuppetRepositoryContentSynchronisationArgs``)."""
        return pulumi.get(self, "content_synchronisation")
    @content_synchronisation.setter
    def content_synchronisation(self, value: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]):
        pulumi.set(self, "content_synchronisation", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Repository description text."""
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="enableCookieManagement")
    def enable_cookie_management(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables cookie management if the remote repository uses cookies to manage client state.
        """
        return pulumi.get(self, "enable_cookie_management")
    @enable_cookie_management.setter
    def enable_cookie_management(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_cookie_management", value)
    @property
    @pulumi.getter(name="excludesPattern")
    def excludes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
        artifacts are excluded.
        """
        return pulumi.get(self, "excludes_pattern")
    @excludes_pattern.setter
    def excludes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "excludes_pattern", value)
    @property
    @pulumi.getter(name="failedRetrievalCachePeriodSecs")
    def failed_retrieval_cache_period_secs(self) -> Optional[pulumi.Input[int]]:
        """
        Deprecated. This field is not returned in a get payload; it is kept for
        informational reasons only and does not function (see the deprecation
        warning emitted in ``__init__``).
        """
        return pulumi.get(self, "failed_retrieval_cache_period_secs")
    @failed_retrieval_cache_period_secs.setter
    def failed_retrieval_cache_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failed_retrieval_cache_period_secs", value)
    @property
    @pulumi.getter(name="hardFail")
    def hard_fail(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
        communicate with this repository.
        """
        return pulumi.get(self, "hard_fail")
    @hard_fail.setter
    def hard_fail(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_fail", value)
    @property
    @pulumi.getter(name="includesPattern")
    def includes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
        artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        """
        return pulumi.get(self, "includes_pattern")
    @includes_pattern.setter
    def includes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "includes_pattern", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        A mandatory identifier for the repository that must be unique. It cannot begin with a number or
        contain spaces or special characters.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter(name="listRemoteFolderItems")
    def list_remote_folder_items(self) -> Optional[pulumi.Input[bool]]:
        """
        Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
        the 'Retrieval Cache Period'. Default value is 'false'.
        """
        return pulumi.get(self, "list_remote_folder_items")
    @list_remote_folder_items.setter
    def list_remote_folder_items(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "list_remote_folder_items", value)
    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> Optional[pulumi.Input[str]]:
        """
        The local address to be used when creating connections. Useful for specifying the interface to use on systems with
        multiple network interfaces.
        """
        return pulumi.get(self, "local_address")
    @local_address.setter
    def local_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "local_address", value)
    @property
    @pulumi.getter(name="mismatchingMimeTypesOverrideList")
    def mismatching_mime_types_override_list(self) -> Optional[pulumi.Input[str]]:
        """
        The set of mime types that should override the block_mismatching_mime_types setting. Eg:
        "application/json,application/xml". Default value is empty.
        """
        return pulumi.get(self, "mismatching_mime_types_override_list")
    @mismatching_mime_types_override_list.setter
    def mismatching_mime_types_override_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mismatching_mime_types_override_list", value)
    @property
    @pulumi.getter(name="missedCachePeriodSeconds")
    def missed_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        """
        return pulumi.get(self, "missed_cache_period_seconds")
    @missed_cache_period_seconds.setter
    def missed_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "missed_cache_period_seconds", value)
    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """Repository notes (free text; no upstream description provided)."""
        return pulumi.get(self, "notes")
    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)
    @property
    @pulumi.getter
    def offline(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        """
        return pulumi.get(self, "offline")
    @offline.setter
    def offline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "offline", value)
    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> Optional[pulumi.Input[str]]:
        """Value of the ``packageType`` repository property (no upstream description provided)."""
        return pulumi.get(self, "package_type")
    @package_type.setter
    def package_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "package_type", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """Password -- presumably paired with ``username`` for remote authentication; not documented upstream."""
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="priorityResolution")
    def priority_resolution(self) -> Optional[pulumi.Input[bool]]:
        """
        Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        """
        return pulumi.get(self, "priority_resolution")
    @priority_resolution.setter
    def priority_resolution(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "priority_resolution", value)
    @property
    @pulumi.getter(name="projectEnvironments")
    def project_environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        """
        return pulumi.get(self, "project_environments")
    @project_environments.setter
    def project_environments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "project_environments", value)
    @property
    @pulumi.getter(name="projectKey")
    def project_key(self) -> Optional[pulumi.Input[str]]:
        """
        Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
        repository to a project, repository key must be prefixed with project key, separated by a dash.
        """
        return pulumi.get(self, "project_key")
    @project_key.setter
    def project_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_key", value)
    @property
    @pulumi.getter(name="propagateQueryParams")
    def propagate_query_params(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        """
        return pulumi.get(self, "propagate_query_params")
    @propagate_query_params.setter
    def propagate_query_params(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "propagate_query_params", value)
    @property
    @pulumi.getter(name="propertySets")
    def property_sets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of property set names
        """
        return pulumi.get(self, "property_sets")
    @property_sets.setter
    def property_sets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "property_sets", value)
    @property
    @pulumi.getter
    def proxy(self) -> Optional[pulumi.Input[str]]:
        """
        Proxy key from Artifactory Proxies settings
        """
        return pulumi.get(self, "proxy")
    @proxy.setter
    def proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proxy", value)
    @property
    @pulumi.getter(name="remoteRepoLayoutRef")
    def remote_repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the remote layout mapping
        """
        return pulumi.get(self, "remote_repo_layout_ref")
    @remote_repo_layout_ref.setter
    def remote_repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote_repo_layout_ref", value)
    @property
    @pulumi.getter(name="repoLayoutRef")
    def repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the local repository
        """
        return pulumi.get(self, "repo_layout_ref")
    @repo_layout_ref.setter
    def repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_layout_ref", value)
    @property
    @pulumi.getter(name="retrievalCachePeriodSeconds")
    def retrieval_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The metadataRetrievalTimeoutSecs field is not allowed to be bigger than the retrievalCachePeriodSecs field.
        """
        return pulumi.get(self, "retrieval_cache_period_seconds")
    @retrieval_cache_period_seconds.setter
    def retrieval_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retrieval_cache_period_seconds", value)
    @property
    @pulumi.getter(name="shareConfiguration")
    def share_configuration(self) -> Optional[pulumi.Input[bool]]:
        """Value of the ``shareConfiguration`` repository property (no upstream description provided)."""
        return pulumi.get(self, "share_configuration")
    @share_configuration.setter
    def share_configuration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "share_configuration", value)
    @property
    @pulumi.getter(name="socketTimeoutMillis")
    def socket_timeout_millis(self) -> Optional[pulumi.Input[int]]:
        """
        Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
        operation is considered a retrieval failure.
        """
        return pulumi.get(self, "socket_timeout_millis")
    @socket_timeout_millis.setter
    def socket_timeout_millis(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "socket_timeout_millis", value)
    @property
    @pulumi.getter(name="storeArtifactsLocally")
    def store_artifacts_locally(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
        direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
        one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
        servers.
        """
        return pulumi.get(self, "store_artifacts_locally")
    @store_artifacts_locally.setter
    def store_artifacts_locally(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "store_artifacts_locally", value)
    @property
    @pulumi.getter(name="synchronizeProperties")
    def synchronize_properties(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, remote artifacts are fetched along with their properties.
        """
        return pulumi.get(self, "synchronize_properties")
    @synchronize_properties.setter
    def synchronize_properties(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "synchronize_properties", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
    def unused_artifacts_cleanup_period_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Value of the ``unusedArtifactsCleanupPeriodEnabled`` repository property.
        No upstream description; presumably toggles the cleanup governed by
        ``unused_artifacts_cleanup_period_hours`` -- TODO confirm against provider docs.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_enabled")
    @unused_artifacts_cleanup_period_enabled.setter
    def unused_artifacts_cleanup_period_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_enabled", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
    def unused_artifacts_cleanup_period_hours(self) -> Optional[pulumi.Input[int]]:
        """
        The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
        of 0 means automatic cleanup of cached artifacts is disabled.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_hours")
    @unused_artifacts_cleanup_period_hours.setter
    def unused_artifacts_cleanup_period_hours(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_hours", value)
    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """
        The remote repo URL.
        """
        return pulumi.get(self, "url")
    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """Username -- presumably paired with ``password`` for remote authentication; not documented upstream."""
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="xrayIndex")
    def xray_index(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
        Xray settings.
        """
        return pulumi.get(self, "xray_index")
    @xray_index.setter
    def xray_index(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "xray_index", value)
class RemotePuppetRepository(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
blacked_out: Optional[pulumi.Input[bool]] = None,
block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
bypass_head_requests: Optional[pulumi.Input[bool]] = None,
client_tls_certificate: Optional[pulumi.Input[str]] = None,
content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_cookie_management: Optional[pulumi.Input[bool]] = None,
excludes_pattern: Optional[pulumi.Input[str]] = None,
hard_fail: Optional[pulumi.Input[bool]] = None,
includes_pattern: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
local_address: Optional[pulumi.Input[str]] = None,
mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
notes: Optional[pulumi.Input[str]] = None,
offline: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
priority_resolution: Optional[pulumi.Input[bool]] = None,
project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project_key: Optional[pulumi.Input[str]] = None,
propagate_query_params: Optional[pulumi.Input[bool]] = None,
property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
proxy: Optional[pulumi.Input[str]] = None,
remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
repo_layout_ref: Optional[pulumi.Input[str]] = None,
retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
share_configuration: Optional[pulumi.Input[bool]] = None,
socket_timeout_millis: Optional[pulumi.Input[int]] = None,
store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
synchronize_properties: Optional[pulumi.Input[bool]] = None,
unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
xray_index: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Creates a remote Puppet repository.
Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories).
## Example Usage
```python
import pulumi
import pulumi_artifactory as artifactory
my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet",
key="my-remote-puppet",
url="https://forgeapi.puppetlabs.com/")
```
## Import
Remote repositories can be imported using their name, e.g.
```sh
$ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
any other host.
:param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
offline. Default to 300.
:param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
resolution.
:param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
:param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
:param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
:param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
artifacts are excluded.
:param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
communicate with this repository.
:param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
:param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
contain spaces or special characters.
:param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
the 'Retrieval Cache Period'. Default value is 'false'.
:param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
multiple network interfaces.
:param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
"application/json,application/xml". Default value is empty.
:param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
:param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
:param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
:param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
:param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
repository to a project, repository key must be prefixed with project key, separated by a dash.
:param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
:param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
:param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
:param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
:param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
:param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
:param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
operation is considered a retrieval failure.
:param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
servers.
:param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
:param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
of 0 means automatic cleanup of cached artifacts is disabled.
:param pulumi.Input[str] url: The remote repo URL.
:param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
Xray settings.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: RemotePuppetRepositoryArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Creates a remote Puppet repository.
    Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_artifactory as artifactory

    my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet",
        key="my-remote-puppet",
        url="https://forgeapi.puppetlabs.com/")
    ```

    ## Import

    Remote repositories can be imported using their name, e.g.

    ```sh
    $ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet
    ```

    :param str resource_name: The name of the resource.
    :param RemotePuppetRepositoryArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Overload stub only: the real dispatch happens in the non-overloaded
    # __init__ below, which forwards to _internal_init.
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch construction to ``_internal_init``.

    Accepts either a single ``RemotePuppetRepositoryArgs`` object (first
    overload) or plain keyword arguments (second overload), as decided by
    the SDK utility helper.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        RemotePuppetRepositoryArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Keyword-argument overload: pass everything through untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object overload: expand the args object's fields as kwargs.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                   assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                   blacked_out: Optional[pulumi.Input[bool]] = None,
                   block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                   bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                   client_tls_certificate: Optional[pulumi.Input[str]] = None,
                   content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                   excludes_pattern: Optional[pulumi.Input[str]] = None,
                   hard_fail: Optional[pulumi.Input[bool]] = None,
                   includes_pattern: Optional[pulumi.Input[str]] = None,
                   key: Optional[pulumi.Input[str]] = None,
                   list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                   local_address: Optional[pulumi.Input[str]] = None,
                   mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                   missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                   notes: Optional[pulumi.Input[str]] = None,
                   offline: Optional[pulumi.Input[bool]] = None,
                   password: Optional[pulumi.Input[str]] = None,
                   priority_resolution: Optional[pulumi.Input[bool]] = None,
                   project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   project_key: Optional[pulumi.Input[str]] = None,
                   propagate_query_params: Optional[pulumi.Input[bool]] = None,
                   property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   proxy: Optional[pulumi.Input[str]] = None,
                   remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                   repo_layout_ref: Optional[pulumi.Input[str]] = None,
                   retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                   share_configuration: Optional[pulumi.Input[bool]] = None,
                   socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                   store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                   synchronize_properties: Optional[pulumi.Input[bool]] = None,
                   unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                   unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                   url: Optional[pulumi.Input[str]] = None,
                   username: Optional[pulumi.Input[str]] = None,
                   xray_index: Optional[pulumi.Input[bool]] = None,
                   __props__=None):
    """
    Shared implementation behind both ``__init__`` overloads.

    Validates the resource options, enforces the required ``key`` and
    ``url`` inputs, copies every input onto the args object, and registers
    the resource with the Pulumi engine via the base-class constructor.

    :raises TypeError: if ``opts`` is not a ``pulumi.ResourceOptions``, if
        ``__props__`` is passed without ``opts.id``, or if a required
        property (``key``, ``url``) is missing on creation.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Default the provider plugin version to this SDK's version.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No id => creating a new resource; callers may not supply __props__
        # (that combination is reserved for the `get` lookup path).
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = RemotePuppetRepositoryArgs.__new__(RemotePuppetRepositoryArgs)

        __props__.__dict__["allow_any_host_auth"] = allow_any_host_auth
        __props__.__dict__["assumed_offline_period_secs"] = assumed_offline_period_secs
        __props__.__dict__["blacked_out"] = blacked_out
        __props__.__dict__["block_mismatching_mime_types"] = block_mismatching_mime_types
        __props__.__dict__["bypass_head_requests"] = bypass_head_requests
        __props__.__dict__["client_tls_certificate"] = client_tls_certificate
        __props__.__dict__["content_synchronisation"] = content_synchronisation
        __props__.__dict__["description"] = description
        __props__.__dict__["enable_cookie_management"] = enable_cookie_management
        __props__.__dict__["excludes_pattern"] = excludes_pattern
        __props__.__dict__["hard_fail"] = hard_fail
        __props__.__dict__["includes_pattern"] = includes_pattern
        # `key` is required unless adopting an existing resource (opts.urn set).
        if key is None and not opts.urn:
            raise TypeError("Missing required property 'key'")
        __props__.__dict__["key"] = key
        __props__.__dict__["list_remote_folder_items"] = list_remote_folder_items
        __props__.__dict__["local_address"] = local_address
        __props__.__dict__["mismatching_mime_types_override_list"] = mismatching_mime_types_override_list
        __props__.__dict__["missed_cache_period_seconds"] = missed_cache_period_seconds
        __props__.__dict__["notes"] = notes
        __props__.__dict__["offline"] = offline
        __props__.__dict__["password"] = password
        __props__.__dict__["priority_resolution"] = priority_resolution
        __props__.__dict__["project_environments"] = project_environments
        __props__.__dict__["project_key"] = project_key
        __props__.__dict__["propagate_query_params"] = propagate_query_params
        __props__.__dict__["property_sets"] = property_sets
        __props__.__dict__["proxy"] = proxy
        __props__.__dict__["remote_repo_layout_ref"] = remote_repo_layout_ref
        __props__.__dict__["repo_layout_ref"] = repo_layout_ref
        __props__.__dict__["retrieval_cache_period_seconds"] = retrieval_cache_period_seconds
        __props__.__dict__["share_configuration"] = share_configuration
        __props__.__dict__["socket_timeout_millis"] = socket_timeout_millis
        __props__.__dict__["store_artifacts_locally"] = store_artifacts_locally
        __props__.__dict__["synchronize_properties"] = synchronize_properties
        __props__.__dict__["unused_artifacts_cleanup_period_enabled"] = unused_artifacts_cleanup_period_enabled
        __props__.__dict__["unused_artifacts_cleanup_period_hours"] = unused_artifacts_cleanup_period_hours
        # `url` is required unless adopting an existing resource (opts.urn set).
        if url is None and not opts.urn:
            raise TypeError("Missing required property 'url'")
        __props__.__dict__["url"] = url
        __props__.__dict__["username"] = username
        __props__.__dict__["xray_index"] = xray_index
        # Output-only properties: never set on creation, computed by the provider.
        __props__.__dict__["failed_retrieval_cache_period_secs"] = None
        __props__.__dict__["package_type"] = None
    # Register the resource with the engine under its Pulumi type token.
    super(RemotePuppetRepository, __self__).__init__(
        'artifactory:index/remotePuppetRepository:RemotePuppetRepository',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
        assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
        blacked_out: Optional[pulumi.Input[bool]] = None,
        block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
        bypass_head_requests: Optional[pulumi.Input[bool]] = None,
        client_tls_certificate: Optional[pulumi.Input[str]] = None,
        content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
        description: Optional[pulumi.Input[str]] = None,
        enable_cookie_management: Optional[pulumi.Input[bool]] = None,
        excludes_pattern: Optional[pulumi.Input[str]] = None,
        failed_retrieval_cache_period_secs: Optional[pulumi.Input[int]] = None,
        hard_fail: Optional[pulumi.Input[bool]] = None,
        includes_pattern: Optional[pulumi.Input[str]] = None,
        key: Optional[pulumi.Input[str]] = None,
        list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
        local_address: Optional[pulumi.Input[str]] = None,
        mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
        missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
        notes: Optional[pulumi.Input[str]] = None,
        offline: Optional[pulumi.Input[bool]] = None,
        package_type: Optional[pulumi.Input[str]] = None,
        password: Optional[pulumi.Input[str]] = None,
        priority_resolution: Optional[pulumi.Input[bool]] = None,
        project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        project_key: Optional[pulumi.Input[str]] = None,
        propagate_query_params: Optional[pulumi.Input[bool]] = None,
        property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        proxy: Optional[pulumi.Input[str]] = None,
        remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
        repo_layout_ref: Optional[pulumi.Input[str]] = None,
        retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
        share_configuration: Optional[pulumi.Input[bool]] = None,
        socket_timeout_millis: Optional[pulumi.Input[int]] = None,
        store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
        synchronize_properties: Optional[pulumi.Input[bool]] = None,
        unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
        unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
        url: Optional[pulumi.Input[str]] = None,
        username: Optional[pulumi.Input[str]] = None,
        xray_index: Optional[pulumi.Input[bool]] = None) -> 'RemotePuppetRepository':
    """
    Get an existing RemotePuppetRepository resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
           any other host.
    :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
           an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
           offline. Default to 300.
    :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
           resolution.
    :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
           HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
           Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
    :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
           HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
           Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
    :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
    :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
           artifacts are excluded.
    :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
           communicate with this repository.
    :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
           artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
    :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
           contain spaces or special characters.
    :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
           the 'Retrieval Cache Period'. Default value is 'false'.
    :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
           multiple network interfaces.
    :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
           "application/json,application/xml". Default value is empty.
    :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
    :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
    :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
    :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allowed values: "DEV" or "PROD"
    :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
           repository to a project, repository key must be prefixed with project key, separated by a dash.
    :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
    :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
    :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
    :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
    :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger than retrievalCachePeriodSecs field.
    :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
           operation is considered a retrieval failure.
    :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
           direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
           one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
           servers.
    :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
    :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
           of 0 means automatic cleanup of cached artifacts is disabled.
    :param pulumi.Input[str] url: The remote repo URL.
    :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
           Xray settings.
    """
    # Bind the lookup to the given provider ID; merge keeps any caller-supplied opts.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Build a state object carrying every supplied property; unset values stay None.
    __props__ = _RemotePuppetRepositoryState.__new__(_RemotePuppetRepositoryState)

    __props__.__dict__["allow_any_host_auth"] = allow_any_host_auth
    __props__.__dict__["assumed_offline_period_secs"] = assumed_offline_period_secs
    __props__.__dict__["blacked_out"] = blacked_out
    __props__.__dict__["block_mismatching_mime_types"] = block_mismatching_mime_types
    __props__.__dict__["bypass_head_requests"] = bypass_head_requests
    __props__.__dict__["client_tls_certificate"] = client_tls_certificate
    __props__.__dict__["content_synchronisation"] = content_synchronisation
    __props__.__dict__["description"] = description
    __props__.__dict__["enable_cookie_management"] = enable_cookie_management
    __props__.__dict__["excludes_pattern"] = excludes_pattern
    __props__.__dict__["failed_retrieval_cache_period_secs"] = failed_retrieval_cache_period_secs
    __props__.__dict__["hard_fail"] = hard_fail
    __props__.__dict__["includes_pattern"] = includes_pattern
    __props__.__dict__["key"] = key
    __props__.__dict__["list_remote_folder_items"] = list_remote_folder_items
    __props__.__dict__["local_address"] = local_address
    __props__.__dict__["mismatching_mime_types_override_list"] = mismatching_mime_types_override_list
    __props__.__dict__["missed_cache_period_seconds"] = missed_cache_period_seconds
    __props__.__dict__["notes"] = notes
    __props__.__dict__["offline"] = offline
    __props__.__dict__["package_type"] = package_type
    __props__.__dict__["password"] = password
    __props__.__dict__["priority_resolution"] = priority_resolution
    __props__.__dict__["project_environments"] = project_environments
    __props__.__dict__["project_key"] = project_key
    __props__.__dict__["propagate_query_params"] = propagate_query_params
    __props__.__dict__["property_sets"] = property_sets
    __props__.__dict__["proxy"] = proxy
    __props__.__dict__["remote_repo_layout_ref"] = remote_repo_layout_ref
    __props__.__dict__["repo_layout_ref"] = repo_layout_ref
    __props__.__dict__["retrieval_cache_period_seconds"] = retrieval_cache_period_seconds
    __props__.__dict__["share_configuration"] = share_configuration
    __props__.__dict__["socket_timeout_millis"] = socket_timeout_millis
    __props__.__dict__["store_artifacts_locally"] = store_artifacts_locally
    __props__.__dict__["synchronize_properties"] = synchronize_properties
    __props__.__dict__["unused_artifacts_cleanup_period_enabled"] = unused_artifacts_cleanup_period_enabled
    __props__.__dict__["unused_artifacts_cleanup_period_hours"] = unused_artifacts_cleanup_period_hours
    __props__.__dict__["url"] = url
    __props__.__dict__["username"] = username
    __props__.__dict__["xray_index"] = xray_index
    return RemotePuppetRepository(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowAnyHostAuth")
def allow_any_host_auth(self) -> pulumi.Output[bool]:
    """
    Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
    any other host.
    """
    return pulumi.get(self, "allow_any_host_auth")
@property
@pulumi.getter(name="assumedOfflinePeriodSecs")
def assumed_offline_period_secs(self) -> pulumi.Output[Optional[int]]:
    """
    The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
    an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
    offline. Defaults to 300.
    """
    return pulumi.get(self, "assumed_offline_period_secs")
@property
@pulumi.getter(name="blackedOut")
def blacked_out(self) -> pulumi.Output[bool]:
    """
    (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
    resolution.
    """
    return pulumi.get(self, "blacked_out")
@property
@pulumi.getter(name="blockMismatchingMimeTypes")
def block_mismatching_mime_types(self) -> pulumi.Output[bool]:
    """
    Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
    HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
    Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
    """
    return pulumi.get(self, "block_mismatching_mime_types")
@property
@pulumi.getter(name="bypassHeadRequests")
def bypass_head_requests(self) -> pulumi.Output[bool]:
    """
    Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
    HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
    Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
    """
    return pulumi.get(self, "bypass_head_requests")
@property
@pulumi.getter(name="clientTlsCertificate")
def client_tls_certificate(self) -> pulumi.Output[str]:
    """
    Client TLS certificate setting for this repository.
    (NOTE(review): exact semantics not documented in this file — see Artifactory remote repository docs.)
    """
    return pulumi.get(self, "client_tls_certificate")
@property
@pulumi.getter(name="contentSynchronisation")
def content_synchronisation(self) -> pulumi.Output['outputs.RemotePuppetRepositoryContentSynchronisation']:
    """
    The content-synchronisation configuration block for this repository.
    """
    return pulumi.get(self, "content_synchronisation")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """
    The repository's description.
    """
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="enableCookieManagement")
def enable_cookie_management(self) -> pulumi.Output[bool]:
    """
    Enables cookie management if the remote repository uses cookies to manage client state.
    """
    return pulumi.get(self, "enable_cookie_management")
@property
@pulumi.getter(name="excludesPattern")
def excludes_pattern(self) -> pulumi.Output[str]:
    """
    List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
    artifacts are excluded.
    """
    return pulumi.get(self, "excludes_pattern")
@property
@pulumi.getter(name="failedRetrievalCachePeriodSecs")
def failed_retrieval_cache_period_secs(self) -> pulumi.Output[int]:
    """
    Provider-computed value; this property is never supplied at resource creation
    (it is always initialized to None and populated by the provider).
    """
    return pulumi.get(self, "failed_retrieval_cache_period_secs")
@property
@pulumi.getter(name="hardFail")
def hard_fail(self) -> pulumi.Output[bool]:
    """
    When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
    communicate with this repository.
    """
    return pulumi.get(self, "hard_fail")
@property
@pulumi.getter(name="includesPattern")
def includes_pattern(self) -> pulumi.Output[str]:
    """
    List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
    artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
    """
    return pulumi.get(self, "includes_pattern")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
    """
    A mandatory identifier for the repository that must be unique. It cannot begin with a number or
    contain spaces or special characters.
    """
    return pulumi.get(self, "key")
@property
@pulumi.getter(name="listRemoteFolderItems")
def list_remote_folder_items(self) -> pulumi.Output[Optional[bool]]:
    """
    Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
    the 'Retrieval Cache Period'. Default value is 'false'.
    """
    return pulumi.get(self, "list_remote_folder_items")
@property
@pulumi.getter(name="localAddress")
def local_address(self) -> pulumi.Output[Optional[str]]:
    """
    The local address to be used when creating connections. Useful for specifying the interface to use on systems with
    multiple network interfaces.
    """
    return pulumi.get(self, "local_address")
@property
@pulumi.getter(name="mismatchingMimeTypesOverrideList")
def mismatching_mime_types_override_list(self) -> pulumi.Output[Optional[str]]:
    """
    The set of mime types that should override the block_mismatching_mime_types setting. Eg:
    "application/json,application/xml". Default value is empty.
    """
    return pulumi.get(self, "mismatching_mime_types_override_list")
@property
@pulumi.getter(name="missedCachePeriodSeconds")
def missed_cache_period_seconds(self) -> pulumi.Output[int]:
    """
    The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
    """
    return pulumi.get(self, "missed_cache_period_seconds")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
    """
    Free-text notes attached to the repository.
    """
    return pulumi.get(self, "notes")
@property
@pulumi.getter
def offline(self) -> pulumi.Output[bool]:
    """
    If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
    """
    return pulumi.get(self, "offline")
@property
@pulumi.getter(name="packageType")
def package_type(self) -> pulumi.Output[str]:
    """
    The repository's package type. Provider-computed: never supplied at resource
    creation (initialized to None and populated by the provider).
    """
    return pulumi.get(self, "package_type")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
    """
    Password used together with ``username`` to authenticate against the remote
    repository — TODO(review): confirm against provider docs.
    """
    return pulumi.get(self, "password")
@property
@pulumi.getter(name="priorityResolution")
def priority_resolution(self) -> pulumi.Output[bool]:
    """
    Setting repositories with priority will cause metadata to be merged only from repositories set with this field
    """
    return pulumi.get(self, "priority_resolution")
@property
@pulumi.getter(name="projectEnvironments")
def project_environments(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    Project environment for assigning this repository to. Allowed values: "DEV" or "PROD"
    """
    return pulumi.get(self, "project_environments")
@property
@pulumi.getter(name="projectKey")
def project_key(self) -> pulumi.Output[Optional[str]]:
    """
    Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
    repository to a project, repository key must be prefixed with project key, separated by a dash.
    """
    return pulumi.get(self, "project_key")
@property
@pulumi.getter(name="propagateQueryParams")
def propagate_query_params(self) -> pulumi.Output[Optional[bool]]:
    """
    When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
    """
    return pulumi.get(self, "propagate_query_params")
@property
@pulumi.getter(name="propertySets")
def property_sets(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    List of property set names
    """
    return pulumi.get(self, "property_sets")
@property
@pulumi.getter
def proxy(self) -> pulumi.Output[Optional[str]]:
    """
    Proxy key from Artifactory Proxies settings
    """
    return pulumi.get(self, "proxy")
    # Wire/API property name: "remoteRepoLayoutRef".
    @property
    @pulumi.getter(name="remoteRepoLayoutRef")
    def remote_repo_layout_ref(self) -> pulumi.Output[str]:
        """
        Repository layout key for the remote layout mapping
        """
        return pulumi.get(self, "remote_repo_layout_ref")
    # Wire/API property name: "repoLayoutRef".
    @property
    @pulumi.getter(name="repoLayoutRef")
    def repo_layout_ref(self) -> pulumi.Output[Optional[str]]:
        """
        Repository layout key for the local repository
        """
        return pulumi.get(self, "repo_layout_ref")
    # Wire/API property name: "retrievalCachePeriodSeconds".
    @property
    @pulumi.getter(name="retrievalCachePeriodSeconds")
    def retrieval_cache_period_seconds(self) -> pulumi.Output[int]:
        """
        The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
        """
        return pulumi.get(self, "retrieval_cache_period_seconds")
    # Wire/API property name: "shareConfiguration".
    @property
    @pulumi.getter(name="shareConfiguration")
    def share_configuration(self) -> pulumi.Output[bool]:
        """
        Whether the repository configuration is shared. (Undocumented upstream — verify semantics against the Artifactory API.)
        """
        return pulumi.get(self, "share_configuration")
    # Wire/API property name: "socketTimeoutMillis".
    @property
    @pulumi.getter(name="socketTimeoutMillis")
    def socket_timeout_millis(self) -> pulumi.Output[int]:
        """
        Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
        operation is considered a retrieval failure.
        """
        return pulumi.get(self, "socket_timeout_millis")
    # Wire/API property name: "storeArtifactsLocally".
    @property
    @pulumi.getter(name="storeArtifactsLocally")
    def store_artifacts_locally(self) -> pulumi.Output[bool]:
        """
        When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
        direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
        one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
        servers.
        """
        return pulumi.get(self, "store_artifacts_locally")
    # Wire/API property name: "synchronizeProperties".
    @property
    @pulumi.getter(name="synchronizeProperties")
    def synchronize_properties(self) -> pulumi.Output[bool]:
        """
        When set, remote artifacts are fetched along with their properties.
        """
        return pulumi.get(self, "synchronize_properties")
    # Wire/API property name: "unusedArtifactsCleanupPeriodEnabled".
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
    def unused_artifacts_cleanup_period_enabled(self) -> pulumi.Output[bool]:
        """
        Whether unused-artifact cleanup is enabled (see `unused_artifacts_cleanup_period_hours`).
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_enabled")
    # Wire/API property name: "unusedArtifactsCleanupPeriodHours".
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
    def unused_artifacts_cleanup_period_hours(self) -> pulumi.Output[int]:
        """
        The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
        of 0 means automatic cleanup of cached artifacts is disabled.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_hours")
    # Plain getter: the Pulumi wire name defaults to the Python attribute name.
    @property
    @pulumi.getter
    def url(self) -> pulumi.Output[str]:
        """
        The remote repo URL.
        """
        return pulumi.get(self, "url")
    @property
    @pulumi.getter
    def username(self) -> pulumi.Output[Optional[str]]:
        """
        The configured remote-repository username, if any.
        """
        return pulumi.get(self, "username")
    # Wire/API property name: "xrayIndex".
    @property
    @pulumi.getter(name="xrayIndex")
    def xray_index(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
        Xray settings.
        """
        return pulumi.get(self, "xray_index")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RemotePuppetRepositoryArgs', 'RemotePuppetRepository']
@pulumi.input_type
class RemotePuppetRepositoryArgs:
    """
    Input arguments for the `RemotePuppetRepository` resource.

    Generated by the Pulumi Terraform Bridge (tfgen); each field mirrors the
    corresponding Artifactory remote-repository setting. Only `key` and `url`
    are required — every other argument is optional.
    """
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 url: pulumi.Input[str],
                 allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                 assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                 blacked_out: Optional[pulumi.Input[bool]] = None,
                 block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                 bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                 client_tls_certificate: Optional[pulumi.Input[str]] = None,
                 content_synchronisation: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                 excludes_pattern: Optional[pulumi.Input[str]] = None,
                 hard_fail: Optional[pulumi.Input[bool]] = None,
                 includes_pattern: Optional[pulumi.Input[str]] = None,
                 list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                 local_address: Optional[pulumi.Input[str]] = None,
                 mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                 missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 offline: Optional[pulumi.Input[bool]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 priority_resolution: Optional[pulumi.Input[bool]] = None,
                 project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project_key: Optional[pulumi.Input[str]] = None,
                 propagate_query_params: Optional[pulumi.Input[bool]] = None,
                 property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 proxy: Optional[pulumi.Input[str]] = None,
                 remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 share_configuration: Optional[pulumi.Input[bool]] = None,
                 socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                 store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                 synchronize_properties: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 xray_index: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a RemotePuppetRepository resource.
        :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
               contain spaces or special characters.
        :param pulumi.Input[str] url: The remote repo URL.
        :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
               any other host.
        :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
               an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
               offline. Default to 300.
        :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
               resolution.
        :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
        :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
               artifacts are excluded.
        :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
               communicate with this repository.
        :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
               artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
               the 'Retrieval Cache Period'. Default value is 'false'.
        :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
               multiple network interfaces.
        :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
               "application/json,application/xml". Default value is empty.
        :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
               repository to a project, repository key must be prefixed with project key, separated by a dash.
        :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
        :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
        :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
        :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
        :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
        :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
               operation is considered a retrieval failure.
        :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
               direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
               one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
               servers.
        :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
        :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
               of 0 means automatic cleanup of cached artifacts is disabled.
        :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
               Xray settings.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "url", url)
        if allow_any_host_auth is not None:
            pulumi.set(__self__, "allow_any_host_auth", allow_any_host_auth)
        if assumed_offline_period_secs is not None:
            pulumi.set(__self__, "assumed_offline_period_secs", assumed_offline_period_secs)
        if blacked_out is not None:
            pulumi.set(__self__, "blacked_out", blacked_out)
        if block_mismatching_mime_types is not None:
            pulumi.set(__self__, "block_mismatching_mime_types", block_mismatching_mime_types)
        if bypass_head_requests is not None:
            pulumi.set(__self__, "bypass_head_requests", bypass_head_requests)
        if client_tls_certificate is not None:
            pulumi.set(__self__, "client_tls_certificate", client_tls_certificate)
        if content_synchronisation is not None:
            pulumi.set(__self__, "content_synchronisation", content_synchronisation)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_cookie_management is not None:
            pulumi.set(__self__, "enable_cookie_management", enable_cookie_management)
        if excludes_pattern is not None:
            pulumi.set(__self__, "excludes_pattern", excludes_pattern)
        if hard_fail is not None:
            pulumi.set(__self__, "hard_fail", hard_fail)
        if includes_pattern is not None:
            pulumi.set(__self__, "includes_pattern", includes_pattern)
        if list_remote_folder_items is not None:
            pulumi.set(__self__, "list_remote_folder_items", list_remote_folder_items)
        if local_address is not None:
            pulumi.set(__self__, "local_address", local_address)
        if mismatching_mime_types_override_list is not None:
            pulumi.set(__self__, "mismatching_mime_types_override_list", mismatching_mime_types_override_list)
        if missed_cache_period_seconds is not None:
            pulumi.set(__self__, "missed_cache_period_seconds", missed_cache_period_seconds)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if offline is not None:
            pulumi.set(__self__, "offline", offline)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if priority_resolution is not None:
            pulumi.set(__self__, "priority_resolution", priority_resolution)
        if project_environments is not None:
            pulumi.set(__self__, "project_environments", project_environments)
        if project_key is not None:
            pulumi.set(__self__, "project_key", project_key)
        if propagate_query_params is not None:
            pulumi.set(__self__, "propagate_query_params", propagate_query_params)
        if property_sets is not None:
            pulumi.set(__self__, "property_sets", property_sets)
        if proxy is not None:
            pulumi.set(__self__, "proxy", proxy)
        if remote_repo_layout_ref is not None:
            pulumi.set(__self__, "remote_repo_layout_ref", remote_repo_layout_ref)
        if repo_layout_ref is not None:
            pulumi.set(__self__, "repo_layout_ref", repo_layout_ref)
        if retrieval_cache_period_seconds is not None:
            pulumi.set(__self__, "retrieval_cache_period_seconds", retrieval_cache_period_seconds)
        if share_configuration is not None:
            pulumi.set(__self__, "share_configuration", share_configuration)
        if socket_timeout_millis is not None:
            pulumi.set(__self__, "socket_timeout_millis", socket_timeout_millis)
        if store_artifacts_locally is not None:
            pulumi.set(__self__, "store_artifacts_locally", store_artifacts_locally)
        if synchronize_properties is not None:
            pulumi.set(__self__, "synchronize_properties", synchronize_properties)
        if unused_artifacts_cleanup_period_enabled is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_enabled", unused_artifacts_cleanup_period_enabled)
        if unused_artifacts_cleanup_period_hours is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_hours", unused_artifacts_cleanup_period_hours)
        if username is not None:
            pulumi.set(__self__, "username", username)
        if xray_index is not None:
            pulumi.set(__self__, "xray_index", xray_index)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        A mandatory identifier for the repository that must be unique. It cannot begin with a number or
        contain spaces or special characters.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def url(self) -> pulumi.Input[str]:
        """
        The remote repo URL.
        """
        return pulumi.get(self, "url")
    @url.setter
    def url(self, value: pulumi.Input[str]):
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter(name="allowAnyHostAuth")
    def allow_any_host_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
        any other host.
        """
        return pulumi.get(self, "allow_any_host_auth")
    @allow_any_host_auth.setter
    def allow_any_host_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_any_host_auth", value)
    @property
    @pulumi.getter(name="assumedOfflinePeriodSecs")
    def assumed_offline_period_secs(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
        an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
        offline. Default to 300.
        """
        return pulumi.get(self, "assumed_offline_period_secs")
    @assumed_offline_period_secs.setter
    def assumed_offline_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "assumed_offline_period_secs", value)
    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> Optional[pulumi.Input[bool]]:
        """
        (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
        resolution.
        """
        return pulumi.get(self, "blacked_out")
    @blacked_out.setter
    def blacked_out(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "blacked_out", value)
    @property
    @pulumi.getter(name="blockMismatchingMimeTypes")
    def block_mismatching_mime_types(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "block_mismatching_mime_types")
    @block_mismatching_mime_types.setter
    def block_mismatching_mime_types(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "block_mismatching_mime_types", value)
    @property
    @pulumi.getter(name="bypassHeadRequests")
    def bypass_head_requests(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "bypass_head_requests")
    @bypass_head_requests.setter
    def bypass_head_requests(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bypass_head_requests", value)
    @property
    @pulumi.getter(name="clientTlsCertificate")
    def client_tls_certificate(self) -> Optional[pulumi.Input[str]]:
        """
        The client TLS certificate, if configured.
        """
        return pulumi.get(self, "client_tls_certificate")
    @client_tls_certificate.setter
    def client_tls_certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_tls_certificate", value)
    @property
    @pulumi.getter(name="contentSynchronisation")
    def content_synchronisation(self) -> Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]:
        """
        Nested content-synchronisation settings (see `RemotePuppetRepositoryContentSynchronisationArgs`).
        """
        return pulumi.get(self, "content_synchronisation")
    @content_synchronisation.setter
    def content_synchronisation(self, value: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]):
        pulumi.set(self, "content_synchronisation", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Repository description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="enableCookieManagement")
    def enable_cookie_management(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables cookie management if the remote repository uses cookies to manage client state.
        """
        return pulumi.get(self, "enable_cookie_management")
    @enable_cookie_management.setter
    def enable_cookie_management(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_cookie_management", value)
    @property
    @pulumi.getter(name="excludesPattern")
    def excludes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
        artifacts are excluded.
        """
        return pulumi.get(self, "excludes_pattern")
    @excludes_pattern.setter
    def excludes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "excludes_pattern", value)
    @property
    @pulumi.getter(name="hardFail")
    def hard_fail(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
        communicate with this repository.
        """
        return pulumi.get(self, "hard_fail")
    @hard_fail.setter
    def hard_fail(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_fail", value)
    @property
    @pulumi.getter(name="includesPattern")
    def includes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
        artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        """
        return pulumi.get(self, "includes_pattern")
    @includes_pattern.setter
    def includes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "includes_pattern", value)
    @property
    @pulumi.getter(name="listRemoteFolderItems")
    def list_remote_folder_items(self) -> Optional[pulumi.Input[bool]]:
        """
        Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
        the 'Retrieval Cache Period'. Default value is 'false'.
        """
        return pulumi.get(self, "list_remote_folder_items")
    @list_remote_folder_items.setter
    def list_remote_folder_items(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "list_remote_folder_items", value)
    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> Optional[pulumi.Input[str]]:
        """
        The local address to be used when creating connections. Useful for specifying the interface to use on systems with
        multiple network interfaces.
        """
        return pulumi.get(self, "local_address")
    @local_address.setter
    def local_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "local_address", value)
    @property
    @pulumi.getter(name="mismatchingMimeTypesOverrideList")
    def mismatching_mime_types_override_list(self) -> Optional[pulumi.Input[str]]:
        """
        The set of mime types that should override the block_mismatching_mime_types setting. Eg:
        "application/json,application/xml". Default value is empty.
        """
        return pulumi.get(self, "mismatching_mime_types_override_list")
    @mismatching_mime_types_override_list.setter
    def mismatching_mime_types_override_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mismatching_mime_types_override_list", value)
    @property
    @pulumi.getter(name="missedCachePeriodSeconds")
    def missed_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        """
        return pulumi.get(self, "missed_cache_period_seconds")
    @missed_cache_period_seconds.setter
    def missed_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "missed_cache_period_seconds", value)
    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        Repository notes.
        """
        return pulumi.get(self, "notes")
    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)
    @property
    @pulumi.getter
    def offline(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        """
        return pulumi.get(self, "offline")
    @offline.setter
    def offline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "offline", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The configured remote-repository password, if any.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="priorityResolution")
    def priority_resolution(self) -> Optional[pulumi.Input[bool]]:
        """
        Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        """
        return pulumi.get(self, "priority_resolution")
    @priority_resolution.setter
    def priority_resolution(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "priority_resolution", value)
    @property
    @pulumi.getter(name="projectEnvironments")
    def project_environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        """
        return pulumi.get(self, "project_environments")
    @project_environments.setter
    def project_environments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "project_environments", value)
    @property
    @pulumi.getter(name="projectKey")
    def project_key(self) -> Optional[pulumi.Input[str]]:
        """
        Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
        repository to a project, repository key must be prefixed with project key, separated by a dash.
        """
        return pulumi.get(self, "project_key")
    @project_key.setter
    def project_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_key", value)
    @property
    @pulumi.getter(name="propagateQueryParams")
    def propagate_query_params(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        """
        return pulumi.get(self, "propagate_query_params")
    @propagate_query_params.setter
    def propagate_query_params(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "propagate_query_params", value)
    @property
    @pulumi.getter(name="propertySets")
    def property_sets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of property set names
        """
        return pulumi.get(self, "property_sets")
    @property_sets.setter
    def property_sets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "property_sets", value)
    @property
    @pulumi.getter
    def proxy(self) -> Optional[pulumi.Input[str]]:
        """
        Proxy key from Artifactory Proxies settings
        """
        return pulumi.get(self, "proxy")
    @proxy.setter
    def proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proxy", value)
    @property
    @pulumi.getter(name="remoteRepoLayoutRef")
    def remote_repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the remote layout mapping
        """
        return pulumi.get(self, "remote_repo_layout_ref")
    @remote_repo_layout_ref.setter
    def remote_repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote_repo_layout_ref", value)
    @property
    @pulumi.getter(name="repoLayoutRef")
    def repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the local repository
        """
        return pulumi.get(self, "repo_layout_ref")
    @repo_layout_ref.setter
    def repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_layout_ref", value)
    @property
    @pulumi.getter(name="retrievalCachePeriodSeconds")
    def retrieval_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
        """
        return pulumi.get(self, "retrieval_cache_period_seconds")
    @retrieval_cache_period_seconds.setter
    def retrieval_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retrieval_cache_period_seconds", value)
    @property
    @pulumi.getter(name="shareConfiguration")
    def share_configuration(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the repository configuration is shared. (Undocumented upstream — verify semantics against the Artifactory API.)
        """
        return pulumi.get(self, "share_configuration")
    @share_configuration.setter
    def share_configuration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "share_configuration", value)
    @property
    @pulumi.getter(name="socketTimeoutMillis")
    def socket_timeout_millis(self) -> Optional[pulumi.Input[int]]:
        """
        Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
        operation is considered a retrieval failure.
        """
        return pulumi.get(self, "socket_timeout_millis")
    @socket_timeout_millis.setter
    def socket_timeout_millis(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "socket_timeout_millis", value)
    @property
    @pulumi.getter(name="storeArtifactsLocally")
    def store_artifacts_locally(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
        direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
        one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
        servers.
        """
        return pulumi.get(self, "store_artifacts_locally")
    @store_artifacts_locally.setter
    def store_artifacts_locally(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "store_artifacts_locally", value)
    @property
    @pulumi.getter(name="synchronizeProperties")
    def synchronize_properties(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, remote artifacts are fetched along with their properties.
        """
        return pulumi.get(self, "synchronize_properties")
    @synchronize_properties.setter
    def synchronize_properties(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "synchronize_properties", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
    def unused_artifacts_cleanup_period_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether unused-artifact cleanup is enabled (see `unused_artifacts_cleanup_period_hours`).
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_enabled")
    @unused_artifacts_cleanup_period_enabled.setter
    def unused_artifacts_cleanup_period_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_enabled", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
    def unused_artifacts_cleanup_period_hours(self) -> Optional[pulumi.Input[int]]:
        """
        The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
        of 0 means automatic cleanup of cached artifacts is disabled.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_hours")
    @unused_artifacts_cleanup_period_hours.setter
    def unused_artifacts_cleanup_period_hours(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_hours", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        The configured remote-repository username, if any.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="xrayIndex")
    def xray_index(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
        Xray settings.
        """
        return pulumi.get(self, "xray_index")
    @xray_index.setter
    def xray_index(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "xray_index", value)
@pulumi.input_type
class _RemotePuppetRepositoryState:
    def __init__(__self__, *,
                 allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                 assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                 blacked_out: Optional[pulumi.Input[bool]] = None,
                 block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                 bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                 client_tls_certificate: Optional[pulumi.Input[str]] = None,
                 content_synchronisation: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                 excludes_pattern: Optional[pulumi.Input[str]] = None,
                 failed_retrieval_cache_period_secs: Optional[pulumi.Input[int]] = None,
                 hard_fail: Optional[pulumi.Input[bool]] = None,
                 includes_pattern: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                 local_address: Optional[pulumi.Input[str]] = None,
                 mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                 missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 offline: Optional[pulumi.Input[bool]] = None,
                 package_type: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 priority_resolution: Optional[pulumi.Input[bool]] = None,
                 project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 project_key: Optional[pulumi.Input[str]] = None,
                 propagate_query_params: Optional[pulumi.Input[bool]] = None,
                 property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 proxy: Optional[pulumi.Input[str]] = None,
                 remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 repo_layout_ref: Optional[pulumi.Input[str]] = None,
                 retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                 share_configuration: Optional[pulumi.Input[bool]] = None,
                 socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                 store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                 synchronize_properties: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                 unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                 url: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 xray_index: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering RemotePuppetRepository resources.
        :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
               any other host.
        :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
               an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
               offline. Default to 300.
        :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
               resolution.
        :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
        :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
               artifacts are excluded.
        :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
               communicate with this repository.
        :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
               artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
               contain spaces or special characters.
        :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
               the 'Retrieval Cache Period'. Default value is 'false'.
        :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
               multiple network interfaces.
        :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
               "application/json,application/xml". Default value is empty.
        :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
               repository to a project, repository key must be prefixed with project key, separated by a dash.
        :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
        :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
        :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
        :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
        :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger than retrievalCachePeriodSecs field.
        :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
               operation is considered a retrieval failure.
        :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
               direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
               one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
               servers.
        :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
        :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
               of 0 means automatic cleanup of cached artifacts is disabled.
        :param pulumi.Input[str] url: The remote repo URL.
        :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
               Xray settings.
        """
        # Only explicitly supplied (non-None) fields are recorded on the state
        # object, so unset attributes stay absent rather than defaulting.
        if allow_any_host_auth is not None:
            pulumi.set(__self__, "allow_any_host_auth", allow_any_host_auth)
        if assumed_offline_period_secs is not None:
            pulumi.set(__self__, "assumed_offline_period_secs", assumed_offline_period_secs)
        if blacked_out is not None:
            pulumi.set(__self__, "blacked_out", blacked_out)
        if block_mismatching_mime_types is not None:
            pulumi.set(__self__, "block_mismatching_mime_types", block_mismatching_mime_types)
        if bypass_head_requests is not None:
            pulumi.set(__self__, "bypass_head_requests", bypass_head_requests)
        if client_tls_certificate is not None:
            pulumi.set(__self__, "client_tls_certificate", client_tls_certificate)
        if content_synchronisation is not None:
            pulumi.set(__self__, "content_synchronisation", content_synchronisation)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_cookie_management is not None:
            pulumi.set(__self__, "enable_cookie_management", enable_cookie_management)
        if excludes_pattern is not None:
            pulumi.set(__self__, "excludes_pattern", excludes_pattern)
        # Deprecated field: emit the warning at construction time, then still
        # store the value below (second identical check) for compatibility.
        if failed_retrieval_cache_period_secs is not None:
            warnings.warn("""This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function""", DeprecationWarning)
            pulumi.log.warn("""failed_retrieval_cache_period_secs is deprecated: This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function""")
        if failed_retrieval_cache_period_secs is not None:
            pulumi.set(__self__, "failed_retrieval_cache_period_secs", failed_retrieval_cache_period_secs)
        if hard_fail is not None:
            pulumi.set(__self__, "hard_fail", hard_fail)
        if includes_pattern is not None:
            pulumi.set(__self__, "includes_pattern", includes_pattern)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if list_remote_folder_items is not None:
            pulumi.set(__self__, "list_remote_folder_items", list_remote_folder_items)
        if local_address is not None:
            pulumi.set(__self__, "local_address", local_address)
        if mismatching_mime_types_override_list is not None:
            pulumi.set(__self__, "mismatching_mime_types_override_list", mismatching_mime_types_override_list)
        if missed_cache_period_seconds is not None:
            pulumi.set(__self__, "missed_cache_period_seconds", missed_cache_period_seconds)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if offline is not None:
            pulumi.set(__self__, "offline", offline)
        if package_type is not None:
            pulumi.set(__self__, "package_type", package_type)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if priority_resolution is not None:
            pulumi.set(__self__, "priority_resolution", priority_resolution)
        if project_environments is not None:
            pulumi.set(__self__, "project_environments", project_environments)
        if project_key is not None:
            pulumi.set(__self__, "project_key", project_key)
        if propagate_query_params is not None:
            pulumi.set(__self__, "propagate_query_params", propagate_query_params)
        if property_sets is not None:
            pulumi.set(__self__, "property_sets", property_sets)
        if proxy is not None:
            pulumi.set(__self__, "proxy", proxy)
        if remote_repo_layout_ref is not None:
            pulumi.set(__self__, "remote_repo_layout_ref", remote_repo_layout_ref)
        if repo_layout_ref is not None:
            pulumi.set(__self__, "repo_layout_ref", repo_layout_ref)
        if retrieval_cache_period_seconds is not None:
            pulumi.set(__self__, "retrieval_cache_period_seconds", retrieval_cache_period_seconds)
        if share_configuration is not None:
            pulumi.set(__self__, "share_configuration", share_configuration)
        if socket_timeout_millis is not None:
            pulumi.set(__self__, "socket_timeout_millis", socket_timeout_millis)
        if store_artifacts_locally is not None:
            pulumi.set(__self__, "store_artifacts_locally", store_artifacts_locally)
        if synchronize_properties is not None:
            pulumi.set(__self__, "synchronize_properties", synchronize_properties)
        if unused_artifacts_cleanup_period_enabled is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_enabled", unused_artifacts_cleanup_period_enabled)
        if unused_artifacts_cleanup_period_hours is not None:
            pulumi.set(__self__, "unused_artifacts_cleanup_period_hours", unused_artifacts_cleanup_period_hours)
        if url is not None:
            pulumi.set(__self__, "url", url)
        if username is not None:
            pulumi.set(__self__, "username", username)
        if xray_index is not None:
            pulumi.set(__self__, "xray_index", xray_index)
    @property
    @pulumi.getter(name="allowAnyHostAuth")
    def allow_any_host_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
        any other host.
        """
        return pulumi.get(self, "allow_any_host_auth")
    @allow_any_host_auth.setter
    def allow_any_host_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_any_host_auth", value)
    @property
    @pulumi.getter(name="assumedOfflinePeriodSecs")
    def assumed_offline_period_secs(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
        an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
        offline. Default to 300.
        """
        return pulumi.get(self, "assumed_offline_period_secs")
    @assumed_offline_period_secs.setter
    def assumed_offline_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "assumed_offline_period_secs", value)
    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> Optional[pulumi.Input[bool]]:
        """
        (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
        resolution.
        """
        return pulumi.get(self, "blacked_out")
    @blacked_out.setter
    def blacked_out(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "blacked_out", value)
    @property
    @pulumi.getter(name="blockMismatchingMimeTypes")
    def block_mismatching_mime_types(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "block_mismatching_mime_types")
    @block_mismatching_mime_types.setter
    def block_mismatching_mime_types(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "block_mismatching_mime_types", value)
    @property
    @pulumi.getter(name="bypassHeadRequests")
    def bypass_head_requests(self) -> Optional[pulumi.Input[bool]]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
        HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
        Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        """
        return pulumi.get(self, "bypass_head_requests")
    @bypass_head_requests.setter
    def bypass_head_requests(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bypass_head_requests", value)
    @property
    @pulumi.getter(name="clientTlsCertificate")
    def client_tls_certificate(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_tls_certificate")
    @client_tls_certificate.setter
    def client_tls_certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_tls_certificate", value)
    @property
    @pulumi.getter(name="contentSynchronisation")
    def content_synchronisation(self) -> Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]:
        return pulumi.get(self, "content_synchronisation")
    @content_synchronisation.setter
    def content_synchronisation(self, value: Optional[pulumi.Input['RemotePuppetRepositoryContentSynchronisationArgs']]):
        pulumi.set(self, "content_synchronisation", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="enableCookieManagement")
    def enable_cookie_management(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables cookie management if the remote repository uses cookies to manage client state.
        """
        return pulumi.get(self, "enable_cookie_management")
    @enable_cookie_management.setter
    def enable_cookie_management(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_cookie_management", value)
    @property
    @pulumi.getter(name="excludesPattern")
    def excludes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
        artifacts are excluded.
        """
        return pulumi.get(self, "excludes_pattern")
    @excludes_pattern.setter
    def excludes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "excludes_pattern", value)
    @property
    @pulumi.getter(name="failedRetrievalCachePeriodSecs")
    def failed_retrieval_cache_period_secs(self) -> Optional[pulumi.Input[int]]:
        # Deprecated (see the warning in __init__); kept for informational parity with the UI.
        return pulumi.get(self, "failed_retrieval_cache_period_secs")
    @failed_retrieval_cache_period_secs.setter
    def failed_retrieval_cache_period_secs(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "failed_retrieval_cache_period_secs", value)
    @property
    @pulumi.getter(name="hardFail")
    def hard_fail(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
        communicate with this repository.
        """
        return pulumi.get(self, "hard_fail")
    @hard_fail.setter
    def hard_fail(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hard_fail", value)
    @property
    @pulumi.getter(name="includesPattern")
    def includes_pattern(self) -> Optional[pulumi.Input[str]]:
        """
        List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
        artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        """
        return pulumi.get(self, "includes_pattern")
    @includes_pattern.setter
    def includes_pattern(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "includes_pattern", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        A mandatory identifier for the repository that must be unique. It cannot begin with a number or
        contain spaces or special characters.
        """
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter(name="listRemoteFolderItems")
    def list_remote_folder_items(self) -> Optional[pulumi.Input[bool]]:
        """
        Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
        the 'Retrieval Cache Period'. Default value is 'false'.
        """
        return pulumi.get(self, "list_remote_folder_items")
    @list_remote_folder_items.setter
    def list_remote_folder_items(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "list_remote_folder_items", value)
    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> Optional[pulumi.Input[str]]:
        """
        The local address to be used when creating connections. Useful for specifying the interface to use on systems with
        multiple network interfaces.
        """
        return pulumi.get(self, "local_address")
    @local_address.setter
    def local_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "local_address", value)
    @property
    @pulumi.getter(name="mismatchingMimeTypesOverrideList")
    def mismatching_mime_types_override_list(self) -> Optional[pulumi.Input[str]]:
        """
        The set of mime types that should override the block_mismatching_mime_types setting. Eg:
        "application/json,application/xml". Default value is empty.
        """
        return pulumi.get(self, "mismatching_mime_types_override_list")
    @mismatching_mime_types_override_list.setter
    def mismatching_mime_types_override_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mismatching_mime_types_override_list", value)
    @property
    @pulumi.getter(name="missedCachePeriodSeconds")
    def missed_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        """
        return pulumi.get(self, "missed_cache_period_seconds")
    @missed_cache_period_seconds.setter
    def missed_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "missed_cache_period_seconds", value)
    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "notes")
    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)
    @property
    @pulumi.getter
    def offline(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        """
        return pulumi.get(self, "offline")
    @offline.setter
    def offline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "offline", value)
    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "package_type")
    @package_type.setter
    def package_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "package_type", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="priorityResolution")
    def priority_resolution(self) -> Optional[pulumi.Input[bool]]:
        """
        Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        """
        return pulumi.get(self, "priority_resolution")
    @priority_resolution.setter
    def priority_resolution(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "priority_resolution", value)
    @property
    @pulumi.getter(name="projectEnvironments")
    def project_environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        """
        return pulumi.get(self, "project_environments")
    @project_environments.setter
    def project_environments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "project_environments", value)
    @property
    @pulumi.getter(name="projectKey")
    def project_key(self) -> Optional[pulumi.Input[str]]:
        """
        Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
        repository to a project, repository key must be prefixed with project key, separated by a dash.
        """
        return pulumi.get(self, "project_key")
    @project_key.setter
    def project_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_key", value)
    @property
    @pulumi.getter(name="propagateQueryParams")
    def propagate_query_params(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        """
        return pulumi.get(self, "propagate_query_params")
    @propagate_query_params.setter
    def propagate_query_params(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "propagate_query_params", value)
    @property
    @pulumi.getter(name="propertySets")
    def property_sets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of property set names
        """
        return pulumi.get(self, "property_sets")
    @property_sets.setter
    def property_sets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "property_sets", value)
    @property
    @pulumi.getter
    def proxy(self) -> Optional[pulumi.Input[str]]:
        """
        Proxy key from Artifactory Proxies settings
        """
        return pulumi.get(self, "proxy")
    @proxy.setter
    def proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proxy", value)
    @property
    @pulumi.getter(name="remoteRepoLayoutRef")
    def remote_repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the remote layout mapping
        """
        return pulumi.get(self, "remote_repo_layout_ref")
    @remote_repo_layout_ref.setter
    def remote_repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "remote_repo_layout_ref", value)
    @property
    @pulumi.getter(name="repoLayoutRef")
    def repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Repository layout key for the local repository
        """
        return pulumi.get(self, "repo_layout_ref")
    @repo_layout_ref.setter
    def repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo_layout_ref", value)
    @property
    @pulumi.getter(name="retrievalCachePeriodSeconds")
    def retrieval_cache_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The metadataRetrievalTimeoutSecs field not allowed to be bigger than retrievalCachePeriodSecs field.
        """
        return pulumi.get(self, "retrieval_cache_period_seconds")
    @retrieval_cache_period_seconds.setter
    def retrieval_cache_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retrieval_cache_period_seconds", value)
    @property
    @pulumi.getter(name="shareConfiguration")
    def share_configuration(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "share_configuration")
    @share_configuration.setter
    def share_configuration(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "share_configuration", value)
    @property
    @pulumi.getter(name="socketTimeoutMillis")
    def socket_timeout_millis(self) -> Optional[pulumi.Input[int]]:
        """
        Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
        operation is considered a retrieval failure.
        """
        return pulumi.get(self, "socket_timeout_millis")
    @socket_timeout_millis.setter
    def socket_timeout_millis(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "socket_timeout_millis", value)
    @property
    @pulumi.getter(name="storeArtifactsLocally")
    def store_artifacts_locally(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
        direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
        one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
        servers.
        """
        return pulumi.get(self, "store_artifacts_locally")
    @store_artifacts_locally.setter
    def store_artifacts_locally(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "store_artifacts_locally", value)
    @property
    @pulumi.getter(name="synchronizeProperties")
    def synchronize_properties(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, remote artifacts are fetched along with their properties.
        """
        return pulumi.get(self, "synchronize_properties")
    @synchronize_properties.setter
    def synchronize_properties(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "synchronize_properties", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
    def unused_artifacts_cleanup_period_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the unused-artifacts cleanup period is enabled
        (see ``unused_artifacts_cleanup_period_hours`` for the wait period).
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_enabled")
    @unused_artifacts_cleanup_period_enabled.setter
    def unused_artifacts_cleanup_period_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_enabled", value)
    @property
    @pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
    def unused_artifacts_cleanup_period_hours(self) -> Optional[pulumi.Input[int]]:
        """
        The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
        of 0 means automatic cleanup of cached artifacts is disabled.
        """
        return pulumi.get(self, "unused_artifacts_cleanup_period_hours")
    @unused_artifacts_cleanup_period_hours.setter
    def unused_artifacts_cleanup_period_hours(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unused_artifacts_cleanup_period_hours", value)
    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """
        The remote repo URL.
        """
        return pulumi.get(self, "url")
    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        The ``username`` value stored on this resource.
        NOTE(review): presumably the credential used against the remote
        repository URL, together with ``password`` — confirm.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
    @property
    @pulumi.getter(name="xrayIndex")
    def xray_index(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
        Xray settings.
        """
        return pulumi.get(self, "xray_index")
    @xray_index.setter
    def xray_index(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "xray_index", value)
class RemotePuppetRepository(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
blacked_out: Optional[pulumi.Input[bool]] = None,
block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
bypass_head_requests: Optional[pulumi.Input[bool]] = None,
client_tls_certificate: Optional[pulumi.Input[str]] = None,
content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_cookie_management: Optional[pulumi.Input[bool]] = None,
excludes_pattern: Optional[pulumi.Input[str]] = None,
hard_fail: Optional[pulumi.Input[bool]] = None,
includes_pattern: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
local_address: Optional[pulumi.Input[str]] = None,
mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
notes: Optional[pulumi.Input[str]] = None,
offline: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
priority_resolution: Optional[pulumi.Input[bool]] = None,
project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project_key: Optional[pulumi.Input[str]] = None,
propagate_query_params: Optional[pulumi.Input[bool]] = None,
property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
proxy: Optional[pulumi.Input[str]] = None,
remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
repo_layout_ref: Optional[pulumi.Input[str]] = None,
retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
share_configuration: Optional[pulumi.Input[bool]] = None,
socket_timeout_millis: Optional[pulumi.Input[int]] = None,
store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
synchronize_properties: Optional[pulumi.Input[bool]] = None,
unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
url: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
xray_index: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Creates a remote Puppet repository.
Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories).
## Example Usage
```python
import pulumi
import pulumi_artifactory as artifactory
my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet",
key="my-remote-puppet",
url="https://forgeapi.puppetlabs.com/")
```
## Import
Remote repositories can be imported using their name, e.g.
```sh
$ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
any other host.
:param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
offline. Default to 300.
:param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
resolution.
:param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
:param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
:param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
:param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
artifacts are excluded.
:param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
communicate with this repository.
:param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
:param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
contain spaces or special characters.
:param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
the 'Retrieval Cache Period'. Default value is 'false'.
:param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
multiple network interfaces.
:param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
"application/json,application/xml". Default value is empty.
:param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
:param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
:param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
:param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
:param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
repository to a project, repository key must be prefixed with project key, separated by a dash.
:param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
:param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
:param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
:param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
:param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
:param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field.
:param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
operation is considered a retrieval failure.
:param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory
servers.
:param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
:param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
of 0 means automatic cleanup of cached artifacts is disabled.
:param pulumi.Input[str] url: The remote repo URL.
:param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
Xray settings.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RemotePuppetRepositoryArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a remote Puppet repository from a bundled ``RemotePuppetRepositoryArgs`` object.
        Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_artifactory as artifactory
        my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet",
            key="my-remote-puppet",
            url="https://forgeapi.puppetlabs.com/")
        ```
        ## Import
        Remote repositories can be imported using their name, e.g.
        ```sh
        $ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet
        ```
        :param str resource_name: The name of the resource.
        :param RemotePuppetRepositoryArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RemotePuppetRepositoryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
                       assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
                       blacked_out: Optional[pulumi.Input[bool]] = None,
                       block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
                       bypass_head_requests: Optional[pulumi.Input[bool]] = None,
                       client_tls_certificate: Optional[pulumi.Input[str]] = None,
                       content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       enable_cookie_management: Optional[pulumi.Input[bool]] = None,
                       excludes_pattern: Optional[pulumi.Input[str]] = None,
                       hard_fail: Optional[pulumi.Input[bool]] = None,
                       includes_pattern: Optional[pulumi.Input[str]] = None,
                       key: Optional[pulumi.Input[str]] = None,
                       list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
                       local_address: Optional[pulumi.Input[str]] = None,
                       mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
                       missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                       notes: Optional[pulumi.Input[str]] = None,
                       offline: Optional[pulumi.Input[bool]] = None,
                       password: Optional[pulumi.Input[str]] = None,
                       priority_resolution: Optional[pulumi.Input[bool]] = None,
                       project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       project_key: Optional[pulumi.Input[str]] = None,
                       propagate_query_params: Optional[pulumi.Input[bool]] = None,
                       property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       proxy: Optional[pulumi.Input[str]] = None,
                       remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
                       repo_layout_ref: Optional[pulumi.Input[str]] = None,
                       retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
                       share_configuration: Optional[pulumi.Input[bool]] = None,
                       socket_timeout_millis: Optional[pulumi.Input[int]] = None,
                       store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
                       synchronize_properties: Optional[pulumi.Input[bool]] = None,
                       unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
                       unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
                       url: Optional[pulumi.Input[str]] = None,
                       username: Optional[pulumi.Input[str]] = None,
                       xray_index: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        """
        Shared implementation behind both ``__init__`` overloads.

        Validates the resource options, builds a ``RemotePuppetRepositoryArgs``
        property bag from the keyword arguments, enforces the required ``key``
        and ``url`` properties, and registers the resource with the Pulumi
        engine via ``super().__init__``.

        :raises TypeError: if ``opts`` is not a ``ResourceOptions``, if
            ``__props__`` is supplied without ``opts.id``, or if a required
            property is missing when creating a new resource.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version shipped with this SDK.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No lookup id: we are creating a new resource, so assemble the
            # input property bag from the explicit keyword arguments.
            # (__props__ is reserved for the get()/lookup path.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RemotePuppetRepositoryArgs.__new__(RemotePuppetRepositoryArgs)
            __props__.__dict__["allow_any_host_auth"] = allow_any_host_auth
            __props__.__dict__["assumed_offline_period_secs"] = assumed_offline_period_secs
            __props__.__dict__["blacked_out"] = blacked_out
            __props__.__dict__["block_mismatching_mime_types"] = block_mismatching_mime_types
            __props__.__dict__["bypass_head_requests"] = bypass_head_requests
            __props__.__dict__["client_tls_certificate"] = client_tls_certificate
            __props__.__dict__["content_synchronisation"] = content_synchronisation
            __props__.__dict__["description"] = description
            __props__.__dict__["enable_cookie_management"] = enable_cookie_management
            __props__.__dict__["excludes_pattern"] = excludes_pattern
            __props__.__dict__["hard_fail"] = hard_fail
            __props__.__dict__["includes_pattern"] = includes_pattern
            # 'key' is mandatory unless the engine is rehydrating the resource
            # from an existing URN.
            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            __props__.__dict__["list_remote_folder_items"] = list_remote_folder_items
            __props__.__dict__["local_address"] = local_address
            __props__.__dict__["mismatching_mime_types_override_list"] = mismatching_mime_types_override_list
            __props__.__dict__["missed_cache_period_seconds"] = missed_cache_period_seconds
            __props__.__dict__["notes"] = notes
            __props__.__dict__["offline"] = offline
            __props__.__dict__["password"] = password
            __props__.__dict__["priority_resolution"] = priority_resolution
            __props__.__dict__["project_environments"] = project_environments
            __props__.__dict__["project_key"] = project_key
            __props__.__dict__["propagate_query_params"] = propagate_query_params
            __props__.__dict__["property_sets"] = property_sets
            __props__.__dict__["proxy"] = proxy
            __props__.__dict__["remote_repo_layout_ref"] = remote_repo_layout_ref
            __props__.__dict__["repo_layout_ref"] = repo_layout_ref
            __props__.__dict__["retrieval_cache_period_seconds"] = retrieval_cache_period_seconds
            __props__.__dict__["share_configuration"] = share_configuration
            __props__.__dict__["socket_timeout_millis"] = socket_timeout_millis
            __props__.__dict__["store_artifacts_locally"] = store_artifacts_locally
            __props__.__dict__["synchronize_properties"] = synchronize_properties
            __props__.__dict__["unused_artifacts_cleanup_period_enabled"] = unused_artifacts_cleanup_period_enabled
            __props__.__dict__["unused_artifacts_cleanup_period_hours"] = unused_artifacts_cleanup_period_hours
            # 'url' is the second mandatory property; same URN escape hatch.
            if url is None and not opts.urn:
                raise TypeError("Missing required property 'url'")
            __props__.__dict__["url"] = url
            __props__.__dict__["username"] = username
            __props__.__dict__["xray_index"] = xray_index
            # Output-only properties: always start as None; the provider
            # computes them after creation.
            __props__.__dict__["failed_retrieval_cache_period_secs"] = None
            __props__.__dict__["package_type"] = None
        super(RemotePuppetRepository, __self__).__init__(
            'artifactory:index/remotePuppetRepository:RemotePuppetRepository',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            allow_any_host_auth: Optional[pulumi.Input[bool]] = None,
            assumed_offline_period_secs: Optional[pulumi.Input[int]] = None,
            blacked_out: Optional[pulumi.Input[bool]] = None,
            block_mismatching_mime_types: Optional[pulumi.Input[bool]] = None,
            bypass_head_requests: Optional[pulumi.Input[bool]] = None,
            client_tls_certificate: Optional[pulumi.Input[str]] = None,
            content_synchronisation: Optional[pulumi.Input[pulumi.InputType['RemotePuppetRepositoryContentSynchronisationArgs']]] = None,
            description: Optional[pulumi.Input[str]] = None,
            enable_cookie_management: Optional[pulumi.Input[bool]] = None,
            excludes_pattern: Optional[pulumi.Input[str]] = None,
            failed_retrieval_cache_period_secs: Optional[pulumi.Input[int]] = None,
            hard_fail: Optional[pulumi.Input[bool]] = None,
            includes_pattern: Optional[pulumi.Input[str]] = None,
            key: Optional[pulumi.Input[str]] = None,
            list_remote_folder_items: Optional[pulumi.Input[bool]] = None,
            local_address: Optional[pulumi.Input[str]] = None,
            mismatching_mime_types_override_list: Optional[pulumi.Input[str]] = None,
            missed_cache_period_seconds: Optional[pulumi.Input[int]] = None,
            notes: Optional[pulumi.Input[str]] = None,
            offline: Optional[pulumi.Input[bool]] = None,
            package_type: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            priority_resolution: Optional[pulumi.Input[bool]] = None,
            project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            project_key: Optional[pulumi.Input[str]] = None,
            propagate_query_params: Optional[pulumi.Input[bool]] = None,
            property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            proxy: Optional[pulumi.Input[str]] = None,
            remote_repo_layout_ref: Optional[pulumi.Input[str]] = None,
            repo_layout_ref: Optional[pulumi.Input[str]] = None,
            retrieval_cache_period_seconds: Optional[pulumi.Input[int]] = None,
            share_configuration: Optional[pulumi.Input[bool]] = None,
            socket_timeout_millis: Optional[pulumi.Input[int]] = None,
            store_artifacts_locally: Optional[pulumi.Input[bool]] = None,
            synchronize_properties: Optional[pulumi.Input[bool]] = None,
            unused_artifacts_cleanup_period_enabled: Optional[pulumi.Input[bool]] = None,
            unused_artifacts_cleanup_period_hours: Optional[pulumi.Input[int]] = None,
            url: Optional[pulumi.Input[str]] = None,
            username: Optional[pulumi.Input[str]] = None,
            xray_index: Optional[pulumi.Input[bool]] = None) -> 'RemotePuppetRepository':
        """
        Get an existing RemotePuppetRepository resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to
               any other host.
        :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time,
               an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed
               offline. Default to 300.
        :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact
               resolution.
        :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources,
               HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked,
               Artifactory will bypass the HEAD request and cache the artifact directly using a GET request.
        :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state.
        :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
               artifacts are excluded.
        :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to
               communicate with this repository.
        :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
               artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
        :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or
               contain spaces or special characters.
        :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of
               the 'Retrieval Cache Period'. Default value is 'false'.
        :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with
               multiple network interfaces.
        :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg:
               "application/json,application/xml". Default value is empty.
        :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching.
        :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
        :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
        :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning
               repository to a project, repository key must be prefixed with project key, separated by a dash.
        :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names
        :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings
        :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping
        :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
        :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field is not allowed to be bigger than the retrievalCachePeriodSecs field.
        :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network
               operation is considered a retrieval failure.
        :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and
               direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with
               one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-through Artifactory
               servers.
        :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties.
        :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value
               of 0 means automatic cleanup of cached artifacts is disabled.
        :param pulumi.Input[str] url: The remote repo URL.
        :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
               Xray settings.
        """
        # Attach the provider id so the engine performs a lookup instead of a
        # create, then copy every supplied keyword into a fresh state bag.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _RemotePuppetRepositoryState.__new__(_RemotePuppetRepositoryState)
        __props__.__dict__["allow_any_host_auth"] = allow_any_host_auth
        __props__.__dict__["assumed_offline_period_secs"] = assumed_offline_period_secs
        __props__.__dict__["blacked_out"] = blacked_out
        __props__.__dict__["block_mismatching_mime_types"] = block_mismatching_mime_types
        __props__.__dict__["bypass_head_requests"] = bypass_head_requests
        __props__.__dict__["client_tls_certificate"] = client_tls_certificate
        __props__.__dict__["content_synchronisation"] = content_synchronisation
        __props__.__dict__["description"] = description
        __props__.__dict__["enable_cookie_management"] = enable_cookie_management
        __props__.__dict__["excludes_pattern"] = excludes_pattern
        __props__.__dict__["failed_retrieval_cache_period_secs"] = failed_retrieval_cache_period_secs
        __props__.__dict__["hard_fail"] = hard_fail
        __props__.__dict__["includes_pattern"] = includes_pattern
        __props__.__dict__["key"] = key
        __props__.__dict__["list_remote_folder_items"] = list_remote_folder_items
        __props__.__dict__["local_address"] = local_address
        __props__.__dict__["mismatching_mime_types_override_list"] = mismatching_mime_types_override_list
        __props__.__dict__["missed_cache_period_seconds"] = missed_cache_period_seconds
        __props__.__dict__["notes"] = notes
        __props__.__dict__["offline"] = offline
        __props__.__dict__["package_type"] = package_type
        __props__.__dict__["password"] = password
        __props__.__dict__["priority_resolution"] = priority_resolution
        __props__.__dict__["project_environments"] = project_environments
        __props__.__dict__["project_key"] = project_key
        __props__.__dict__["propagate_query_params"] = propagate_query_params
        __props__.__dict__["property_sets"] = property_sets
        __props__.__dict__["proxy"] = proxy
        __props__.__dict__["remote_repo_layout_ref"] = remote_repo_layout_ref
        __props__.__dict__["repo_layout_ref"] = repo_layout_ref
        __props__.__dict__["retrieval_cache_period_seconds"] = retrieval_cache_period_seconds
        __props__.__dict__["share_configuration"] = share_configuration
        __props__.__dict__["socket_timeout_millis"] = socket_timeout_millis
        __props__.__dict__["store_artifacts_locally"] = store_artifacts_locally
        __props__.__dict__["synchronize_properties"] = synchronize_properties
        __props__.__dict__["unused_artifacts_cleanup_period_enabled"] = unused_artifacts_cleanup_period_enabled
        __props__.__dict__["unused_artifacts_cleanup_period_hours"] = unused_artifacts_cleanup_period_hours
        __props__.__dict__["url"] = url
        __props__.__dict__["username"] = username
        __props__.__dict__["xray_index"] = xray_index
        return RemotePuppetRepository(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="allowAnyHostAuth")
    def allow_any_host_auth(self) -> pulumi.Output[bool]:
        """
        Also known as 'Lenient Host Authentication'. When set, credentials of this repository may be used on
        requests redirected to any other host.
        """
        return pulumi.get(self, "allow_any_host_auth")
    @property
    @pulumi.getter(name="assumedOfflinePeriodSecs")
    def assumed_offline_period_secs(self) -> pulumi.Output[Optional[int]]:
        """
        The number of seconds the repository stays in assumed offline state after a connection error. At the end of
        this time, an online check is attempted in order to reset the offline status. A value of 0 means the
        repository is never assumed offline. Defaults to 300.
        """
        return pulumi.get(self, "assumed_offline_period_secs")
    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> pulumi.Output[bool]:
        """
        (A.K.A. 'Ignore Repository' in the UI) When set, the repository and its local cache do not participate in
        artifact resolution.
        """
        return pulumi.get(self, "blacked_out")
    @property
    @pulumi.getter(name="blockMismatchingMimeTypes")
    def block_mismatching_mime_types(self) -> pulumi.Output[bool]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. Some remote
        resources reject HEAD requests even though downloading the artifact is allowed. When checked, Artifactory
        bypasses the HEAD request and caches the artifact directly using a GET request.
        """
        return pulumi.get(self, "block_mismatching_mime_types")
    @property
    @pulumi.getter(name="bypassHeadRequests")
    def bypass_head_requests(self) -> pulumi.Output[bool]:
        """
        Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. Some remote
        resources reject HEAD requests even though downloading the artifact is allowed. When checked, Artifactory
        bypasses the HEAD request and caches the artifact directly using a GET request.
        """
        return pulumi.get(self, "bypass_head_requests")
    @property
    @pulumi.getter(name="clientTlsCertificate")
    def client_tls_certificate(self) -> pulumi.Output[str]:
        """
        Client TLS certificate name used when authenticating against the remote repository.
        """
        return pulumi.get(self, "client_tls_certificate")
    @property
    @pulumi.getter(name="contentSynchronisation")
    def content_synchronisation(self) -> pulumi.Output['outputs.RemotePuppetRepositoryContentSynchronisation']:
        """
        Reference to the repository's content-synchronisation (smart remote) settings block.
        """
        return pulumi.get(self, "content_synchronisation")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        Public description of the repository.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="enableCookieManagement")
    def enable_cookie_management(self) -> pulumi.Output[bool]:
        """
        Enables cookie management if the remote repository uses cookies to manage client state.
        """
        return pulumi.get(self, "enable_cookie_management")
    @property
    @pulumi.getter(name="excludesPattern")
    def excludes_pattern(self) -> pulumi.Output[str]:
        """
        List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*.
        By default no artifacts are excluded.
        """
        return pulumi.get(self, "excludes_pattern")
    @property
    @pulumi.getter(name="failedRetrievalCachePeriodSecs")
    def failed_retrieval_cache_period_secs(self) -> pulumi.Output[int]:
        """
        Output-only value computed by the provider; it is never supplied at creation time.
        """
        return pulumi.get(self, "failed_retrieval_cache_period_secs")
    @property
    @pulumi.getter(name="hardFail")
    def hard_fail(self) -> pulumi.Output[bool]:
        """
        When set, Artifactory returns an error to the client that causes the build to fail if there is a failure
        to communicate with this repository.
        """
        return pulumi.get(self, "hard_fail")
    @property
    @pulumi.getter(name="includesPattern")
    def includes_pattern(self) -> pulumi.Output[str]:
        """
        List of artifact patterns to include when evaluating artifact requests, in the form of x/y/**/z/*.
        When used, only artifacts matching one of the include patterns are served. By default all artifacts
        are included (**/*).
        """
        return pulumi.get(self, "includes_pattern")
    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[str]:
        """
        A mandatory identifier for the repository that must be unique. It cannot begin with a number or
        contain spaces or special characters.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter(name="listRemoteFolderItems")
    def list_remote_folder_items(self) -> pulumi.Output[Optional[bool]]:
        """
        Lists the items of remote folders in simple and list browsing. The remote content is cached according
        to the value of the 'Retrieval Cache Period'. Default value is 'false'.
        """
        return pulumi.get(self, "list_remote_folder_items")
    @property
    @pulumi.getter(name="localAddress")
    def local_address(self) -> pulumi.Output[Optional[str]]:
        """
        The local address to be used when creating connections. Useful for specifying the interface to use on
        systems with multiple network interfaces.
        """
        return pulumi.get(self, "local_address")
    @property
    @pulumi.getter(name="mismatchingMimeTypesOverrideList")
    def mismatching_mime_types_override_list(self) -> pulumi.Output[Optional[str]]:
        """
        The set of mime types that should override the block_mismatching_mime_types setting, e.g.
        "application/json,application/xml". Default value is empty.
        """
        return pulumi.get(self, "mismatching_mime_types_override_list")
    @property
    @pulumi.getter(name="missedCachePeriodSeconds")
    def missed_cache_period_seconds(self) -> pulumi.Output[int]:
        """
        The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates
        no caching.
        """
        return pulumi.get(self, "missed_cache_period_seconds")
    @property
    @pulumi.getter
    def notes(self) -> pulumi.Output[Optional[str]]:
        """
        Internal notes describing the repository.
        """
        return pulumi.get(self, "notes")
    @property
    @pulumi.getter
    def offline(self) -> pulumi.Output[bool]:
        """
        If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved.
        """
        return pulumi.get(self, "offline")
    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> pulumi.Output[str]:
        """
        Output-only package type of the repository, computed by the provider.
        """
        return pulumi.get(self, "package_type")
    @property
    @pulumi.getter
    def password(self) -> pulumi.Output[Optional[str]]:
        """
        Password used to authenticate against the remote repository (sensitive).
        """
        return pulumi.get(self, "password")
    @property
    @pulumi.getter(name="priorityResolution")
    def priority_resolution(self) -> pulumi.Output[bool]:
        """
        Setting repositories with priority will cause metadata to be merged only from repositories set with
        this field.
        """
        return pulumi.get(self, "priority_resolution")
    @property
    @pulumi.getter(name="projectEnvironments")
    def project_environments(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Project environment for assigning this repository to. Allowed values: "DEV" or "PROD".
        """
        return pulumi.get(self, "project_environments")
    @property
    @pulumi.getter(name="projectKey")
    def project_key(self) -> pulumi.Output[Optional[str]]:
        """
        Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When
        assigning a repository to a project, the repository key must be prefixed with the project key,
        separated by a dash.
        """
        return pulumi.get(self, "project_key")
    @property
    @pulumi.getter(name="propagateQueryParams")
    def propagate_query_params(self) -> pulumi.Output[Optional[bool]]:
        """
        When set, if query params are included in the request to Artifactory, they will be passed on to the
        remote repository.
        """
        return pulumi.get(self, "propagate_query_params")
    @property
    @pulumi.getter(name="propertySets")
    def property_sets(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of property set names.
        """
        return pulumi.get(self, "property_sets")
    @property
    @pulumi.getter
    def proxy(self) -> pulumi.Output[Optional[str]]:
        """
        Proxy key from Artifactory Proxies settings.
        """
        return pulumi.get(self, "proxy")
@property
@pulumi.getter(name="remoteRepoLayoutRef")
def remote_repo_layout_ref(self) -> pulumi.Output[str]:
    """
    Repository layout key used for the remote layout mapping.
    """
    value = pulumi.get(self, "remote_repo_layout_ref")
    return value
@property
@pulumi.getter(name="repoLayoutRef")
def repo_layout_ref(self) -> pulumi.Output[Optional[str]]:
    """
    Repository layout key of the local repository.
    """
    value = pulumi.get(self, "repo_layout_ref")
    return value
@property
@pulumi.getter(name="retrievalCachePeriodSeconds")
def retrieval_cache_period_seconds(self) -> pulumi.Output[int]:
    """
    Retrieval cache period, in seconds. The metadataRetrievalTimeoutSecs field
    must not be larger than this value.
    """
    value = pulumi.get(self, "retrieval_cache_period_seconds")
    return value
@property
@pulumi.getter(name="shareConfiguration")
def share_configuration(self) -> pulumi.Output[bool]:
    """
    Whether this repository's configuration is shared (no upstream
    documentation is provided for this field).
    """
    value = pulumi.get(self, "share_configuration")
    return value
@property
@pulumi.getter(name="socketTimeoutMillis")
def socket_timeout_millis(self) -> pulumi.Output[int]:
    """
    Network timeout (in ms) used when establishing a connection and for
    unanswered requests. A network operation that times out is treated as a
    retrieval failure.
    """
    value = pulumi.get(self, "socket_timeout_millis")
    return value
@property
@pulumi.getter(name="storeArtifactsLocally")
def store_artifacts_locally(self) -> pulumi.Output[bool]:
    """
    When enabled, cached artifacts are stored locally in this repository. When
    disabled, artifacts are not stored locally and direct
    repository-to-client streaming is used instead — useful for multi-server
    setups over a high-speed LAN, with one Artifactory caching certain data on
    central storage and streaming it directly to satellite pass-through
    Artifactory servers.
    """
    value = pulumi.get(self, "store_artifacts_locally")
    return value
@property
@pulumi.getter(name="synchronizeProperties")
def synchronize_properties(self) -> pulumi.Output[bool]:
    """
    When enabled, remote artifacts are fetched together with their properties.
    """
    value = pulumi.get(self, "synchronize_properties")
    return value
@property
@pulumi.getter(name="unusedArtifactsCleanupPeriodEnabled")
def unused_artifacts_cleanup_period_enabled(self) -> pulumi.Output[bool]:
    """
    Whether the unused-artifacts cleanup period is enabled (no upstream
    documentation is provided for this field).
    """
    value = pulumi.get(self, "unused_artifacts_cleanup_period_enabled")
    return value
@property
@pulumi.getter(name="unusedArtifactsCleanupPeriodHours")
def unused_artifacts_cleanup_period_hours(self) -> pulumi.Output[int]:
    """
    Number of hours to wait before an artifact is deemed "unused" and becomes
    eligible for cleanup from the repository. A value of 0 disables automatic
    cleanup of cached artifacts.
    """
    value = pulumi.get(self, "unused_artifacts_cleanup_period_hours")
    return value
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
    """
    URL of the remote repository.
    """
    value = pulumi.get(self, "url")
    return value
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
    """
    The configured username, if any.
    """
    value = pulumi.get(self, "username")
    return value
@property
@pulumi.getter(name="xrayIndex")
def xray_index(self) -> pulumi.Output[Optional[bool]]:
    """
    Enable indexing in Xray. The repository is indexed with the default
    retention period; this can be changed afterwards via the Xray settings.
    """
    value = pulumi.get(self, "xray_index")
    return value
:param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state. :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. 
Allow values: "DEV" or "PROD" :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties. :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. 
:param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. The remote repo URL. Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. Enables cookie management if the remote repository uses cookies to manage client state. List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. 
When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. Setting repositories with priority will cause metadata to be merged only from repositories set with this field Project environment for assigning this repository to. Allow values: "DEV" or "PROD" Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. List of property set names Proxy key from Artifactory Proxies settings Repository layout key for the remote layout mapping Repository layout key for the local repository The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. 
This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. When set, remote artifacts are fetched along with their properties. The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. Input properties used for looking up and filtering RemotePuppetRepository resources. :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. 
When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state. :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. 
:param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD" :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties. 
:param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. :param pulumi.Input[str] url: The remote repo URL. :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function failed_retrieval_cache_period_secs is deprecated: This field is not returned in a get payload but is offered on the UI. It's inserted here for inclusive and informational reasons. It does not function Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. 
Enables cookie management if the remote repository uses cookies to manage client state. List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. Setting repositories with priority will cause metadata to be merged only from repositories set with this field Project environment for assigning this repository to. Allow values: "DEV" or "PROD" Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. 
List of property set names Proxy key from Artifactory Proxies settings Repository layout key for the remote layout mapping Repository layout key for the local repository The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. When set, remote artifacts are fetched along with their properties. The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. The remote repo URL. Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. Creates a remote Puppet repository. Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories). ## Example Usage ```python import pulumi import pulumi_artifactory as artifactory my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet", key="my-remote-puppet", url="https://forgeapi.puppetlabs.com/") ``` ## Import Remote repositories can be imported using their name, e.g. ```sh $ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state. :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. 
:param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. :param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD" :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. 
:param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. :param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties. :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. :param pulumi.Input[str] url: The remote repo URL. :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. Creates a remote Puppet repository. 
Official documentation can be found [here](https://www.jfrog.com/confluence/display/JFROG/Puppet+Repositories). ## Example Usage ```python import pulumi import pulumi_artifactory as artifactory my_remote_puppet = artifactory.RemotePuppetRepository("my-remote-puppet", key="my-remote-puppet", url="https://forgeapi.puppetlabs.com/") ``` ## Import Remote repositories can be imported using their name, e.g. ```sh $ pulumi import artifactory:index/remotePuppetRepository:RemotePuppetRepository my-remote-puppet my-remote-puppet ``` :param str resource_name: The name of the resource. :param RemotePuppetRepositoryArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing RemotePuppetRepository resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] allow_any_host_auth: Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. :param pulumi.Input[int] assumed_offline_period_secs: The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. :param pulumi.Input[bool] blacked_out: (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. :param pulumi.Input[bool] block_mismatching_mime_types: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. 
In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] bypass_head_requests: Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. :param pulumi.Input[bool] enable_cookie_management: Enables cookie management if the remote repository uses cookies to manage client state. :param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. :param pulumi.Input[bool] hard_fail: When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. :param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). :param pulumi.Input[str] key: A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. :param pulumi.Input[bool] list_remote_folder_items: Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. :param pulumi.Input[str] local_address: The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. 
:param pulumi.Input[str] mismatching_mime_types_override_list: The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. :param pulumi.Input[int] missed_cache_period_seconds: The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. :param pulumi.Input[bool] offline: If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. :param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field :param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD" :param pulumi.Input[str] project_key: Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. :param pulumi.Input[bool] propagate_query_params: When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. :param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set names :param pulumi.Input[str] proxy: Proxy key from Artifactory Proxies settings :param pulumi.Input[str] remote_repo_layout_ref: Repository layout key for the remote layout mapping :param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository :param pulumi.Input[int] retrieval_cache_period_seconds: The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. :param pulumi.Input[int] socket_timeout_millis: Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. 
:param pulumi.Input[bool] store_artifacts_locally: When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. :param pulumi.Input[bool] synchronize_properties: When set, remote artifacts are fetched along with their properties. :param pulumi.Input[int] unused_artifacts_cleanup_period_hours: The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. :param pulumi.Input[str] url: The remote repo URL. :param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. Also known as 'Lenient Host Authentication', Allow credentials of this repository to be used on requests redirected to any other host. The number of seconds the repository stays in assumed offline state after a connection error. At the end of this time, an online check is attempted in order to reset the offline status. A value of 0 means the repository is never assumed offline. Default to 300. (A.K.A 'Ignore Repository' on the UI) When set, the repository or its local cache do not participate in artifact resolution. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. Before caching an artifact, Artifactory first sends a HEAD request to the remote resource. 
In some remote resources, HEAD requests are disallowed and therefore rejected, even though downloading the artifact is allowed. When checked, Artifactory will bypass the HEAD request and cache the artifact directly using a GET request. Enables cookie management if the remote repository uses cookies to manage client state. List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no artifacts are excluded. When set, Artifactory will return an error to the client that causes the build to fail if there is a failure to communicate with this repository. List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*). A mandatory identifier for the repository that must be unique. It cannot begin with a number or contain spaces or special characters. Lists the items of remote folders in simple and list browsing. The remote content is cached according to the value of the 'Retrieval Cache Period'. Default value is 'false'. The local address to be used when creating connections. Useful for specifying the interface to use on systems with multiple network interfaces. The set of mime types that should override the block_mismatching_mime_types setting. Eg: "application/json,application/xml". Default value is empty. The number of seconds to cache artifact retrieval misses (artifact not found). A value of 0 indicates no caching. If set, Artifactory does not try to fetch remote artifacts. Only locally-cached artifacts are retrieved. Setting repositories with priority will cause metadata to be merged only from repositories set with this field Project environment for assigning this repository to. Allow values: "DEV" or "PROD" Project key for assigning this repository to. Must be 3 - 10 lowercase alphanumeric characters. 
When assigning repository to a project, repository key must be prefixed with project key, separated by a dash. When set, if query params are included in the request to Artifactory, they will be passed on to the remote repository. List of property set names Proxy key from Artifactory Proxies settings Repository layout key for the remote layout mapping Repository layout key for the local repository The metadataRetrievalTimeoutSecs field not allowed to be bigger then retrievalCachePeriodSecs field. Network timeout (in ms) to use when establishing a connection and for unanswered requests. Timing out on a network operation is considered a retrieval failure. When set, the repository should store cached artifacts locally. When not set, artifacts are not stored locally, and direct repository-to-client streaming is used. This can be useful for multi-server setups over a high-speed LAN, with one Artifactory caching certain data on central storage, and streaming it directly to satellite pass-though Artifactory servers. When set, remote artifacts are fetched along with their properties. The number of hours to wait before an artifact is deemed "unused" and eligible for cleanup from the repository. A value of 0 means automatic cleanup of cached artifacts is disabled. The remote repo URL. Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via Xray settings. | 1.429007 | 1 |
geocoder/yandex_reverse.py | lavr/geocoder | 2 | 6624856 | <filename>geocoder/yandex_reverse.py
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.yandex import YandexResult, YandexQuery
from geocoder.location import Location
class YandexReverseResult(YandexResult):
    """A single reverse-geocoding result returned by the Yandex provider."""

    @property
    def ok(self):
        """Whether this result carries a usable (non-empty) address."""
        return True if self.address else False
class YandexReverse(YandexQuery):
    """Reverse geocoding through the Yandex Maps geocoder.

    Yandex (Russian: Яндекс) is a Russian Internet company which operates
    the largest search engine in Russia with about 60% market share in
    that country.  The Yandex home page has been rated as the most popular
    website in Russia.

    Params
    ------
    :param location: Your search location you want geocoded.
    :param lang: Chose the following language:
        > ru-RU — Russian (by default)
        > uk-UA — Ukrainian
        > be-BY — Belarusian
        > en-US — American English
        > en-BR — British English
        > tr-TR — Turkish (only for maps of Turkey)
    :param kind: Type of toponym (only for reverse geocoding):
        > house - house or building
        > street - street
        > metro - subway station
        > district - city district
        > locality - locality (city, town, village, etc.)

    References
    ----------
    API Reference: http://api.yandex.com/maps/doc/geocoder/
        desc/concepts/input_params.xml
    """
    provider = 'yandex'
    method = 'reverse'

    _RESULT_CLASS = YandexReverseResult

    def _build_params(self, location, provider_key, **kwargs):
        """Translate *location* plus keyword options into the query dict."""
        # Serialise the point as plain "x, y" text for the `geocode` field.
        # NOTE(review): assumes Location(...).xy yields exactly two values.
        coord_x, coord_y = Location(location).xy
        self.location = u'{}, {}'.format(coord_x, coord_y)
        params = {}
        params['geocode'] = self.location
        params['lang'] = kwargs.get('lang', 'en-US')
        params['kind'] = kwargs.get('kind', '')
        params['format'] = 'json'
        params['results'] = kwargs.get('maxRows', 1)
        params['apikey'] = kwargs.get('apikey', '')
        return params
# Manual smoke test: reverse-geocode a fixed point and dump the provider
# response when this module is executed directly.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    g = YandexReverse({'lat': 41.005407, 'lng': 28.978349})
    g.debug()
| <filename>geocoder/yandex_reverse.py
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.yandex import YandexResult, YandexQuery
from geocoder.location import Location
class YandexReverseResult(YandexResult):
@property
def ok(self):
return bool(self.address)
class YandexReverse(YandexQuery):
"""
Yandex
======
Yandex (Russian: Яндекс) is a Russian Internet company
which operates the largest search engine in Russia with
about 60% market share in that country.
The Yandex home page has been rated as the most popular website in Russia.
Params
------
:param location: Your search location you want geocoded.
:param lang: Chose the following language:
> ru-RU — Russian (by default)
> uk-UA — Ukrainian
> be-BY — Belarusian
> en-US — American English
> en-BR — British English
> tr-TR — Turkish (only for maps of Turkey)
:param kind: Type of toponym (only for reverse geocoding):
> house - house or building
> street - street
> metro - subway station
> district - city district
> locality - locality (city, town, village, etc.)
References
----------
API Reference: http://api.yandex.com/maps/doc/geocoder/
desc/concepts/input_params.xml
"""
provider = 'yandex'
method = 'reverse'
_RESULT_CLASS = YandexReverseResult
def _build_params(self, location, provider_key, **kwargs):
x, y = Location(location).xy
self.location = u'{}, {}'.format(x, y)
return {
'geocode': self.location,
'lang': kwargs.get('lang', 'en-US'),
'kind': kwargs.get('kind', ''),
'format': 'json',
'results': kwargs.get('maxRows', 1),
'apikey': kwargs.get('apikey', ''),
}
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = YandexReverse({'lat': 41.005407, 'lng': 28.978349})
g.debug()
| en | 0.849806 | #!/usr/bin/python # coding: utf8 Yandex ====== Yandex (Russian: Яндекс) is a Russian Internet company which operates the largest search engine in Russia with about 60% market share in that country. The Yandex home page has been rated as the most popular website in Russia. Params ------ :param location: Your search location you want geocoded. :param lang: Chose the following language: > ru-RU — Russian (by default) > uk-UA — Ukrainian > be-BY — Belarusian > en-US — American English > en-BR — British English > tr-TR — Turkish (only for maps of Turkey) :param kind: Type of toponym (only for reverse geocoding): > house - house or building > street - street > metro - subway station > district - city district > locality - locality (city, town, village, etc.) References ---------- API Reference: http://api.yandex.com/maps/doc/geocoder/ desc/concepts/input_params.xml | 3.258921 | 3 |
python/10950_A+B_3.py | anothel/BOJ | 0 | 6624857 | <gh_stars>0
from sys import stdin
def main():
    """Read T test cases of two integers from stdin; print each pair's sum."""
    case_count = int(stdin.readline().strip())
    for _ in range(case_count):
        first, second = (int(token) for token in stdin.readline().split())
        print(first + second)
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
| from sys import stdin
def main():
for T in range(int(stdin.readline().strip())):
A, B = map(int, stdin.readline().strip().split())
print(str(A+B))
if __name__ == "__main__":
main() | none | 1 | 3.187444 | 3 | |
ppgan/models/discriminators/discriminator_styleganv2.py | pcwuyu/PaddleGAN | 3 | 6624858 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code was heavily based on https://github.com/rosinality/stylegan2-pytorch
# MIT License
# Copyright (c) 2019 <NAME>
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from .builder import DISCRIMINATORS
from ...modules.equalized import EqualLinear, EqualConv2D
from ...modules.fused_act import FusedLeakyReLU
from ...modules.upfirdn2d import Upfirdn2dBlur
class ConvLayer(nn.Sequential):
    """Equalized conv block assembled as a Sequential:
    optional blur + strided downsample, EqualConv2D, optional fused
    leaky-ReLU activation (StyleGAN2 building block)."""

    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        modules = []
        if downsample:
            # Blur before the stride-2 conv; pad so the spatial size halves
            # exactly for this kernel size.
            factor = 2
            pad_total = (len(blur_kernel) - factor) + (kernel_size - 1)
            modules.append(
                Upfirdn2dBlur(blur_kernel,
                              pad=((pad_total + 1) // 2, pad_total // 2)))
            stride = 2
            self.padding = 0
        else:
            stride = 1
            self.padding = kernel_size // 2
        modules.append(
            EqualConv2D(in_channel,
                        out_channel,
                        kernel_size,
                        padding=self.padding,
                        stride=stride,
                        bias=bias and not activate))
        if activate:
            # FusedLeakyReLU folds the bias addition into the activation,
            # hence the conv above is created bias-free when activating.
            modules.append(FusedLeakyReLU(out_channel, bias=bias))
        super().__init__(*modules)
class ResBlock(nn.Layer):
    """Residual downsampling block: two 3x3 convs (second one strided) plus
    a bias-free 1x1 skip projection, summed and rescaled by 1/sqrt(2)."""

    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
        # Identity path: 1x1 projection, no activation, no bias.
        self.skip = ConvLayer(in_channel,
                              out_channel,
                              1,
                              downsample=True,
                              activate=False,
                              bias=False)

    def forward(self, input):
        residual = self.conv2(self.conv1(input))
        shortcut = self.skip(input)
        # Divide so the sum keeps roughly unit variance (StyleGAN2 style).
        return (residual + shortcut) / math.sqrt(2)
# Temporary workaround: compute variance without `pow`, which has a
# double-gradient problem in paddle.
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
    """Variance of `x` along `axis`, optionally with Bessel's correction."""
    mean = paddle.mean(x, axis, True, name)
    centered = x - mean
    out = paddle.sum(centered * centered, axis, keepdim=keepdim, name=name)
    # Number of elements reduced into each output entry.
    n = paddle.cast(paddle.numel(x), x.dtype) \
        / paddle.cast(paddle.numel(out), x.dtype)
    if unbiased:
        # Bessel's correction, guarding against a zero divisor when n == 1.
        one_const = paddle.ones([1], x.dtype)
        n = paddle.where(n > one_const, n - 1., one_const)
    out /= n
    return out
@DISCRIMINATORS.register()
class StyleGANv2Discriminator(nn.Layer):
    """StyleGAN v2 discriminator (paddle port of rosinality's code).

    Builds a fromRGB 1x1 conv followed by one downsampling ResBlock per
    octave from ``size`` down to 4x4, appends a minibatch-stddev feature
    map, then scores with a final conv and two equalized linear layers.

    Args:
        size: input resolution; must be a key of the channel table
            (power of two, 4..1024).
        channel_multiplier: width multiplier for the high-resolution layers.
        blur_kernel: FIR kernel used by the downsampling blur.
    """
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        # Feature width per resolution; low resolutions are capped at 512.
        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        # "fromRGB" entry conv, then one ResBlock per halving of resolution
        # until 4x4 is reached.
        convs = [ConvLayer(3, channels[size], 1)]
        log_size = int(math.log(size, 2))
        in_channel = channels[size]
        for i in range(log_size, 2, -1):
            out_channel = channels[2**(i - 1)]
            convs.append(ResBlock(in_channel, out_channel, blur_kernel))
            in_channel = out_channel
        self.convs = nn.Sequential(*convs)
        # Minibatch-stddev settings: group size and feature split count.
        self.stddev_group = 4
        self.stddev_feat = 1
        # +1 input channel for the appended stddev feature map.
        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4,
                        channels[4],
                        activation="fused_lrelu"),
            EqualLinear(channels[4], 1),
        )
    def forward(self, input):
        """Return per-sample realness scores of shape (batch, 1)."""
        out = self.convs(input)
        batch, channel, height, width = out.shape
        # Minibatch standard deviation: stddev across groups of up to
        # `stddev_group` samples is averaged and appended as one extra
        # feature map, letting the discriminator see batch statistics.
        group = min(batch, self.stddev_group)
        stddev = out.reshape((group, -1, self.stddev_feat,
                              channel // self.stddev_feat, height, width))
        stddev = paddle.sqrt(var(stddev, 0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
        stddev = stddev.tile((group, 1, height, width))
        out = paddle.concat([out, stddev], 1)
        out = self.final_conv(out)
        out = out.reshape((batch, -1))
        out = self.final_linear(out)
        return out
| # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code was heavily based on https://github.com/rosinality/stylegan2-pytorch
# MIT License
# Copyright (c) 2019 <NAME>
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from .builder import DISCRIMINATORS
from ...modules.equalized import EqualLinear, EqualConv2D
from ...modules.fused_act import FusedLeakyReLU
from ...modules.upfirdn2d import Upfirdn2dBlur
class ConvLayer(nn.Sequential):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
downsample=False,
blur_kernel=[1, 3, 3, 1],
bias=True,
activate=True,
):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Upfirdn2dBlur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
layers.append(
EqualConv2D(
in_channel,
out_channel,
kernel_size,
padding=self.padding,
stride=stride,
bias=bias and not activate,
))
if activate:
layers.append(FusedLeakyReLU(out_channel, bias=bias))
super().__init__(*layers)
class ResBlock(nn.Layer):
def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.conv1 = ConvLayer(in_channel, in_channel, 3)
self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
self.skip = ConvLayer(in_channel,
out_channel,
1,
downsample=True,
activate=False,
bias=False)
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
skip = self.skip(input)
out = (out + skip) / math.sqrt(2)
return out
# temporally solve pow double grad problem
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
u = paddle.mean(x, axis, True, name)
out = paddle.sum((x - u) * (x - u), axis, keepdim=keepdim, name=name)
n = paddle.cast(paddle.numel(x), x.dtype) \
/ paddle.cast(paddle.numel(out), x.dtype)
if unbiased:
one_const = paddle.ones([1], x.dtype)
n = paddle.where(n > one_const, n - 1., one_const)
out /= n
return out
@DISCRIMINATORS.register()
class StyleGANv2Discriminator(nn.Layer):
def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
super().__init__()
channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
convs = [ConvLayer(3, channels[size], 1)]
log_size = int(math.log(size, 2))
in_channel = channels[size]
for i in range(log_size, 2, -1):
out_channel = channels[2**(i - 1)]
convs.append(ResBlock(in_channel, out_channel, blur_kernel))
in_channel = out_channel
self.convs = nn.Sequential(*convs)
self.stddev_group = 4
self.stddev_feat = 1
self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
self.final_linear = nn.Sequential(
EqualLinear(channels[4] * 4 * 4,
channels[4],
activation="fused_lrelu"),
EqualLinear(channels[4], 1),
)
def forward(self, input):
out = self.convs(input)
batch, channel, height, width = out.shape
group = min(batch, self.stddev_group)
stddev = out.reshape((group, -1, self.stddev_feat,
channel // self.stddev_feat, height, width))
stddev = paddle.sqrt(var(stddev, 0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
stddev = stddev.tile((group, 1, height, width))
out = paddle.concat([out, stddev], 1)
out = self.final_conv(out)
out = out.reshape((batch, -1))
out = self.final_linear(out)
return out
| en | 0.86547 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # code was heavily based on https://github.com/rosinality/stylegan2-pytorch # MIT License # Copyright (c) 2019 <NAME> # temporally solve pow double grad problem | 2.062464 | 2 |
skfuzzy/image/tests/test_pad.py | MarcoMiretti/scikit-fuzzy | 5 | 6624859 | """Tests for the array pading functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
try:
from numpy.testing.decorators import skipif
except AttributeError:
from numpy.testing.dec import skipif
from _skipclass import skipclassif
from skfuzzy.image import pad
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestConditionalShortcuts(TestCase):
    """Shortcut behaviours of pad: zero-width padding and stat_length
    values that collapse a statistic mode into a simpler one."""
    def test_zero_padding_shortcuts(self):
        """Zero-width padding must return the input unchanged in every mode."""
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(0, 0) for axis in test.shape]
        modes = ['constant',
                 'edge',
                 'linear_ramp',
                 'maximum',
                 'mean',
                 'median',
                 'minimum',
                 'reflect',
                 'symmetric',
                 'wrap',
                 ]
        for mode in modes:
            assert_array_equal(test, pad(test, pad_amt, mode=mode))
    def test_shallow_statistic_range(self):
        """stat_length=1 must match 'edge' padding for all statistic modes."""
        test = np.arange(120).reshape(4, 5, 6)
        pad_amt = [(1, 1) for axis in test.shape]
        modes = ['maximum',
                 'mean',
                 'median',
                 'minimum',
                 ]
        for mode in modes:
            assert_array_equal(pad(test, pad_amt, mode='edge'),
                               pad(test, pad_amt, mode=mode, stat_length=1))
    def test_clip_statistic_range(self):
        """stat_length larger than the axis must clip to the full axis."""
        test = np.arange(30).reshape(5, 6)
        pad_amt = [(3, 3) for axis in test.shape]
        modes = ['maximum',
                 'mean',
                 'median',
                 'minimum',
                 ]
        for mode in modes:
            assert_array_equal(pad(test, pad_amt, mode=mode),
                               pad(test, pad_amt, mode=mode, stat_length=30))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
"NumPy's inbuilt pad used instead")
class TestStatistic(TestCase):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
)
assert_array_equal(a, b)
def test_check_median(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
    def test_check_mean_shape_one(self):
        """'mean' padding of a 1-row 2-D input with stat_length=2 matches the
        reference array (the length-1 axis is replicated)."""
        a = [[4, 5, 6]]
        a = pad(a, (5, 7), 'mean', stat_length=2)
        b = np.array(
            [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
            )
        assert_array_equal(a, b)
    def test_check_mean_2(self):
        """Default 'mean' padding (whole-array mean, 49.5) matches the
        reference array."""
        a = np.arange(100).astype('f')
        a = pad(a, (25, 20), 'mean')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestConstant(TestCase):
    """Tests for pad(..., mode='constant')."""
    def test_check_constant(self):
        """Per-side constants via constant_values=(10, 20)."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
             20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
            )
        assert_array_equal(a, b)
    def test_check_constant_zeros(self):
        """Default constant_values is zero on both sides."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'constant')
        b = np.array(
            [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
              0, 0, 0, 0, 0,
              0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)
    def test_check_constant_float(self):
        # If input array is int, but constant_values are float, the dtype of
        # the array to be padded is kept
        arr = np.arange(30).reshape(5, 6)
        test = pad(arr, (1, 2), mode='constant',
                   constant_values=1.1)
        # Expected values are truncated to int because the input dtype wins.
        expected = np.array(
            [[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [ 1, 0, 1, 2, 3, 4, 5, 1, 1],
             [ 1, 6, 7, 8, 9, 10, 11, 1, 1],
             [ 1, 12, 13, 14, 15, 16, 17, 1, 1],
             [ 1, 18, 19, 20, 21, 22, 23, 1, 1],
             [ 1, 24, 25, 26, 27, 28, 29, 1, 1],
             [ 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
            )
        assert_allclose(test, expected)
    def test_check_constant_float2(self):
        # If input array is float, and constant_values are float, the dtype of
        # the array to be padded is kept - here retaining the float constants
        arr = np.arange(30).reshape(5, 6)
        arr_float = arr.astype(np.float64)
        test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
                   constant_values=1.1)
        expected = np.array(
            [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
             [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
             [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
             [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
             [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
             [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],
             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
             [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
            )
        assert_allclose(test, expected)
    def test_check_constant_float3(self):
        """Negative float constants on a float input are kept exactly."""
        a = np.arange(100, dtype=float)
        a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
        b = np.array(
            [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
             -1.1, -1.1, -1.1, -1.1, -1.1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
             -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
            )
        assert_allclose(a, b)
    def test_check_constant_odd_pad_amount(self):
        """Single-element per-axis specs ((1,), (2,)) pad both sides of each
        axis by that amount."""
        arr = np.arange(30).reshape(5, 6)
        test = pad(arr, ((1,), (2,)), mode='constant',
                   constant_values=3)
        expected = np.array(
            [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
             [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
             [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
             [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
             [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
             [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
             [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
            )
        assert_allclose(test, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestLinearRamp(TestCase):
    """Tests for pad(..., mode='linear_ramp')."""
    def test_check_simple(self):
        """1-D ramp from end_values=(4, 5) to the edge samples matches the
        reference (compared with a loose tolerance for the float ramps)."""
        a = np.arange(100).astype('f')
        a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
        b = np.array(
            [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
             2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
             0.80, 0.64, 0.48, 0.32, 0.16,
             0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
             10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
             20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
             30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
             40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
             50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
             60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
             70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
             80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
             90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
             94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
             47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
            )
        assert_allclose(a, b, rtol=1e-5, atol=1e-5)
    def test_check_2d(self):
        """2-D ramp to end_values=(0, 0) matches the reference array."""
        arr = np.arange(20).reshape(4, 5).astype(np.float64)
        test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
             [0., 0., 0., 1., 2., 3., 4., 2., 0.],
             [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
             [0., 5., 10., 11., 12., 13., 14., 7., 0.],
             [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
             [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        assert_allclose(test, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestReflect(TestCase):
    """Tests for pad(..., mode='reflect') — mirrored without repeating the
    edge sample."""
    def test_check_simple(self):
        """Default (even) reflection of a 1-D range matches the reference."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'reflect')
        b = np.array(
            [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
             15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
             5, 4, 3, 2, 1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
             88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
            )
        assert_array_equal(a, b)
    def test_check_odd_method(self):
        """reflect_type='odd' mirrors values through the edge sample,
        producing negative / >99 values at the ends."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'reflect', reflect_type='odd')
        b = np.array(
            [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
             -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
             -5, -4, -3, -2, -1,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
             110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
            )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        """Pad widths larger than the input: reflection keeps cycling."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = pad(a, (5, 7), 'reflect')
        b = np.array(
            [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
            )
        assert_array_equal(a, b)
    def test_check_shape(self):
        """A length-1 axis is replicated while the other axis reflects."""
        a = [[4, 5, 6]]
        a = pad(a, (5, 7), 'reflect')
        b = np.array(
            [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
             [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
            )
        assert_array_equal(a, b)
    def test_check_01(self):
        """Pad width smaller than the input length."""
        a = pad([1, 2, 3], 2, 'reflect')
        b = np.array([3, 2, 1, 2, 3, 2, 1])
        assert_array_equal(a, b)
    def test_check_02(self):
        """Pad width exactly the input length."""
        a = pad([1, 2, 3], 3, 'reflect')
        b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
        assert_array_equal(a, b)
    def test_check_03(self):
        """Pad width larger than the input length (wraps the reflection)."""
        a = pad([1, 2, 3], 4, 'reflect')
        b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestSymmetric(TestCase):
    """Tests for pad(..., mode='symmetric') — mirrored including the edge
    sample (unlike 'reflect')."""
    def test_check_simple(self):
        """Default (even) symmetric padding of a 1-D range."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'symmetric')
        b = np.array(
            [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
             14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
             4, 3, 2, 1, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
             89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
            )
        assert_array_equal(a, b)
    def test_check_odd_method(self):
        """reflect_type='odd' symmetric padding (edge sample repeated, values
        continued through it)."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'symmetric', reflect_type='odd')
        b = np.array(
            [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
             -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
             -4, -3, -2, -1, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
             109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
            )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        """Pad widths larger than the input: symmetric tiling keeps cycling."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
            )
        assert_array_equal(a, b)
    def test_check_large_pad_odd(self):
        """Large odd-type symmetric padding ramps the tiles up/down."""
        a = [[4, 5, 6], [6, 7, 8]]
        a = pad(a, (5, 7), 'symmetric', reflect_type='odd')
        b = np.array(
            [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
             [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
             [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
             [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
            )
        assert_array_equal(a, b)
    def test_check_shape(self):
        """A length-1 axis is replicated while the other axis mirrors."""
        a = [[4, 5, 6]]
        a = pad(a, (5, 7), 'symmetric')
        b = np.array(
            [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
             [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
            )
        assert_array_equal(a, b)
    def test_check_01(self):
        """Pad width smaller than the input length."""
        a = pad([1, 2, 3], 2, 'symmetric')
        b = np.array([2, 1, 1, 2, 3, 3, 2])
        assert_array_equal(a, b)
    def test_check_02(self):
        """Pad width exactly the input length."""
        a = pad([1, 2, 3], 3, 'symmetric')
        b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
        assert_array_equal(a, b)
    def test_check_03(self):
        """Pad width twice the input length (full extra cycle)."""
        a = pad([1, 2, 3], 6, 'symmetric')
        b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestWrap(TestCase):
    """Tests for pad(..., mode='wrap') — periodic tiling of the input."""
    def test_check_simple(self):
        """1-D wrap padding matches the hard-coded reference array."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'wrap')
        b = np.array(
            [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
             85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
             95, 96, 97, 98, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
            )
        assert_array_equal(a, b)
    def test_check_large_pad(self):
        """2-D wrap padding with widths far larger than the 3x4 input keeps
        tiling it periodically; compared to the full reference array."""
        a = np.arange(12)
        a = np.reshape(a, (3, 4))
        a = pad(a, (10, 12), 'wrap')
        b = np.array(
            [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11],
             [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
              3, 0, 1, 2, 3, 0, 1, 2, 3],
             [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
              7, 4, 5, 6, 7, 4, 5, 6, 7],
             [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
              11, 8, 9, 10, 11, 8, 9, 10, 11]]
            )
        assert_array_equal(a, b)
    def test_check_01(self):
        """Pad width equal to the input length is one full extra period."""
        a = pad([1, 2, 3], 3, 'wrap')
        b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
        assert_array_equal(a, b)
    def test_check_02(self):
        """Pad width larger than the input length wraps partially."""
        a = pad([1, 2, 3], 4, 'wrap')
        b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestStatLen(TestCase):
    """Test that a single-element stat_length spec applies to all axes."""
    def test_check_simple(self):
        """2-D 'mean' padding with stat_length=(3,) matches the reference."""
        a = np.arange(30)
        a = np.reshape(a, (6, 5))
        a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
        b = np.array(
            [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
             [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
            )
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestEdge(TestCase):
    """Tests for pad(..., mode='edge') — edge values replicated outward."""
    def test_check_simple(self):
        """2-D 'edge' padding with asymmetric widths matches the reference."""
        a = np.arange(12)
        a = np.reshape(a, (4, 3))
        a = pad(a, ((2, 3), (3, 2)), 'edge')
        b = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
            )
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestZeroPadWidth(TestCase):
    """Zero-width padding must be a no-op."""
    def test_zero_pad_width(self):
        """Every accepted zero-width spec returns the input unchanged."""
        original = np.arange(30)
        original = np.reshape(original, (6, 5))
        zero_specs = (0, (0, 0), ((0, 0), (0, 0)))
        for spec in zero_specs:
            padded = pad(original, spec, mode='constant')
            assert_array_equal(original, padded)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestLegacyVectorFunction(TestCase):
    """Test the legacy API where a user callable is passed as the mode."""
    def test_legacy_vector_functionality(self):
        # Legacy-style padding function: receives each 1-D vector with its
        # pad regions pre-allocated and fills them in place with 10s.
        def _padwithtens(vector, pad_width, iaxis, kwargs):
            vector[:pad_width[0]] = 10
            vector[-pad_width[1]:] = 10
            return vector
        a = np.arange(6).reshape(2, 3)
        a = pad(a, 2, _padwithtens)
        b = np.array(
            [[10, 10, 10, 10, 10, 10, 10],
             [10, 10, 10, 10, 10, 10, 10],
             [10, 10, 0, 1, 2, 10, 10],
             [10, 10, 3, 4, 5, 10, 10],
             [10, 10, 10, 10, 10, 10, 10],
             [10, 10, 10, 10, 10, 10, 10]]
            )
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestNdarrayPadWidth(TestCase):
    """Pad widths given as an ndarray must behave like the tuple form."""
    def test_check_simple(self):
        """Same expected output as TestEdge.test_check_simple, with the pad
        spec wrapped in np.array."""
        a = np.arange(12)
        a = np.reshape(a, (4, 3))
        a = pad(a, np.array(((2, 3), (3, 2))), 'edge')
        b = np.array(
            [[0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [0, 0, 0, 0, 1, 2, 2, 2],
             [3, 3, 3, 3, 4, 5, 5, 5],
             [6, 6, 6, 6, 7, 8, 8, 8],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11],
             [9, 9, 9, 9, 10, 11, 11, 11]]
            )
        assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError1(TestCase):
    """Invalid pad specifications must raise ValueError."""
    def test_check_simple(self):
        """A pad spec with more axis entries than the array has axes."""
        data = np.arange(30)
        data = np.reshape(data, (6, 5))
        assert_raises(ValueError, pad, data, ((2, 3), (3, 2), (4, 5)),
                      mode='mean', stat_length=(3, ))
    def test_check_negative_stat_length(self):
        """A negative stat_length value."""
        data = np.arange(30)
        data = np.reshape(data, (6, 5))
        assert_raises(ValueError, pad, data, ((2, 3), (3, 2)),
                      mode='mean', stat_length=(-3, ))
    def test_check_negative_pad_width(self):
        """A negative pad width."""
        data = np.arange(30)
        data = np.reshape(data, (6, 5))
        assert_raises(ValueError, pad, data, ((-2, 3), (3, 2)),
                      mode='mean', stat_length=(3, ))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError2(TestCase):
    """Negative pad amounts must raise ValueError."""
    def test_check_negative_pad_amount(self):
        """A negative entry in the pad-width spec is rejected."""
        data = np.arange(30)
        data = np.reshape(data, (6, 5))
        assert_raises(ValueError, pad, data, ((-2, 3), (3, 2)),
                      mode='mean', stat_length=(3, ))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError3(TestCase):
    """Malformed call signatures must raise ValueError."""
    def test_check_kwarg_not_allowed(self):
        """A keyword that does not belong to the chosen mode is rejected."""
        data = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, data, 4, mode='mean',
                      reflect_type='odd')
    def test_mode_not_set(self):
        """Calling pad without a mode is rejected."""
        data = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, data, 4)
    def test_malformed_pad_amount(self):
        """A flat 4-tuple is not a valid per-axis pad spec for a 2-D array."""
        data = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, data, (4, 5, 6, 7), mode='constant')
    def test_malformed_pad_amount2(self):
        """Three entries per axis is not a valid pad spec."""
        data = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, data, ((3, 4, 5), (0, 1, 2)),
                      mode='constant')
    def test_pad_too_many_axes(self):
        """A 3-D-shaped pad spec cannot be applied to a 2-D array."""
        data = np.arange(30).reshape(5, 6)
        # Attempt to pad using a 3D array equivalent
        bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
        assert_raises(ValueError, pad, data, bad_shape,
                      mode='constant')
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TypeError1(TestCase):
    """Pad-width specs of the wrong type must raise TypeError."""
    def test_float(self):
        """Non-integral pad widths (tuple or ndarray) are rejected."""
        data = np.arange(30)
        assert_raises(TypeError, pad, data, ((-2.1, 3), (3, 2)))
        assert_raises(TypeError, pad, data, np.array(((-2.1, 3), (3, 2))))
    def test_str(self):
        """A string is not a valid pad width."""
        data = np.arange(30)
        assert_raises(TypeError, pad, data, 'foo')
        assert_raises(TypeError, pad, data, np.array('foo'))
    def test_object(self):
        """An arbitrary object is not a valid pad width."""
        class Opaque(object):
            pass
        data = np.arange(30)
        assert_raises(TypeError, pad, data, Opaque())
    def test_complex(self):
        """Complex numbers are not valid pad widths."""
        data = np.arange(30)
        assert_raises(TypeError, pad, data, complex(1, -1))
        assert_raises(TypeError, pad, data, np.array(complex(1, -1)))
    def test_check_wrong_pad_amount(self):
        """Three integer entries per axis raise TypeError here (contrast with
        the float case, which also raises TypeError)."""
        data = np.arange(30)
        data = np.reshape(data, (6, 5))
        assert_raises(TypeError, pad, data, ((2, 3, 4), (3, 2)),
                      mode='mean', stat_length=(3, ))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    np.testing.run_module_suite()
| """Tests for the array pading functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
try:
from numpy.testing.decorators import skipif
except AttributeError:
from numpy.testing.dec import skipif
from _skipclass import skipclassif
from skfuzzy.image import pad
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestConditionalShortcuts(TestCase):
    """Shortcut code paths in pad() must agree with the general paths."""
    def test_zero_padding_shortcuts(self):
        """Zero-width padding returns the input unchanged for every mode."""
        data = np.arange(120).reshape(4, 5, 6)
        no_pad = [(0, 0)] * len(data.shape)
        all_modes = ['constant',
                     'edge',
                     'linear_ramp',
                     'maximum',
                     'mean',
                     'median',
                     'minimum',
                     'reflect',
                     'symmetric',
                     'wrap',
                     ]
        for mode in all_modes:
            assert_array_equal(data, pad(data, no_pad, mode=mode))
    def test_shallow_statistic_range(self):
        """With stat_length=1 every statistic mode must match 'edge'."""
        data = np.arange(120).reshape(4, 5, 6)
        one_pad = [(1, 1)] * len(data.shape)
        stat_modes = ['maximum',
                      'mean',
                      'median',
                      'minimum',
                      ]
        for mode in stat_modes:
            assert_array_equal(pad(data, one_pad, mode='edge'),
                               pad(data, one_pad, mode=mode, stat_length=1))
    def test_clip_statistic_range(self):
        """A stat_length larger than the axis must match the default
        (whole-axis) statistic."""
        data = np.arange(30).reshape(5, 6)
        wide_pad = [(3, 3)] * len(data.shape)
        stat_modes = ['maximum',
                      'mean',
                      'median',
                      'minimum',
                      ]
        for mode in stat_modes:
            assert_array_equal(pad(data, wide_pad, mode=mode),
                               pad(data, wide_pad, mode=mode, stat_length=30))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
"NumPy's inbuilt pad used instead")
class TestStatistic(TestCase):
    def test_check_mean_stat_length(self):
        """'mean' mode with per-side windows stat_length=((2, 3),): left pad
        is mean(0, 1) = 0.5, right pad is mean(97, 98, 99) = 98."""
        a = np.arange(100).astype('f')
        a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
        b = np.array(
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
             ])
        assert_array_equal(a, b)
    def test_check_maximum_1(self):
        """Default 'maximum' padding fills both sides with the array max
        (99)."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'maximum')
        b = np.array(
            [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
            )
        assert_array_equal(a, b)
    def test_check_maximum_2(self):
        """'maximum' padding on a shifted range (1..100) fills with 100."""
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'maximum')
        b = np.array(
            [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100,
             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)
    def test_check_maximum_stat_length(self):
        """'maximum' with stat_length=10: left pad uses max(1..10) = 10,
        right pad uses max(91..100) = 100."""
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'maximum', stat_length=10)
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,
             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)
    def test_check_minimum_1(self):
        """Default 'minimum' padding fills both sides with the array min
        (0)."""
        a = np.arange(100)
        a = pad(a, (25, 20), 'minimum')
        b = np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)
    def test_check_minimum_2(self):
        """'minimum' padding on a shifted range (2..101) fills with 2."""
        a = np.arange(100) + 2
        a = pad(a, (25, 20), 'minimum')
        b = np.array(
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2,
             2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
             12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
             22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
             32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
             42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
             52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
             62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
             72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
             82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
             92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
            )
        assert_array_equal(a, b)
    def test_check_minimum_stat_length(self):
        """'minimum' with stat_length=10: left pad uses min(1..10) = 1,
        right pad uses min(91..100) = 91."""
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'minimum', stat_length=10)
        b = np.array(
            [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
            )
        assert_array_equal(a, b)
    def test_check_median(self):
        """Default 'median' padding fills both sides with the array median
        (49.5)."""
        a = np.arange(100).astype('f')
        a = pad(a, (25, 20), 'median')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)
    def test_check_median_01(self):
        """1-px 'median' padding of a 3x3 array matches the reference array."""
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = pad(a, 1, 'median')
        b = np.array(
            [[4, 4, 5, 4, 4],
             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],
             [4, 4, 5, 4, 4]]
            )
        assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
def test_check_mean_2(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestConstant(TestCase):
    """Tests for ``mode='constant'``."""

    def test_check_constant(self):
        padded = pad(np.arange(100), (25, 20), 'constant',
                     constant_values=(10, 20))
        expected = np.concatenate([np.full(25, 10), np.arange(100),
                                   np.full(20, 20)])
        assert_array_equal(padded, expected)

    def test_check_constant_zeros(self):
        # Without constant_values the default fill value is 0.
        padded = pad(np.arange(100), (25, 20), 'constant')
        expected = np.concatenate([np.zeros(25, dtype=int), np.arange(100),
                                   np.zeros(20, dtype=int)])
        assert_array_equal(padded, expected)

    def test_check_constant_float(self):
        # If input array is int, but constant_values are float, the dtype of
        # the array to be padded is kept: the 1.1 fill is truncated to 1.
        arr = np.arange(30).reshape(5, 6)
        test = pad(arr, (1, 2), mode='constant',
                   constant_values=1.1)
        expected = np.full((8, 9), 1)
        expected[1:6, 1:7] = arr
        assert_allclose(test, expected)

    def test_check_constant_float2(self):
        # If input array is float, and constant_values are float, the dtype
        # of the array to be padded is kept - the 1.1 constants survive.
        arr_float = np.arange(30, dtype=np.float64).reshape(5, 6)
        test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
                   constant_values=1.1)
        expected = np.full((8, 9), 1.1)
        expected[1:6, 1:7] = arr_float
        assert_allclose(test, expected)

    def test_check_constant_float3(self):
        values = np.arange(100, dtype=float)
        padded = pad(values, (25, 20), 'constant',
                     constant_values=(-1.1, -1.2))
        expected = np.concatenate([np.full(25, -1.1), values,
                                   np.full(20, -1.2)])
        assert_allclose(padded, expected)

    def test_check_constant_odd_pad_amount(self):
        # A single-element tuple per axis means the same amount on both
        # sides of that axis: 1 row top/bottom, 2 columns left/right.
        arr = np.arange(30).reshape(5, 6)
        test = pad(arr, ((1,), (2,)), mode='constant',
                   constant_values=3)
        expected = np.full((7, 10), 3)
        expected[1:6, 2:8] = arr
        assert_allclose(test, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestLinearRamp(TestCase):
    """Tests for ``mode='linear_ramp'``."""

    def test_check_simple(self):
        values = np.arange(100).astype('f')
        padded = pad(values, (25, 20), 'linear_ramp', end_values=(4, 5))
        # Left ramp: 4 down towards 0 in steps of 4/25 = 0.16.
        # Right ramp: 99 down to 5 in steps of (99 - 5)/20 = 4.7.
        left = 4.0 - 0.16 * np.arange(25)
        right = 99.0 - 4.7 * np.arange(1, 21)
        expected = np.concatenate([left, values, right])
        assert_allclose(padded, expected, rtol=1e-5, atol=1e-5)

    def test_check_2d(self):
        arr = np.arange(20).reshape(4, 5).astype(np.float64)
        test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
             [0., 0., 0., 1., 2., 3., 4., 2., 0.],
             [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
             [0., 5., 10., 11., 12., 13., 14., 7., 0.],
             [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
             [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        assert_allclose(test, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestReflect(TestCase):
    """Tests for ``mode='reflect'`` (the edge value is not repeated)."""

    def test_check_simple(self):
        values = np.arange(100)
        padded = pad(values, (25, 20), 'reflect')
        # Left: 25..1 mirrored about index 0; right: 98..79 mirrored
        # about index 99.
        expected = np.concatenate([values[25:0:-1], values,
                                   values[98:78:-1]])
        assert_array_equal(padded, expected)

    def test_check_odd_method(self):
        padded = pad(np.arange(100), (25, 20), 'reflect',
                     reflect_type='odd')
        # Odd reflection of the ramp 0..99 simply extends the ramp.
        expected = np.arange(-25, 120)
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'reflect')
        # With 2 rows and 3 columns the reflection is periodic: each row
        # repeats with period 4 and the two row kinds alternate.
        row_high = np.tile([7, 6, 7, 8], 4)[:15]
        row_low = np.tile([5, 4, 5, 6], 4)[:15]
        expected = np.array([row_high, row_low] * 7)
        assert_array_equal(padded, expected)

    def test_check_shape(self):
        padded = pad([[4, 5, 6]], (5, 7), 'reflect')
        # A single input row yields 13 identical padded rows.
        expected = np.array([np.tile([5, 4, 5, 6], 4)[:15]] * 13)
        assert_array_equal(padded, expected)

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 2, 'reflect'),
                           np.array([3, 2, 1, 2, 3, 2, 1]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 3, 'reflect'),
                           np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]))

    def test_check_03(self):
        assert_array_equal(pad([1, 2, 3], 4, 'reflect'),
                           np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestSymmetric(TestCase):
    """Tests for ``mode='symmetric'`` (the edge value is repeated)."""

    def test_check_simple(self):
        values = np.arange(100)
        padded = pad(values, (25, 20), 'symmetric')
        # Left: 24..0 (edge repeated); right: 99..80 (edge repeated).
        expected = np.concatenate([values[24::-1], values,
                                   values[99:79:-1]])
        assert_array_equal(padded, expected)

    def test_check_odd_method(self):
        padded = pad(np.arange(100), (25, 20), 'symmetric',
                     reflect_type='odd')
        # Odd symmetric padding repeats the edge values 0 and 99 once,
        # then continues the ramp.
        expected = np.concatenate([np.arange(-24, 1), np.arange(100),
                                   np.arange(99, 119)])
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'symmetric')
        # Each row repeats with period 6; row kinds come in pairs.
        row_low = np.tile([5, 6, 6, 5, 4, 4], 3)[:15]
        row_high = np.tile([7, 8, 8, 7, 6, 6], 3)[:15]
        expected = np.array(([row_low, row_low, row_high, row_high] * 4)[:14])
        assert_array_equal(padded, expected)

    def test_check_large_pad_odd(self):
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'symmetric',
                     reflect_type='odd')
        # Every pair of rows is the first row shifted up by 2.
        first_row = np.array([-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6])
        row_offsets = np.repeat(np.arange(0, 14, 2), 2)
        expected = first_row[np.newaxis, :] + row_offsets[:, np.newaxis]
        assert_array_equal(padded, expected)

    def test_check_shape(self):
        padded = pad([[4, 5, 6]], (5, 7), 'symmetric')
        # A single input row yields 13 identical padded rows.
        expected = np.array([np.tile([5, 6, 6, 5, 4, 4], 3)[:15]] * 13)
        assert_array_equal(padded, expected)

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 2, 'symmetric'),
                           np.array([2, 1, 1, 2, 3, 3, 2]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 3, 'symmetric'),
                           np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]))

    def test_check_03(self):
        assert_array_equal(
            pad([1, 2, 3], 6, 'symmetric'),
            np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestWrap(TestCase):
    """Tests for ``mode='wrap'`` (periodic padding)."""

    def test_check_simple(self):
        values = np.arange(100)
        padded = pad(values, (25, 20), 'wrap')
        # Left pad wraps in the last 25 values; right pad the first 20.
        expected = np.concatenate([values[-25:], values, values[:20]])
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        arr = np.arange(12).reshape(3, 4)
        padded = pad(arr, (10, 12), 'wrap')
        # Wrapping tiles the array periodically; slice a (25, 26) window
        # out of a large tiling, offset so the original lands at (10, 10)
        # (offset 2 works because 12 is a multiple of both 3 and 4).
        expected = np.tile(arr, (9, 8))[2:27, 2:28]
        assert_array_equal(padded, expected)

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 3, 'wrap'),
                           np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 4, 'wrap'),
                           np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestStatLen(TestCase):
    """A single-element ``stat_length`` tuple applies to every axis."""

    def test_check_simple(self):
        arr = np.arange(30).reshape(6, 5)
        padded = pad(arr, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
        expected = np.array(
            [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
             [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
        )
        assert_array_equal(padded, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestEdge(TestCase):
    """Tests for ``mode='edge'`` (replicate the border values)."""

    def test_check_simple(self):
        arr = np.arange(12).reshape(4, 3)
        padded = pad(arr, ((2, 3), (3, 2)), 'edge')
        # Edge padding replicates the nearest in-bounds element, which is
        # exactly what clipped fancy-indexing produces.
        rows = np.clip(np.arange(-2, 7), 0, 3)
        cols = np.clip(np.arange(-3, 5), 0, 2)
        expected = arr[rows][:, cols]
        assert_array_equal(padded, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestZeroPadWidth(TestCase):
    """A pad width of zero, however spelled, returns the array unchanged."""

    def test_zero_pad_width(self):
        arr = np.arange(30).reshape(6, 5)
        for zero_width in (0, (0, 0), ((0, 0), (0, 0))):
            assert_array_equal(arr, pad(arr, zero_width, mode='constant'))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestLegacyVectorFunction(TestCase):
    """Padding with a user-supplied vector function (legacy interface)."""

    def test_legacy_vector_functionality(self):
        def _padwithtens(vector, pad_width, iaxis, kwargs):
            # Overwrite the pad regions at both ends with tens.
            vector[:pad_width[0]] = 10
            vector[-pad_width[1]:] = 10
            return vector

        padded = pad(np.arange(6).reshape(2, 3), 2, _padwithtens)
        expected = np.full((6, 7), 10)
        expected[2:4, 2:5] = np.arange(6).reshape(2, 3)
        assert_array_equal(padded, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TestNdarrayPadWidth(TestCase):
    """``pad_width`` may be given as an ndarray instead of a sequence."""

    def test_check_simple(self):
        arr = np.arange(12).reshape(4, 3)
        padded = pad(arr, np.array(((2, 3), (3, 2))), 'edge')
        # Same expectation as TestEdge.test_check_simple: clipped
        # fancy-indexing reproduces edge replication.
        rows = np.clip(np.arange(-2, 7), 0, 3)
        cols = np.clip(np.arange(-3, 5), 0, 2)
        expected = arr[rows][:, cols]
        assert_array_equal(padded, expected)
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError1(TestCase):
    """Invalid pad specifications must raise ValueError."""

    def test_check_simple(self):
        # More pad-width pairs than the array has dimensions.
        arr = np.arange(30).reshape(6, 5)
        assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)),
                      mode='mean', stat_length=(3,))

    def test_check_negative_stat_length(self):
        arr = np.arange(30).reshape(6, 5)
        assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)),
                      mode='mean', stat_length=(-3,))

    def test_check_negative_pad_width(self):
        arr = np.arange(30).reshape(6, 5)
        assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
                      mode='mean', stat_length=(3,))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError2(TestCase):
    """A negative pad amount must raise ValueError."""

    # NOTE(review): this duplicates ValueError1.test_check_negative_pad_width
    # exactly; the two classes could probably be merged.
    def test_check_negative_pad_amount(self):
        arr = np.arange(30).reshape(6, 5)
        assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
                      mode='mean', stat_length=(3,))
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class ValueError3(TestCase):
    """Malformed keyword arguments and pad amounts must raise ValueError."""

    def test_check_kwarg_not_allowed(self):
        # reflect_type is not a valid keyword argument for mode='mean'.
        assert_raises(ValueError, pad, np.arange(30).reshape(5, 6), 4,
                      mode='mean', reflect_type='odd')

    def test_mode_not_set(self):
        assert_raises(ValueError, pad, np.arange(30).reshape(5, 6), 4)

    def test_malformed_pad_amount(self):
        assert_raises(ValueError, pad, np.arange(30).reshape(5, 6),
                      (4, 5, 6, 7), mode='constant')

    def test_malformed_pad_amount2(self):
        assert_raises(ValueError, pad, np.arange(30).reshape(5, 6),
                      ((3, 4, 5), (0, 1, 2)), mode='constant')

    def test_pad_too_many_axes(self):
        # Attempt to pad using a 3D array equivalent
        bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
        assert_raises(ValueError, pad, np.arange(30).reshape(5, 6),
                      bad_shape, mode='constant')
@skipclassif(LooseVersion(np.__version__) > LooseVersion("1.8"),
             "NumPy's inbuilt pad used instead")
class TypeError1(TestCase):
    """Non-integer pad amounts must raise TypeError."""

    def test_float(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
        assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2))))

    def test_str(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, 'foo')
        assert_raises(TypeError, pad, arr, np.array('foo'))

    def test_object(self):
        class FooBar(object):
            pass

        assert_raises(TypeError, pad, np.arange(30), FooBar())

    def test_complex(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, complex(1, -1))
        assert_raises(TypeError, pad, arr, np.array(complex(1, -1)))

    def test_check_wrong_pad_amount(self):
        # A non-integer number of entries per axis is a TypeError rather
        # than a ValueError.
        arr = np.arange(30).reshape(6, 5)
        assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
                      mode='mean', stat_length=(3,))
# Allow this test module to be executed directly as a script.
if __name__ == "__main__":
    np.testing.run_module_suite()
| en | 0.754407 | Tests for the array pading functions. # If input array is int, but constant_values are float, the dtype of # the array to be padded is kept # If input array is float, and constant_values are float, the dtype of # the array to be padded is kept - here retaining the float constants # Attempt to pad using a 3D array equivalent | 2.152641 | 2 |
zproject/dev_urls.py | shubhamgupta2956/zulip | 0 | 6624860 | <reponame>shubhamgupta2956/zulip
import os
from urllib.parse import urlsplit
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.views import serve as staticfiles_serve
from django.http import HttpRequest, HttpResponse
from django.urls import path
from django.views.generic import TemplateView
from django.views.static import serve
import zerver.views.auth
import zerver.views.development.email_log
import zerver.views.development.integrations
import zerver.views.development.registration
# These URLs are available only in the development environment
use_prod_static = not settings.DEBUG
urls = [
    # Serve useful development environment resources (docs, coverage reports, etc.)
    path('coverage/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'var/coverage'),
                 'show_indexes': True}),
    path('node-coverage/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'var/node-coverage/lcov-report'),
                 'show_indexes': True}),
    path('docs/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'docs/_build/html')}),
    # The special no-password login endpoint for development
    path('devlogin/', zerver.views.auth.login_page,
         {'template_name': 'zerver/dev_login.html'}, name='zerver.views.auth.login_page'),
    # Page for testing email templates
    path('emails/', zerver.views.development.email_log.email_page),
    path('emails/generate/', zerver.views.development.email_log.generate_all_emails),
    path('emails/clear/', zerver.views.development.email_log.clear_emails),
    # Listing of useful URLs and various tools for development
    path('devtools/', TemplateView.as_view(template_name='zerver/dev_tools.html')),
    # Register New User and Realm
    path('devtools/register_user/',
         zerver.views.development.registration.register_development_user,
         name='zerver.views.development.registration.register_development_user'),
    path('devtools/register_realm/',
         zerver.views.development.registration.register_development_realm,
         name='zerver.views.development.registration.register_development_realm'),
    # Have easy access for error pages
    path('errors/404/', TemplateView.as_view(template_name='404.html')),
    path('errors/5xx/', TemplateView.as_view(template_name='500.html')),
    # Add a convenient way to generate webhook messages from fixtures.
    path('devtools/integrations/', zerver.views.development.integrations.dev_panel),
    path('devtools/integrations/check_send_webhook_fixture_message',
         zerver.views.development.integrations.check_send_webhook_fixture_message),
    path('devtools/integrations/send_all_webhook_fixture_messages',
         zerver.views.development.integrations.send_all_webhook_fixture_messages),
    path('devtools/integrations/<integration_name>/fixtures',
         zerver.views.development.integrations.get_fixtures),
]
# Serve static assets via the Django server
if use_prod_static:
    urls += [
        path('static/<path:path>', serve, {'document_root': settings.STATIC_ROOT}),
    ]
else:
    # Wrap the staticfiles view to add a permissive CORS header, so assets
    # can be fetched cross-origin (presumably by a dev server on another
    # port -- TODO confirm the consumer).
    def serve_static(request: HttpRequest, path: str) -> HttpResponse:
        response = staticfiles_serve(request, path)
        response["Access-Control-Allow-Origin"] = "*"
        return response
    urls += static(urlsplit(settings.STATIC_URL).path, view=serve_static)
# Internationalized (locale-prefixed) URL patterns.
i18n_urls = [
    path('confirmation_key/', zerver.views.development.registration.confirmation_key),
]
urls += i18n_urls
# On a production instance, these files would be served by nginx.
if settings.LOCAL_UPLOADS_DIR is not None:
    avatars_url = path(
        'user_avatars/<path:path>',
        serve,
        {'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")},
    )
    urls += [avatars_url]
| import os
from urllib.parse import urlsplit
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.views import serve as staticfiles_serve
from django.http import HttpRequest, HttpResponse
from django.urls import path
from django.views.generic import TemplateView
from django.views.static import serve
import zerver.views.auth
import zerver.views.development.email_log
import zerver.views.development.integrations
import zerver.views.development.registration
# NOTE(review): this entire section duplicates the block above --
# it looks like dataset/merge residue rather than intentional code.
# These URLs are available only in the development environment
use_prod_static = not settings.DEBUG
urls = [
    # Serve useful development environment resources (docs, coverage reports, etc.)
    path('coverage/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'var/coverage'),
                 'show_indexes': True}),
    path('node-coverage/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'var/node-coverage/lcov-report'),
                 'show_indexes': True}),
    path('docs/<path:path>',
         serve, {'document_root':
                 os.path.join(settings.DEPLOY_ROOT, 'docs/_build/html')}),
    # The special no-password login endpoint for development
    path('devlogin/', zerver.views.auth.login_page,
         {'template_name': 'zerver/dev_login.html'}, name='zerver.views.auth.login_page'),
    # Page for testing email templates
    path('emails/', zerver.views.development.email_log.email_page),
    path('emails/generate/', zerver.views.development.email_log.generate_all_emails),
    path('emails/clear/', zerver.views.development.email_log.clear_emails),
    # Listing of useful URLs and various tools for development
    path('devtools/', TemplateView.as_view(template_name='zerver/dev_tools.html')),
    # Register New User and Realm
    path('devtools/register_user/',
         zerver.views.development.registration.register_development_user,
         name='zerver.views.development.registration.register_development_user'),
    path('devtools/register_realm/',
         zerver.views.development.registration.register_development_realm,
         name='zerver.views.development.registration.register_development_realm'),
    # Have easy access for error pages
    path('errors/404/', TemplateView.as_view(template_name='404.html')),
    path('errors/5xx/', TemplateView.as_view(template_name='500.html')),
    # Add a convenient way to generate webhook messages from fixtures.
    path('devtools/integrations/', zerver.views.development.integrations.dev_panel),
    path('devtools/integrations/check_send_webhook_fixture_message',
         zerver.views.development.integrations.check_send_webhook_fixture_message),
    path('devtools/integrations/send_all_webhook_fixture_messages',
         zerver.views.development.integrations.send_all_webhook_fixture_messages),
    path('devtools/integrations/<integration_name>/fixtures',
         zerver.views.development.integrations.get_fixtures),
]
# Serve static assets via the Django server
if use_prod_static:
    urls += [
        path('static/<path:path>', serve, {'document_root': settings.STATIC_ROOT}),
    ]
else:
    # Wrap the staticfiles view to add a permissive CORS header, so assets
    # can be fetched cross-origin (presumably by a dev server on another
    # port -- TODO confirm the consumer).
    def serve_static(request: HttpRequest, path: str) -> HttpResponse:
        response = staticfiles_serve(request, path)
        response["Access-Control-Allow-Origin"] = "*"
        return response
    urls += static(urlsplit(settings.STATIC_URL).path, view=serve_static)
# Internationalized (locale-prefixed) URL patterns.
i18n_urls = [
    path('confirmation_key/', zerver.views.development.registration.confirmation_key),
]
urls += i18n_urls
# On a production instance, these files would be served by nginx.
if settings.LOCAL_UPLOADS_DIR is not None:
    avatars_url = path(
        'user_avatars/<path:path>',
        serve,
        {'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")},
    )
    # NOTE(review): the trailing "|" below is extraction residue from the
    # dataset column separator, kept byte-identical here.
    urls += [avatars_url] |
azurelinuxagent/pa/rdma/centos.py | clearlinux/WALinuxAgent | 2 | 6624861 | <filename>azurelinuxagent/pa/rdma/centos.py
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import glob
import os
import re
import time
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.rdma import RDMAHandler
class CentOSRDMAHandler(RDMAHandler):
    """RDMA driver handler for CentOS/RHEL.

    Ensures the Hyper-V KVP daemon is running and that the Microsoft
    RDMA user-mode and kernel-mode driver packages match the RDMA
    firmware version reported by the platform, rebooting when needed.
    """

    # The "wrapper" package drops the per-release driver RPMs under
    # /opt/microsoft/rdma/.
    rdma_user_mode_package_name = 'microsoft-hyper-v-rdma'
    rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma'
    rdma_wrapper_package_name = 'msft-rdma-drivers'
    hyper_v_package_name = "hypervkvpd"
    hyper_v_package_name_new = "microsoft-hyper-v"

    version_major = None   # e.g. '7' for CentOS 7.3
    version_minor = None   # e.g. '3' for CentOS 7.3

    def __init__(self, distro_version):
        """distro_version: dotted release string such as '7.3'."""
        v = distro_version.split('.')
        if len(v) < 2:
            raise Exception('Unexpected centos version: %s' % distro_version)
        self.version_major, self.version_minor = v[0], v[1]

    def install_driver(self):
        """
        Install the KVP daemon and the appropriate RDMA driver package for the
        RDMA firmware.
        """
        # Check and install the KVP daemon if it is not running.
        time.sleep(10)  # give some time for the kvp daemon to start up.
        kvpd_running = RDMAHandler.is_kvp_daemon_running()
        logger.info('RDMA: kvp daemon running: %s' % kvpd_running)
        if not kvpd_running:
            self.check_or_install_kvp_daemon()
            time.sleep(10)  # wait for post-install reboot or kvp to come up

        # Find out the RDMA firmware version and see if the existing package
        # needs updating or if the package is missing altogether (and install it).
        fw_version = RDMAHandler.get_rdma_version()
        if not fw_version:
            raise Exception('Cannot determine RDMA firmware version')
        logger.info("RDMA: found firmware version: {0}".format(fw_version))
        fw_version = self.get_int_rdma_version(fw_version)
        installed_pkg = self.get_rdma_package_info()
        if installed_pkg:
            logger.info(
                'RDMA: driver package present: {0}'.format(installed_pkg))
            if self.is_rdma_package_up_to_date(installed_pkg, fw_version):
                logger.info('RDMA: driver package is up-to-date')
                return
            else:
                logger.info('RDMA: driver package needs updating')
                self.update_rdma_package(fw_version)
        else:
            logger.info('RDMA: driver package is NOT installed')
            self.update_rdma_package(fw_version)

    def is_rdma_package_up_to_date(self, pkg, fw_version):
        """Return a truthy match when the installed package carries fw_version."""
        # Example match (pkg name, '-', three version segments, fw_version, '-'):
        #   pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64, fw_version=142
        # Raw string so that \d is a regex escape, not a (deprecated)
        # string escape.
        pattern = r'{0}-\d\.\d\.\d\.({1})-'.format(
            self.rdma_user_mode_package_name, fw_version)
        return re.match(pattern, pkg)

    @staticmethod
    def get_int_rdma_version(version):
        """Return the leading dotted component of the firmware version.

        str.split always returns at least one element, so the former
        'len(s) == 0' check could never fire; validate that the first
        component is non-empty instead.
        """
        s = version.split('.')
        if not s[0]:
            raise Exception('Unexpected RDMA firmware version: "%s"' % version)
        return s[0]

    def get_rdma_package_info(self):
        """
        Returns the installed rdma package name or None
        """
        ret, output = shellutil.run_get_output(
            'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False)
        if ret != 0:
            return None
        return output

    def update_rdma_package(self, fw_version):
        """Refresh yum metadata, force-install the wrapper package, then
        install the firmware-matched kernel/user mode drivers."""
        logger.info("RDMA: updating RDMA packages")
        self.refresh_repos()
        self.force_install_package(self.rdma_wrapper_package_name)
        self.install_rdma_drivers(fw_version)

    def force_install_package(self, pkg_name):
        """
        Attempts to remove existing package and installs the package
        """
        logger.info('RDMA: Force installing package: %s' % pkg_name)
        if self.uninstall_package(pkg_name) != 0:
            logger.info('RDMA: Erasing package failed but will continue')
        if self.install_package(pkg_name) != 0:
            raise Exception('Failed to install package "{0}"'.format(pkg_name))
        logger.info('RDMA: installation completed: %s' % pkg_name)

    @staticmethod
    def uninstall_package(pkg_name):
        # Returns the yum exit code (0 on success).
        return shellutil.run('yum erase -y -q {0}'.format(pkg_name))

    @staticmethod
    def install_package(pkg_name):
        # Returns the yum exit code (0 on success).
        return shellutil.run('yum install -y -q {0}'.format(pkg_name))

    def refresh_repos(self):
        """Clean and refresh yum repository metadata; raise on failure."""
        logger.info("RDMA: refreshing yum repos")
        if shellutil.run('yum clean all') != 0:
            raise Exception('Cleaning yum repositories failed')
        if shellutil.run('yum updateinfo') != 0:
            raise Exception('Failed to act on yum repo update information')
        logger.info("RDMA: repositories refreshed")

    def install_rdma_drivers(self, fw_version):
        """
        Installs the drivers from the /opt/microsoft/rdma/rhel[Major][Minor]
        directory, particularly the microsoft-hyper-v-rdma-* and kmod-*
        packages (no debuginfo or src). Tries to uninstall them first.
        """
        pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format(
            self.version_major, self.version_minor)
        logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir))
        if not os.path.isdir(pkg_dir):
            raise Exception('RDMA packages directory %s is missing' % pkg_dir)

        pkgs = os.listdir(pkg_dir)
        logger.info('RDMA: found %d files in package directory' % len(pkgs))

        # Uninstall the KVP daemon package first (if it exists).
        self.uninstall_kvp_driver_package_if_exists()

        # Install the kernel mode driver (kmod-microsoft-hyper-v-rdma-*).
        # Raw strings keep the \d regex escapes intact.
        kmod_pkg = self.get_file_by_pattern(
            pkgs, r"%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version))
        if not kmod_pkg:
            raise Exception("RDMA kernel mode package not found")
        kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg)
        self.uninstall_pkg_and_install_from(
            'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path)

        # Install the user mode driver (microsoft-hyper-v-rdma-*).
        umod_pkg = self.get_file_by_pattern(
            pkgs, r"%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version))
        if not umod_pkg:
            raise Exception("RDMA user mode package not found")
        umod_pkg_path = os.path.join(pkg_dir, umod_pkg)
        self.uninstall_pkg_and_install_from(
            'user mode', self.rdma_user_mode_package_name, umod_pkg_path)

        logger.info("RDMA: driver packages installed")
        self.load_driver_module()
        if not self.is_driver_loaded():
            logger.info("RDMA: driver module is not loaded; reboot required")
            self.reboot_system()
        else:
            logger.info("RDMA: kernel module is loaded")

    @staticmethod
    def get_file_by_pattern(file_names, pattern):
        """Return the first entry of file_names matching pattern, or None.

        (Parameter renamed from 'list', which shadowed the builtin.)
        """
        for name in file_names:
            if re.match(pattern, name):
                return name
        return None

    def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path):
        """Best-effort uninstall of pkg_name, then install the RPM at pkg_path."""
        logger.info(
            "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path))
        logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name)
        if self.uninstall_package(pkg_name) == 0:
            logger.info("RDMA: Successfully uninstaled %s" % pkg_name)
        logger.info(
            "RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path))
        if self.install_package(pkg_path) != 0:
            raise Exception(
                "Failed to install RDMA {0} package".format(pkg_type))

    @staticmethod
    def is_package_installed(pkg):
        """Runs rpm -q and checks return code to find out if a package
        is installed"""
        return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0

    def uninstall_kvp_driver_package_if_exists(self):
        """Erase any installed KVP daemon package (old or new name)."""
        logger.info('RDMA: deleting existing kvp driver packages')

        kvp_pkgs = [self.hyper_v_package_name,
                    self.hyper_v_package_name_new]

        for kvp_pkg in kvp_pkgs:
            if not self.is_package_installed(kvp_pkg):
                logger.info(
                    "RDMA: kvp package %s does not exist, skipping" % kvp_pkg)
            else:
                logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg)
                if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0:
                    logger.info("RDMA: successfully erased package")
                else:
                    logger.error("RDMA: failed to erase package")

    def check_or_install_kvp_daemon(self):
        """Checks if kvp daemon package is installed, if not installs the
        package and reboots the machine.
        """
        logger.info("RDMA: Checking kvp daemon packages.")
        kvp_pkgs = [self.hyper_v_package_name,
                    self.hyper_v_package_name_new]

        for pkg in kvp_pkgs:
            logger.info("RDMA: Checking if package %s installed" % pkg)
            installed = self.is_package_installed(pkg)
            if installed:
                raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg)

        kvp_pkg_to_install = self.hyper_v_package_name
        logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install)
        logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install)
        if self.install_package(kvp_pkg_to_install) != 0:
            raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install)
        logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install)

        logger.info("RDMA: Machine will now be rebooted.")
        self.reboot_system()
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import glob
import os
import re
import time
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.rdma import RDMAHandler
class CentOSRDMAHandler(RDMAHandler):
    # NOTE(review): this class is a duplicate of the CentOSRDMAHandler
    # defined earlier in this dump (the dataset's "cleaned" copy of the
    # same file).  Its final statement (self.reboot_system()) sits on the
    # next line, fused with dataset metadata, so the code here is kept
    # byte-identical and only annotated.

    # Package names; the wrapper package drops per-release RPMs under
    # /opt/microsoft/rdma/.
    rdma_user_mode_package_name = 'microsoft-hyper-v-rdma'
    rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma'
    rdma_wrapper_package_name = 'msft-rdma-drivers'
    hyper_v_package_name = "hypervkvpd"
    hyper_v_package_name_new = "microsoft-hyper-v"

    version_major = None   # e.g. '7' for CentOS 7.3
    version_minor = None   # e.g. '3' for CentOS 7.3

    def __init__(self, distro_version):
        # distro_version: dotted release string such as '7.3'.
        v = distro_version.split('.')
        if len(v) < 2:
            raise Exception('Unexpected centos version: %s' % distro_version)
        self.version_major, self.version_minor = v[0], v[1]

    def install_driver(self):
        """
        Install the KVP daemon and the appropriate RDMA driver package for the
        RDMA firmware.
        """
        # Check and install the KVP deamon if it not running
        time.sleep(10)  # give some time for the hv_hvp_daemon to start up.
        kvpd_running = RDMAHandler.is_kvp_daemon_running()
        logger.info('RDMA: kvp daemon running: %s' % kvpd_running)
        if not kvpd_running:
            self.check_or_install_kvp_daemon()
            time.sleep(10)  # wait for post-install reboot or kvp to come up
        # Find out RDMA firmware version and see if the existing package needs
        # updating or if the package is missing altogether (and install it)
        fw_version = RDMAHandler.get_rdma_version()
        if not fw_version:
            raise Exception('Cannot determine RDMA firmware version')
        logger.info("RDMA: found firmware version: {0}".format(fw_version))
        fw_version = self.get_int_rdma_version(fw_version)
        installed_pkg = self.get_rdma_package_info()
        if installed_pkg:
            logger.info(
                'RDMA: driver package present: {0}'.format(installed_pkg))
            if self.is_rdma_package_up_to_date(installed_pkg, fw_version):
                logger.info('RDMA: driver package is up-to-date')
                return
            else:
                logger.info('RDMA: driver package needs updating')
                self.update_rdma_package(fw_version)
        else:
            logger.info('RDMA: driver package is NOT installed')
            self.update_rdma_package(fw_version)

    def is_rdma_package_up_to_date(self, pkg, fw_version):
        # Example match (pkg name, -, followed by 3 segments, fw_version and -):
        # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64
        # - fw_version=142
        # NOTE(review): non-raw string; \d is an invalid string escape and
        # should be a raw string (fixed in the first copy of this class).
        pattern = '{0}-\d\.\d\.\d\.({1})-'.format(
            self.rdma_user_mode_package_name, fw_version)
        return re.match(pattern, pkg)

    @staticmethod
    def get_int_rdma_version(version):
        # Returns the leading dotted component of the firmware version.
        # NOTE(review): str.split always returns at least one element, so
        # this length check can never fire.
        s = version.split('.')
        if len(s) == 0:
            raise Exception('Unexpected RDMA firmware version: "%s"' % version)
        return s[0]

    def get_rdma_package_info(self):
        """
        Returns the installed rdma package name or None
        """
        ret, output = shellutil.run_get_output(
            'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False)
        if ret != 0:
            return None
        return output

    def update_rdma_package(self, fw_version):
        # Refresh yum metadata, force-install the wrapper package, then
        # install the firmware-matched drivers.
        logger.info("RDMA: updating RDMA packages")
        self.refresh_repos()
        self.force_install_package(self.rdma_wrapper_package_name)
        self.install_rdma_drivers(fw_version)

    def force_install_package(self, pkg_name):
        """
        Attempts to remove existing package and installs the package
        """
        logger.info('RDMA: Force installing package: %s' % pkg_name)
        if self.uninstall_package(pkg_name) != 0:
            logger.info('RDMA: Erasing package failed but will continue')
        if self.install_package(pkg_name) != 0:
            raise Exception('Failed to install package "{0}"'.format(pkg_name))
        logger.info('RDMA: installation completed: %s' % pkg_name)

    @staticmethod
    def uninstall_package(pkg_name):
        # Returns the yum exit code (0 on success).
        return shellutil.run('yum erase -y -q {0}'.format(pkg_name))

    @staticmethod
    def install_package(pkg_name):
        # Returns the yum exit code (0 on success).
        return shellutil.run('yum install -y -q {0}'.format(pkg_name))

    def refresh_repos(self):
        # Clean and refresh yum repository metadata; raises on failure.
        logger.info("RDMA: refreshing yum repos")
        if shellutil.run('yum clean all') != 0:
            raise Exception('Cleaning yum repositories failed')
        if shellutil.run('yum updateinfo') != 0:
            raise Exception('Failed to act on yum repo update information')
        logger.info("RDMA: repositories refreshed")

    def install_rdma_drivers(self, fw_version):
        """
        Installs the drivers from /opt/rdma/rhel[Major][Minor] directory,
        particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or
        src). Tries to uninstall them first.
        """
        pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format(
            self.version_major, self.version_minor)
        logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir))
        if not os.path.isdir(pkg_dir):
            raise Exception('RDMA packages directory %s is missing' % pkg_dir)
        pkgs = os.listdir(pkg_dir)
        logger.info('RDMA: found %d files in package directory' % len(pkgs))
        # Uninstal KVP daemon first (if exists)
        self.uninstall_kvp_driver_package_if_exists()
        # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*)
        kmod_pkg = self.get_file_by_pattern(
            pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version))
        if not kmod_pkg:
            raise Exception("RDMA kernel mode package not found")
        kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg)
        self.uninstall_pkg_and_install_from(
            'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path)
        # Install user mode driver (microsoft-hyper-v-rdma-*)
        umod_pkg = self.get_file_by_pattern(
            pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version))
        if not umod_pkg:
            raise Exception("RDMA user mode package not found")
        umod_pkg_path = os.path.join(pkg_dir, umod_pkg)
        self.uninstall_pkg_and_install_from(
            'user mode', self.rdma_user_mode_package_name, umod_pkg_path)
        logger.info("RDMA: driver packages installed")
        self.load_driver_module()
        if not self.is_driver_loaded():
            logger.info("RDMA: driver module is not loaded; reboot required")
            self.reboot_system()
        else:
            logger.info("RDMA: kernel module is loaded")

    @staticmethod
    def get_file_by_pattern(list, pattern):
        # Returns the first entry matching pattern, or None.
        # NOTE(review): parameter name shadows the builtin 'list'.
        for l in list:
            if re.match(pattern, l):
                return l
        return None

    def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path):
        # Best-effort uninstall of pkg_name, then install the RPM at pkg_path.
        logger.info(
            "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path))
        logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name)
        if self.uninstall_package(pkg_name) == 0:
            logger.info("RDMA: Successfully uninstaled %s" % pkg_name)
        logger.info(
            "RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path))
        if self.install_package(pkg_path) != 0:
            raise Exception(
                "Failed to install RDMA {0} package".format(pkg_type))

    @staticmethod
    def is_package_installed(pkg):
        """Runs rpm -q and checks return code to find out if a package
        is installed"""
        return shellutil.run("rpm -q %s" % pkg, chk_err=False) == 0

    def uninstall_kvp_driver_package_if_exists(self):
        # Erase any installed KVP daemon package (old or new name).
        logger.info('RDMA: deleting existing kvp driver packages')
        kvp_pkgs = [self.hyper_v_package_name,
                    self.hyper_v_package_name_new]
        for kvp_pkg in kvp_pkgs:
            if not self.is_package_installed(kvp_pkg):
                logger.info(
                    "RDMA: kvp package %s does not exist, skipping" % kvp_pkg)
            else:
                logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg)
                if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0:
                    logger.info("RDMA: successfully erased package")
                else:
                    logger.error("RDMA: failed to erase package")

    def check_or_install_kvp_daemon(self):
        """Checks if kvp daemon package is installed, if not installs the
        package and reboots the machine.
        """
        logger.info("RDMA: Checking kvp daemon packages.")
        kvp_pkgs = [self.hyper_v_package_name,
                    self.hyper_v_package_name_new]
        for pkg in kvp_pkgs:
            logger.info("RDMA: Checking if package %s installed" % pkg)
            installed = self.is_package_installed(pkg)
            if installed:
                raise Exception('RDMA: package %s is installed, but the kvp daemon is not running' % pkg)
        kvp_pkg_to_install=self.hyper_v_package_name
        logger.info("RDMA: no kvp drivers installed, will install '%s'" % kvp_pkg_to_install)
        logger.info("RDMA: trying to install kvp package '%s'" % kvp_pkg_to_install)
        if self.install_package(kvp_pkg_to_install) != 0:
            raise Exception("RDMA: failed to install kvp daemon package '%s'" % kvp_pkg_to_install)
        logger.info("RDMA: package '%s' successfully installed" % kvp_pkg_to_install)
        logger.info("RDMA: Machine will now be rebooted.")
self.reboot_system() | en | 0.725564 | # Microsoft Azure Linux Agent # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ # Install the KVP daemon and the appropriate RDMA driver package for the RDMA firmware. # Check and install the KVP deamon if it not running # give some time for the hv_hvp_daemon to start up. # wait for post-install reboot or kvp to come up # Find out RDMA firmware version and see if the existing package needs # updating or if the package is missing altogether (and install it) # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 Returns the installed rdma package name or None Attempts to remove existing package and installs the package Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or src). Tries to uninstall them first. # Uninstal KVP daemon first (if exists) # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) # Install user mode driver (microsoft-hyper-v-rdma-*) Runs rpm -q and checks return code to find out if a package is installed Checks if kvp daemon package is installed, if not installs the package and reboots the machine. | 1.975533 | 2 |
mortgage-xgboost/e2e.py | stjordanis/blazingsql-public-demos | 1 | 6624862 | <reponame>stjordanis/blazingsql-public-demos<filename>mortgage-xgboost/e2e.py
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
import cudf
from cudf.dataframe import DataFrame
from collections import OrderedDict
import gc
from glob import glob
import os
import pyblazing
import pandas as pd
import time
from chronometer import Chronometer
from pyblazing import FileSystemType, SchemaFrom, DriverType
def register_hdfs():
    """Register a local HDFS file system with BlazingSQL under the
    'myLocalHdfs' authority and print the resulting status."""
    print('*** Register a HDFS File System ***')
    hdfs_params = {
        "host": "127.0.0.1",
        "port": 54310,
        "user": "hadoop",
        "driverType": DriverType.LIBHDFS3,
        "kerberosTicket": "",
    }
    status = pyblazing.register_file_system(
        authority="myLocalHdfs",
        type=FileSystemType.HDFS,
        root="/",
        params=hdfs_params,
    )
    print(status)
def deregister_hdfs():
    """Remove the 'myLocalHdfs' file system registration and print the status."""
    print(pyblazing.deregister_file_system(authority="myLocalHdfs"))
def register_posix():
    """Register the directory containing this script as a POSIX file
    system under the 'mortgage' authority and print the status.

    The redundant function-local ``import os`` was removed; ``os`` is
    already imported at module level.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print('*** Register a POSIX File System ***')
    fs_status = pyblazing.register_file_system(
        authority="mortgage",
        type=FileSystemType.POSIX,
        root=dir_path
    )
    print(fs_status)
def deregister_posix():
    """Remove the 'mortgage' POSIX file system registration and print the status."""
    print(pyblazing.deregister_file_system(authority="mortgage"))
from libgdf_cffi import ffi, libgdf
def get_dtype_values(dtypes):
    """Translate a {column_name: type_name} mapping into a list of libgdf
    dtype constants, in the mapping's iteration order.

    Unknown type names fall back to ``libgdf.GDF_INT64``.
    """
    name_to_gdf = {
        'str': libgdf.GDF_STRING,
        'date': libgdf.GDF_DATE64,
        'date64': libgdf.GDF_DATE64,
        'date32': libgdf.GDF_DATE32,
        'timestamp': libgdf.GDF_TIMESTAMP,
        'category': libgdf.GDF_CATEGORY,
        'float': libgdf.GDF_FLOAT32,
        'double': libgdf.GDF_FLOAT64,
        'float32': libgdf.GDF_FLOAT32,
        'float64': libgdf.GDF_FLOAT64,
        'short': libgdf.GDF_INT16,
        'long': libgdf.GDF_INT64,
        'int': libgdf.GDF_INT32,
        'int32': libgdf.GDF_INT32,
        'int64': libgdf.GDF_INT64,
    }
    # dict.get with an explicit default replaces the previous truthiness
    # test ("if dicc.get(name):"), which would have silently mis-mapped
    # any type whose enum constant happens to be 0.  The lookup table is
    # also built once per call instead of once per column.
    values = [name_to_gdf.get(dtypes[key], libgdf.GDF_INT64) for key in dtypes]
    print('>>>> dtyps for', dtypes.values())
    print(values)
    return values
def get_type_schema(path):
    """Infer the pyblazing schema kind from a file path's extension.

    Returns ``SchemaFrom.ParquetFile`` for ``.parquet``,
    ``SchemaFrom.CsvFile`` for ``csv``/``psv``/``txt*`` and (implicitly)
    None for anything else.
    """
    ext = path.split('.')[-1]  # renamed: 'format' shadowed the builtin
    if ext == 'parquet':
        return SchemaFrom.ParquetFile
    elif ext == 'csv' or ext == 'psv' or ext.startswith("txt"):
        return SchemaFrom.CsvFile
def open_perf_table(table_ref):
    """Run ``select *`` over the (single) table in ``table_ref``.

    ``table_ref`` maps a table handle (exposing ``.table_name``) to its
    columns.  The old loop returned on its first iteration, so only the
    first key was ever used; this makes that explicit.  An empty dict
    still yields None, matching the previous implicit behavior.
    """
    if not table_ref:
        return None
    first_key = next(iter(table_ref))
    sql = 'select * from main.%(table_name)s' % {"table_name": first_key.table_name}
    return pyblazing.run_query(sql, table_ref)
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
    """Run the full ETL pipeline for one acquisition quarter.

    Loads the names/acquisition/performance tables, derives the
    delinquency features, and returns
    ``[final_gdf, load_seconds, etl_seconds]``.

    NOTE(review): relies on the module-level global ``acq_data_path`` and
    on helpers (``combine_joined_12_mon``, ``final_performance_delinquency``,
    ``join_perf_acq_gdfs``, ``last_mile_cleaning``) defined further down
    the file -- confirm they exist before calling.
    """
    import time  # redundant: 'time' is already imported at module level
    load_start_time = time.time()
    names = gpu_load_names()
    acq_gdf = gpu_load_acquisition_csv(acquisition_path=acq_data_path + "/Acquisition_"
        + str(year) + "Q" + str(quarter) + ".txt")
    gdf = gpu_load_performance_csv(perf_file)
    load_end_time = time.time()
    etl_start_time = time.time()
    # Feature derivation; intermediates are deleted eagerly (del/gc) to
    # release GPU memory as soon as each stage is consumed.
    acq_gdf_results = merge_names(acq_gdf, names)
    everdf_results = create_ever_features(gdf)
    delinq_merge_results = create_delinq_features(gdf)
    new_everdf_results = join_ever_delinq_features(everdf_results.columns, delinq_merge_results.columns)
    joined_df_results = create_joined_df(gdf.columns, new_everdf_results.columns)
    del (new_everdf_results)
    testdf_results = create_12_mon_features_union(joined_df_results.columns)
    testdf = testdf_results.columns
    new_joined_df_results = combine_joined_12_mon(joined_df_results.columns, testdf)
    del (testdf)
    del (joined_df_results)
    perf_df_results = final_performance_delinquency(gdf.columns, new_joined_df_results.columns)
    del (gdf)
    del (new_joined_df_results)
    final_gdf_results = join_perf_acq_gdfs(perf_df_results.columns, acq_gdf_results.columns)
    del (perf_df_results)
    del (acq_gdf_results)
    final_gdf = last_mile_cleaning(final_gdf_results.columns)
    etl_end_time = time.time()
    return [final_gdf, (load_end_time - load_start_time), (etl_end_time - etl_start_time)]
def gpu_load_performance_csv(performance_path, **kwargs):
    """Register the pipe-delimited loan-performance file as the
    BlazingSQL table 'perf' and return the table handle.

    The file has one header row (``skip_rows=1``); ``cols``/``dtypes``
    describe the Fannie Mae performance-file layout.
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
        "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
        "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
        "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
        "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
        "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
        "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
        "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
        "foreclosure_principal_write_off_amount", "servicing_activity_indicator"
    ]
    # Ordered so positions line up with 'cols' when converted via
    # get_dtype_values().
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("monthly_reporting_period", "date"),
        ("servicer", "category"),
        ("interest_rate", "float64"),
        ("current_actual_upb", "float64"),
        ("loan_age", "float64"),
        ("remaining_months_to_legal_maturity", "float64"),
        ("adj_remaining_months_to_maturity", "float64"),
        ("maturity_date", "date"),
        ("msa", "float64"),
        ("current_loan_delinquency_status", "int32"),
        ("mod_flag", "category"),
        ("zero_balance_code", "category"),
        ("zero_balance_effective_date", "date"),
        ("last_paid_installment_date", "date"),
        ("foreclosed_after", "date"),
        ("disposition_date", "date"),
        ("foreclosure_costs", "float64"),
        ("prop_preservation_and_repair_costs", "float64"),
        ("asset_recovery_costs", "float64"),
        ("misc_holding_expenses", "float64"),
        ("holding_taxes", "float64"),
        ("net_sale_proceeds", "float64"),
        ("credit_enhancement_proceeds", "float64"),
        ("repurchase_make_whole_proceeds", "float64"),
        ("other_foreclosure_proceeds", "float64"),
        ("non_interest_bearing_upb", "float64"),
        ("principal_forgiveness_upb", "float64"),
        ("repurchase_make_whole_proceeds_flag", "category"),
        ("foreclosure_principal_write_off_amount", "float64"),
        ("servicing_activity_indicator", "category")
    ])
    print(performance_path)
    performance_table = pyblazing.create_table(table_name='perf', type=get_type_schema(performance_path), path=performance_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Performance CSV')
    return performance_table
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
    """Register the pipe-delimited loan-acquisition file as the
    BlazingSQL table 'acq' and return the table handle.

    One header row is skipped; ``cols``/``dtypes`` describe the Fannie
    Mae acquisition-file layout.
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
        'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
        'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
        'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator'
    ]
    # Ordered so positions line up with 'cols' when converted via
    # get_dtype_values().
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("orig_channel", "category"),
        ("seller_name", "category"),
        ("orig_interest_rate", "float64"),
        ("orig_upb", "int64"),
        ("orig_loan_term", "int64"),
        ("orig_date", "date"),
        ("first_pay_date", "date"),
        ("orig_ltv", "float64"),
        ("orig_cltv", "float64"),
        ("num_borrowers", "float64"),
        ("dti", "float64"),
        ("borrower_credit_score", "float64"),
        ("first_home_buyer", "category"),
        ("loan_purpose", "category"),
        ("property_type", "category"),
        ("num_units", "int64"),
        ("occupancy_status", "category"),
        ("property_state", "category"),
        ("zip", "int64"),
        ("mortgage_insurance_percent", "float64"),
        ("product_type", "category"),
        ("coborrow_credit_score", "float64"),
        ("mortgage_insurance_type", "float64"),
        ("relocation_mortgage_indicator", "category")
    ])
    print(acquisition_path)
    acquisition_table = pyblazing.create_table(table_name='acq', type=get_type_schema(acquisition_path), path=acquisition_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Acquisition CSV')
    return acquisition_table
def gpu_load_names(**kwargs):
    """Register the seller-name remapping file as the BlazingSQL table
    'names' and return the table handle.

    NOTE(review): reads the module-level global ``col_names_path``, which
    is not defined in this portion of the file -- confirm it is assigned
    before this function is called.
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        'seller_name', 'new_seller_name'
    ]
    dtypes = OrderedDict([
        ("seller_name", "category"),
        ("new_seller_name", "category"),
    ])
    names_table = pyblazing.create_table(table_name='names', type=get_type_schema(col_names_path), path=col_names_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Names CSV')
    return names_table
def merge_names(names_table, acq_table):
    """Left-join acquisition rows with the canonical seller-name mapping,
    replacing seller_name with new_seller_name.

    NOTE(review): the caller invokes this as ``merge_names(acq_gdf, names)``,
    i.e. with arguments in the opposite order of the parameter names.
    The body is insensitive to the order because the SQL refers to the
    *registered* table names ('main.acq', 'main.names') taken from each
    table object's ``.name`` attribute -- but the parameter names are
    misleading; confirm and consider aligning the call site.
    """
    chronometer = Chronometer.makeStarted()
    tables = {names_table.name: names_table.columns,
              acq_table.name:acq_table.columns}
    query = """SELECT loan_id, orig_channel, orig_interest_rate, orig_upb, orig_loan_term,
orig_date, first_pay_date, orig_ltv, orig_cltv, num_borrowers, dti, borrower_credit_score,
first_home_buyer, loan_purpose, property_type, num_units, occupancy_status, property_state,
zip, mortgage_insurance_percent, product_type, coborrow_credit_score, mortgage_insurance_type,
relocation_mortgage_indicator, new_seller_name as seller_name
FROM main.acq as a LEFT OUTER JOIN main.names as n ON a.seller_name = n.seller_name"""
    result = pyblazing.run_query(query, tables)
    Chronometer.show(chronometer, 'Create Acquisition (Merge Names)')
    return result
def create_ever_features(table, **kwargs):
    """Compute per-loan ever-30/90/180-day delinquency flags from main.perf."""
    timer = Chronometer.makeStarted()
    query = """SELECT loan_id,
max(current_loan_delinquency_status) >= 1 as ever_30,
max(current_loan_delinquency_status) >= 3 as ever_90,
max(current_loan_delinquency_status) >= 6 as ever_180
FROM main.perf group by loan_id"""
    ever_flags = pyblazing.run_query(query, {table.name: table.columns})
    Chronometer.show(timer, 'Create Ever Features')
    return ever_flags
def create_delinq_features(table, **kwargs):
    """Compute, per loan, the first date it went 30/90/180+ days
    delinquent, with missing 90/180 dates coalesced to the epoch.
    """
    chronometer = Chronometer.makeStarted()
    # First reporting period at each delinquency threshold.
    query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_30
FROM main.perf where current_loan_delinquency_status >= 1 group by loan_id"""
    result_delinq_30 = pyblazing.run_query(query, {table.name: table.columns})
    query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_90
FROM main.perf where current_loan_delinquency_status >= 3 group by loan_id"""
    result_delinq_90 = pyblazing.run_query(query, {table.name: table.columns})
    query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_180
FROM main.perf where current_loan_delinquency_status >= 6 group by loan_id"""
    result_delinq_180 = pyblazing.run_query(query, {table.name: table.columns})
    # Left-join the three threshold tables; a loan 30+ days delinquent may
    # never have reached 90/180, hence the COALESCE to the epoch date.
    new_tables = {"delinq_30": result_delinq_30.columns, "delinq_90": result_delinq_90.columns, "delinq_180": result_delinq_180.columns}
    query = """SELECT d30.loan_id, delinquency_30, COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90,
COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.delinq_30 as d30
LEFT OUTER JOIN main.delinq_90 as d90 ON d30.loan_id = d90.loan_id
LEFT OUTER JOIN main.delinq_180 as d180 ON d30.loan_id = d180.loan_id"""
    result_merge = pyblazing.run_query(query, new_tables)
    Chronometer.show(chronometer, 'Create deliquency features')
    return result_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
    """Left-join the ever-delinquent flags with the first-delinquency
    dates, coalescing missing dates to the epoch."""
    timer = Chronometer.makeStarted()
    table_map = dict(everdf=everdf_tmp, delinq=delinq_merge)
    query = """SELECT everdf.loan_id as loan_id, ever_30, ever_90, ever_180,
COALESCE(delinquency_30, DATE '1970-01-01') as delinquency_30,
COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90,
COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.everdf as everdf
LEFT OUTER JOIN main.delinq as delinq ON everdf.loan_id = delinq.loan_id"""
    joined = pyblazing.run_query(query, table_map)
    Chronometer.show(timer, 'Create ever deliquency features')
    return joined
def create_joined_df(gdf, everdf, **kwargs):
    """Join per-row performance data with the per-loan ever/delinquency features.

    Extracts month/year from monthly_reporting_period, and fills NULLs with
    sentinels so downstream aggregations see no missing values
    (status -> -1, upb -> 999999999.9, dates -> epoch).

    Parameters
    ----------
    gdf : ``.columns`` of the performance table.
    everdf : ``.columns`` of the join_ever_delinq_features() result.
    """
    chronometer = Chronometer.makeStarted()
    tables = {"perf": gdf, "everdf": everdf}
    query = """SELECT perf.loan_id as loan_id,
            perf.monthly_reporting_period as mrp_timestamp,
            EXTRACT(MONTH FROM perf.monthly_reporting_period) as timestamp_month,
            EXTRACT(YEAR FROM perf.monthly_reporting_period) as timestamp_year,
            COALESCE(perf.current_loan_delinquency_status, -1) as delinquency_12,
            COALESCE(perf.current_actual_upb, 999999999.9) as upb_12,
            everdf.ever_30 as ever_30,
            everdf.ever_90 as ever_90,
            everdf.ever_180 as ever_180,
            COALESCE(everdf.delinquency_30, DATE '1970-01-01') as delinquency_30,
            COALESCE(everdf.delinquency_90, DATE '1970-01-01') as delinquency_90,
            COALESCE(everdf.delinquency_180, DATE '1970-01-01') as delinquency_180
        FROM main.perf as perf
        LEFT OUTER JOIN main.everdf as everdf ON perf.loan_id = everdf.loan_id"""
    results = pyblazing.run_query(query, tables)
    Chronometer.show(chronometer, 'Create Joined DF')
    return results
def create_12_mon_features_union(joined_df, **kwargs):
    """Build forward-looking 12-month delinquency/UPB aggregates.

    For each of 12 month offsets, loans are re-bucketed by a shifted month
    index (``josh_mody_n``) and the worst delinquency / minimum UPB over the
    window is aggregated; the 12 per-offset results are then stitched back
    together with a UNION ALL, reconstructing timestamp_year/timestamp_month
    from the bucket index. Queries are submitted asynchronously via
    run_query_get_token and collected afterwards.

    Parameters
    ----------
    joined_df : ``.columns`` of the create_joined_df() result.
    """
    chronometer = Chronometer.makeStarted()
    tables = {"joined_df": joined_df}
    # Months since year 2000, as a single linear index (24000 = 2000 * 12).
    josh_mody_n_str = "timestamp_year * 12 + timestamp_month - 24000.0"
    query = "SELECT loan_id, " + josh_mody_n_str + " as josh_mody_n, max(delinquency_12) as max_d12, min(upb_12) as min_upb_12 FROM main.joined_df as joined_df GROUP BY loan_id, " + josh_mody_n_str
    mastertemp = pyblazing.run_query(query, tables)
    all_temps = []
    all_tokens = []
    tables = {"joined_df": mastertemp.columns}
    n_months = 12
    # Fire off one aggregation per month offset without blocking.
    for y in range(1, n_months + 1):
        # Shift the month index by y, then bucket into 12-month windows.
        josh_mody_n_str = "floor((josh_mody_n - " + str(y) + ")/12.0)"
        query = "SELECT loan_id, " + josh_mody_n_str + " as josh_mody_n, max(max_d12) > 3 as max_d12_gt3, min(min_upb_12) = 0 as min_upb_12_eq0, min(min_upb_12) as upb_12 FROM main.joined_df as joined_df GROUP BY loan_id, " + josh_mody_n_str
        metaToken = pyblazing.run_query_get_token(query, tables)
        all_tokens.append(metaToken)
    # Collect the async results in submission order (index y-1 == offset y).
    for metaToken in all_tokens:
        temp = pyblazing.run_query_get_results(metaToken)
        all_temps.append(temp)
    # Start the UNION ALL chain with offset 1, then append offsets 2..12.
    y = 1
    tables2 = {"temp1": all_temps[0].columns}
    union_query = "(SELECT loan_id, max_d12_gt3 + min_upb_12_eq0 as delinquency_12, upb_12, floor(((josh_mody_n * 12) + " + str(
        24000 + (y - 1)) + ")/12) as timestamp_year, josh_mody_n * 0 + " + str(
        y) + " as timestamp_month from main.temp" + str(y) + ")"
    for y in range(2, n_months + 1):
        tables2["temp" + str(y)] = all_temps[y-1].columns
        query = " UNION ALL (SELECT loan_id, max_d12_gt3 + min_upb_12_eq0 as delinquency_12, upb_12, floor(((josh_mody_n * 12) + " + str(
            24000 + (y - 1)) + ")/12) as timestamp_year, josh_mody_n * 0 + " + str(
            y) + " as timestamp_month from main.temp" + str(y) + ")"
        union_query = union_query + query
    results = pyblazing.run_query(union_query, tables2)
    Chronometer.show(chronometer, 'Create 12 month features once')
    return results
def combine_joined_12_mon(joined_df, testdf, **kwargs):
    """Attach the 12-month delinquency/UPB aggregates to the joined frame.

    Left-joins ``testdf`` onto ``joined_df`` on
    (loan_id, timestamp_year, timestamp_month); unmatched rows keep NULLs.
    """
    timer = Chronometer.makeStarted()
    sql = """SELECT j.loan_id, j.mrp_timestamp, j.timestamp_month, j.timestamp_year,
            j.ever_30, j.ever_90, j.ever_180, j.delinquency_30, j.delinquency_90, j.delinquency_180,
            t.delinquency_12, t.upb_12
        FROM main.joined_df as j LEFT OUTER JOIN main.testdf as t
        ON j.loan_id = t.loan_id and j.timestamp_year = t.timestamp_year and j.timestamp_month = t.timestamp_month"""
    merged = pyblazing.run_query(sql, {"joined_df": joined_df, "testdf": testdf})
    Chronometer.show(timer, 'Combine joind 12 month')
    return merged
def final_performance_delinquency(gdf, joined_df, **kwargs):
    """Join delinquency_12 back onto the raw performance columns.

    Matches on loan_id plus the year/month extracted from the performance
    row's monthly_reporting_period.
    """
    timer = Chronometer.makeStarted()
    sql = """SELECT g.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb
        FROM main.gdf as g LEFT OUTER JOIN main.joined_df as j
        ON g.loan_id = j.loan_id and EXTRACT(YEAR FROM g.monthly_reporting_period) = j.timestamp_year and EXTRACT(MONTH FROM g.monthly_reporting_period) = j.timestamp_month """
    joined = pyblazing.run_query(sql, {"gdf": gdf, "joined_df": joined_df})
    Chronometer.show(timer, 'Final performance delinquency')
    return joined
def join_perf_acq_gdfs(perf, acq, **kwargs):
    """Left-join acquisition attributes onto the per-row performance features.

    Parameters
    ----------
    perf : ``.columns`` of the final_performance_delinquency() result.
    acq : ``.columns`` of the merged acquisition table.

    Returns
    -------
    pyblazing query result: one row per performance record, joined on
    loan_id, carrying both performance and origination columns.
    """
    chronometer = Chronometer.makeStarted()
    tables = {"perf": perf, "acq": acq}
    query = """SELECT p.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb,
            borrower_credit_score, dti, first_home_buyer, loan_purpose, mortgage_insurance_percent, num_borrowers, num_units, occupancy_status,
            orig_channel, orig_cltv, orig_date, orig_interest_rate, orig_loan_term, orig_ltv, orig_upb, product_type, property_state, property_type,
            relocation_mortgage_indicator, seller_name, zip FROM main.perf as p LEFT OUTER JOIN main.acq as a ON p.loan_id = a.loan_id"""
    results = pyblazing.run_query(query, tables)
    Chronometer.show(chronometer, 'Join performance acquitistion gdfs')
    return results
def last_mile_cleaning(df, **kwargs):
    """Convert the final feature frame into purely numeric ML-ready columns.

    - category columns are replaced by their integer codes,
    - every column is cast to float32,
    - delinquency_12 becomes a 0/1 int32 label (was the value ever > 0),
    - remaining NaNs are filled with -1.

    Mutates *df* in place and returns it.
    """
    chronometer = Chronometer.makeStarted()
    # FIX: use .items() — Series.iteritems() was deprecated and removed in
    # pandas 2.0; .items() is the supported spelling on the dtypes Series.
    for col, dtype in df.dtypes.items():
        if str(dtype) == 'category':
            df[col] = df[col].cat.codes
        df[col] = df[col].astype('float32')
    df['delinquency_12'] = df['delinquency_12'] > 0
    df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
    for column in df.columns:
        df[column] = df[column].fillna(-1)
    Chronometer.show(chronometer, 'Last mile cleaning')
    return df
# Select exactly one input backend; if both flags are False the paths fall
# back to plain local directories next to this script.
use_registered_hdfs = False
use_registered_posix = True
if use_registered_hdfs:
    register_hdfs()
elif use_registered_posix:
    register_posix()
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
acq_data_path = ""
perf_data_path = ""
col_names_path = ""
if use_registered_hdfs:
    # Resolved through the registered 'myLocalHdfs' authority.
    acq_data_path = "hdfs://myLocalHdfs/data/acq"
    perf_data_path = "hdfs://myLocalHdfs/data/perf"
    col_names_path = "hdfs://myLocalHdfs/data/names.csv"
elif use_registered_posix:
    # Resolved through the registered 'mortgage' POSIX authority.
    acq_data_path = "file://mortgage/data/acq"
    perf_data_path = "file://mortgage/data/perf"
    col_names_path = "file://mortgage/data/names.csv"
else:
    # Plain filesystem paths relative to this script's directory.
    import os
    dir_path = os.path.dirname(os.path.realpath(__file__))
    acq_data_path = dir_path + "/data/acq"
    perf_data_path = dir_path + "/data/perf"
    col_names_path = dir_path + "/data/names.csv"
# Data window to process (years/quarters of mortgage files).
start_year = 2000
end_year = 2000  # end_year is inclusive
start_quarter = 1
end_quarter = 1
part_count = 1  # the number of data files to train against
import time
# XGBoost parameters for single-GPU training of the delinquency model.
dxgb_gpu_params = {
    'nround': 100,
    'max_depth': 8,
    'max_leaves': 2**8,
    'alpha': 0.9,
    'eta': 0.1,
    'gamma': 0.1,
    'learning_rate': 0.1,
    'subsample': 1,
    'reg_lambda': 1,
    'scale_pos_weight': 2,
    'min_child_weight': 30,
    'tree_method': 'gpu_hist',
    'n_gpus': 1,
    'distributed_dask': True,
    'loss': 'ls',
    'objective': 'reg:linear',
    'max_features': 'auto',
    'criterion': 'friedman_mse',
    'grow_policy': 'lossguide',
    # 'nthread', ncores[worker], # WSM may want to set this
    'verbose': True
}
def range1(start, end):
    """Like ``range`` but inclusive of *end* as well as *start*."""
    stop = end + 1
    return range(start, stop)
def use_file_type_suffix(year, quarter):
    """True when the performance file for (year, quarter) is split into
    chunked files carrying a ``_<chunk>`` suffix — only 2001 Q2 onwards."""
    return year == 2001 and quarter >= 2
def getChunks(year, quarter):
    """Chunk indices for a (year, quarter) file: [0, 1] when the file is
    split with a type suffix, otherwise the single chunk [0]."""
    n_chunks = 2 if use_file_type_suffix(year, quarter) else 1
    return range(n_chunks)
# Accumulate the ETL output of every (year, quarter, chunk) partition into
# one pandas label frame and one pandas feature frame.
final_cpu_df_label = None
final_cpu_df_data = None
all_load_times = []
all_etl_times = []
all_xgb_convert_times = []
for year in range1(start_year, end_year):
    for quarter in range1(start_quarter, end_quarter):
        for chunk in getChunks(year, quarter):
            chunk_sufix = "_{}".format(chunk) if use_file_type_suffix(year, quarter) else ""
            perf_file = perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + ".txt" + chunk_sufix
            [gpu_df, load_time, etl_time] = run_gpu_workflow(quarter=quarter, year=year, perf_file=perf_file)
            all_load_times.append(load_time)
            all_etl_times.append(etl_time)
            xgb_convert_start_time = time.time()
            # Split into (label frame, feature frame) before leaving the GPU.
            gpu_df = (gpu_df[['delinquency_12']], gpu_df[list(gpu_df.columns.difference(['delinquency_12']))])
            cpu_df_label = gpu_df[0].to_pandas()
            cpu_df_data = gpu_df[1].to_pandas()
            del (gpu_df)  # release GPU memory promptly
            # FIX: the original tested ``year == start_year``, which
            # overwrote (instead of appending) every chunk/quarter processed
            # during the first year, silently dropping data whenever the
            # first year had more than one partition. Seed on first
            # iteration only, concatenate afterwards.
            if final_cpu_df_label is None:
                final_cpu_df_label = cpu_df_label
                final_cpu_df_data = cpu_df_data
            else:
                final_cpu_df_label = pd.concat([final_cpu_df_label, cpu_df_label])
                final_cpu_df_data = pd.concat([final_cpu_df_data, cpu_df_data])
            xgb_convert_end_time = time.time()
            all_xgb_convert_times.append(xgb_convert_end_time - xgb_convert_start_time)
# Hold out 20% of the assembled rows for evaluation (fixed seed for
# reproducibility), then train and score the XGBoost model.
data_train, data_test, label_train, label_test = train_test_split(final_cpu_df_data, final_cpu_df_label, test_size=0.20, random_state=42)
xgdf_train = xgb.DMatrix(data_train, label_train)
xgdf_test = xgb.DMatrix(data_test, label_test)
chronometerTrain1 = Chronometer.makeStarted()
startTime = time.time()
bst = xgb.train(dxgb_gpu_params, xgdf_train, num_boost_round=dxgb_gpu_params['nround'])
Chronometer.show(chronometerTrain1, 'Train 1')
chronometerPredict1 = Chronometer.makeStarted()
preds = bst.predict(xgdf_test)
Chronometer.show(chronometerPredict1, 'Predict 1')
labels = xgdf_test.get_label()
# Fraction of test rows where the thresholded prediction (> 0.5)
# disagrees with the 0/1 label.
print('prediction error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))
endTime = time.time()
trainPredict_time = (endTime - startTime)
print("TIMES SUMMARY")
print('LOAD Time: %fs' % sum(all_load_times))
print('ETL Time: %fs' % sum(all_etl_times))
print('CONVERT Time: %fs' % sum(all_xgb_convert_times))
print('TRAIN/PREDICT Time: %fs' % trainPredict_time)
# Tear down whichever file system backend was registered at startup.
if use_registered_hdfs:
    deregister_hdfs()
elif use_registered_posix:
    deregister_posix()
Chronometer.show_resume()
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
import cudf
from cudf.dataframe import DataFrame
from collections import OrderedDict
import gc
from glob import glob
import os
import pyblazing
import pandas as pd
import time
from chronometer import Chronometer
from pyblazing import FileSystemType, SchemaFrom, DriverType
def register_hdfs():
    """Register a libhdfs3-backed HDFS file system under authority 'myLocalHdfs'.

    Connection parameters (localhost:54310, user 'hadoop', no Kerberos) are
    hard-coded for a local single-node setup; the registration status is
    printed, not returned.
    """
    print('*** Register a HDFS File System ***')
    fs_status = pyblazing.register_file_system(
        authority="myLocalHdfs",
        type=FileSystemType.HDFS,
        root="/",
        params={
            "host": "127.0.0.1",
            "port": 54310,
            "user": "hadoop",
            "driverType": DriverType.LIBHDFS3,
            "kerberosTicket": ""
        }
    )
    print(fs_status)
def deregister_hdfs():
    """Unregister the 'myLocalHdfs' file system and print the status."""
    print(pyblazing.deregister_file_system(authority="myLocalHdfs"))
def register_posix():
    """Register this script's directory as POSIX file system authority 'mortgage'.

    Paths of the form ``file://mortgage/...`` are then resolved relative to
    the script's location; the registration status is printed, not returned.
    """
    import os
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print('*** Register a POSIX File System ***')
    fs_status = pyblazing.register_file_system(
        authority="mortgage",
        type=FileSystemType.POSIX,
        root=dir_path
    )
    print(fs_status)
def deregister_posix():
    """Unregister the 'mortgage' POSIX file system and print the status."""
    print(pyblazing.deregister_file_system(authority="mortgage"))
from libgdf_cffi import ffi, libgdf
def get_dtype_values(dtypes):
    """Translate a column-name -> dtype-string mapping into libgdf type enums.

    The returned list follows the iteration order of *dtypes* (an
    OrderedDict in the callers), so it lines up with the ``names`` list
    passed to pyblazing.create_table. Unknown dtype names fall back to
    GDF_INT64.
    """
    dicc = {
        'str': libgdf.GDF_STRING,
        'date': libgdf.GDF_DATE64,
        'date64': libgdf.GDF_DATE64,
        'date32': libgdf.GDF_DATE32,
        'timestamp': libgdf.GDF_TIMESTAMP,
        'category': libgdf.GDF_CATEGORY,
        'float': libgdf.GDF_FLOAT32,
        'double': libgdf.GDF_FLOAT64,
        'float32': libgdf.GDF_FLOAT32,
        'float64': libgdf.GDF_FLOAT64,
        'short': libgdf.GDF_INT16,
        'long': libgdf.GDF_INT64,
        'int': libgdf.GDF_INT32,
        'int32': libgdf.GDF_INT32,
        'int64': libgdf.GDF_INT64,
    }
    # FIX: dict.get with an explicit default replaces the original's
    # truthiness test (``if dicc.get(type_name):``), which would silently
    # fall back to GDF_INT64 for any mapped enum whose numeric value is 0.
    # The lookup table is also built once instead of per column.
    values = [dicc.get(dtypes[key], libgdf.GDF_INT64) for key in dtypes]
    print('>>>> dtyps for', dtypes.values())
    print(values)
    return values
def get_type_schema(path):
    """Infer the pyblazing schema source from a file path's extension.

    '.parquet' -> SchemaFrom.ParquetFile; '.csv', '.psv' or anything
    starting with 'txt' (which covers chunked files like '.txt_0')
    -> SchemaFrom.CsvFile; anything else -> None.
    """
    ext = path.split('.')[-1]  # renamed from 'format': avoid shadowing the builtin
    if ext == 'parquet':
        return SchemaFrom.ParquetFile
    elif ext == 'csv' or ext == 'psv' or ext.startswith("txt"):
        return SchemaFrom.CsvFile
    return None  # implicit fall-through in the original; made explicit
def open_perf_table(table_ref):
    """Run ``SELECT *`` against a registered table and return the result.

    NOTE(review): the ``return`` sits inside the loop, so only the FIRST
    key of *table_ref* is ever queried — presumably the dict holds exactly
    one table; confirm with callers before relying on multi-table input.
    """
    for key in table_ref.keys():
        sql = 'select * from main.%(table_name)s' % {"table_name": key.table_name}
        return pyblazing.run_query(sql, table_ref)
def run_gpu_workflow(quarter=1, year=2000, perf_file="", **kwargs):
    """ETL one (year, quarter) partition into the final ML-ready frame.

    Loads the names/acquisition/performance CSVs, chains the
    feature-building queries, and returns a 3-element list:
    [final_gdf, load_seconds, etl_seconds]. Intermediate results are
    ``del``'d as soon as they are consumed to release GPU memory.
    """
    import time
    load_start_time = time.time()
    names = gpu_load_names()
    acq_gdf = gpu_load_acquisition_csv(acquisition_path=acq_data_path + "/Acquisition_"
                                       + str(year) + "Q" + str(quarter) + ".txt")
    gdf = gpu_load_performance_csv(perf_file)
    load_end_time = time.time()
    etl_start_time = time.time()
    # NOTE(review): merge_names is declared as (names_table, acq_table) but
    # is called here as (acq_gdf, names); it still resolves correctly
    # because the query binds tables via each argument's own .name
    # attribute — confirm the parameter names upstream.
    acq_gdf_results = merge_names(acq_gdf, names)
    everdf_results = create_ever_features(gdf)
    delinq_merge_results = create_delinq_features(gdf)
    new_everdf_results = join_ever_delinq_features(everdf_results.columns, delinq_merge_results.columns)
    joined_df_results = create_joined_df(gdf.columns, new_everdf_results.columns)
    del (new_everdf_results)
    testdf_results = create_12_mon_features_union(joined_df_results.columns)
    testdf = testdf_results.columns
    new_joined_df_results = combine_joined_12_mon(joined_df_results.columns, testdf)
    del (testdf)
    del (joined_df_results)
    perf_df_results = final_performance_delinquency(gdf.columns, new_joined_df_results.columns)
    del (gdf)
    del (new_joined_df_results)
    final_gdf_results = join_perf_acq_gdfs(perf_df_results.columns, acq_gdf_results.columns)
    del (perf_df_results)
    del (acq_gdf_results)
    final_gdf = last_mile_cleaning(final_gdf_results.columns)
    etl_end_time = time.time()
    return [final_gdf, (load_end_time - load_start_time), (etl_end_time - etl_start_time)]
def gpu_load_performance_csv(performance_path, **kwargs):
    """ Loads performance data

    Registers the pipe-delimited performance file as pyblazing table
    'perf' with an explicit column-name list and matching libgdf dtypes.

    Returns
    -------
    GPU DataFrame
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
        "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
        "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
        "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
        "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
        "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
        "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
        "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
        "foreclosure_principal_write_off_amount", "servicing_activity_indicator"
    ]
    # Order must match `cols`; get_dtype_values maps these strings to
    # libgdf enums in the same order.
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("monthly_reporting_period", "date"),
        ("servicer", "category"),
        ("interest_rate", "float64"),
        ("current_actual_upb", "float64"),
        ("loan_age", "float64"),
        ("remaining_months_to_legal_maturity", "float64"),
        ("adj_remaining_months_to_maturity", "float64"),
        ("maturity_date", "date"),
        ("msa", "float64"),
        ("current_loan_delinquency_status", "int32"),
        ("mod_flag", "category"),
        ("zero_balance_code", "category"),
        ("zero_balance_effective_date", "date"),
        ("last_paid_installment_date", "date"),
        ("foreclosed_after", "date"),
        ("disposition_date", "date"),
        ("foreclosure_costs", "float64"),
        ("prop_preservation_and_repair_costs", "float64"),
        ("asset_recovery_costs", "float64"),
        ("misc_holding_expenses", "float64"),
        ("holding_taxes", "float64"),
        ("net_sale_proceeds", "float64"),
        ("credit_enhancement_proceeds", "float64"),
        ("repurchase_make_whole_proceeds", "float64"),
        ("other_foreclosure_proceeds", "float64"),
        ("non_interest_bearing_upb", "float64"),
        ("principal_forgiveness_upb", "float64"),
        ("repurchase_make_whole_proceeds_flag", "category"),
        ("foreclosure_principal_write_off_amount", "float64"),
        ("servicing_activity_indicator", "category")
    ])
    print(performance_path)
    # skip_rows=1 drops the first line of the file — presumably a header
    # row; confirm against the actual data files.
    performance_table = pyblazing.create_table(table_name='perf', type=get_type_schema(performance_path), path=performance_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Performance CSV')
    return performance_table
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
    """ Loads acquisition data

    Registers the pipe-delimited acquisition file as pyblazing table 'acq'
    with an explicit column-name list and matching libgdf dtypes.

    Returns
    -------
    GPU DataFrame
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
        'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
        'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
        'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator'
    ]
    # Order must match `cols`; get_dtype_values maps these strings to
    # libgdf enums in the same order.
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("orig_channel", "category"),
        ("seller_name", "category"),
        ("orig_interest_rate", "float64"),
        ("orig_upb", "int64"),
        ("orig_loan_term", "int64"),
        ("orig_date", "date"),
        ("first_pay_date", "date"),
        ("orig_ltv", "float64"),
        ("orig_cltv", "float64"),
        ("num_borrowers", "float64"),
        ("dti", "float64"),
        ("borrower_credit_score", "float64"),
        ("first_home_buyer", "category"),
        ("loan_purpose", "category"),
        ("property_type", "category"),
        ("num_units", "int64"),
        ("occupancy_status", "category"),
        ("property_state", "category"),
        ("zip", "int64"),
        ("mortgage_insurance_percent", "float64"),
        ("product_type", "category"),
        ("coborrow_credit_score", "float64"),
        ("mortgage_insurance_type", "float64"),
        ("relocation_mortgage_indicator", "category")
    ])
    print(acquisition_path)
    # skip_rows=1 drops the first line of the file — presumably a header
    # row; confirm against the actual data files.
    acquisition_table = pyblazing.create_table(table_name='acq', type=get_type_schema(acquisition_path), path=acquisition_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Acquisition CSV')
    return acquisition_table
def gpu_load_names(**kwargs):
    """ Loads names used for renaming the banks

    Registers the seller-name mapping file (old name -> canonical name)
    as pyblazing table 'names'.

    Returns
    -------
    GPU DataFrame
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        'seller_name', 'new_seller_name'
    ]
    dtypes = OrderedDict([
        ("seller_name", "category"),
        ("new_seller_name", "category"),
    ])
    names_table = pyblazing.create_table(table_name='names', type=get_type_schema(col_names_path), path=col_names_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
    Chronometer.show(chronometer, 'Read Names CSV')
    return names_table
def merge_names(names_table, acq_table):
    """Replace seller_name in the acquisition table with its canonical name.

    Left-joins the names mapping onto the acquisition table and projects
    new_seller_name as seller_name.

    NOTE(review): the tables dict is keyed by each argument's own ``.name``
    attribute, so the function happens to work even when a caller passes
    the arguments in the opposite order of the parameter names (as
    run_gpu_workflow does) — confirm and fix the call/signature mismatch.
    """
    chronometer = Chronometer.makeStarted()
    tables = {names_table.name: names_table.columns,
              acq_table.name: acq_table.columns}
    query = """SELECT loan_id, orig_channel, orig_interest_rate, orig_upb, orig_loan_term,
            orig_date, first_pay_date, orig_ltv, orig_cltv, num_borrowers, dti, borrower_credit_score,
            first_home_buyer, loan_purpose, property_type, num_units, occupancy_status, property_state,
            zip, mortgage_insurance_percent, product_type, coborrow_credit_score, mortgage_insurance_type,
            relocation_mortgage_indicator, new_seller_name as seller_name
        FROM main.acq as a LEFT OUTER JOIN main.names as n ON a.seller_name = n.seller_name"""
    result = pyblazing.run_query(query, tables)
    Chronometer.show(chronometer, 'Create Acquisition (Merge Names)')
    return result
def create_ever_features(table, **kwargs):
    """Per-loan boolean flags: did the loan ever reach each delinquency severity.

    ever_30/90/180 are computed from the maximum delinquency status
    (counted in monthly periods: >= 1, >= 3, >= 6) observed for each loan
    in ``main.perf``.
    """
    chronometer = Chronometer.makeStarted()
    query = """SELECT loan_id,
            max(current_loan_delinquency_status) >= 1 as ever_30,
            max(current_loan_delinquency_status) >= 3 as ever_90,
            max(current_loan_delinquency_status) >= 6 as ever_180
        FROM main.perf group by loan_id"""
    result = pyblazing.run_query(query, {table.name: table.columns})
    Chronometer.show(chronometer, 'Create Ever Features')
    return result
def create_delinq_features(table, **kwargs):
chronometer = Chronometer.makeStarted()
query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_30
FROM main.perf where current_loan_delinquency_status >= 1 group by loan_id"""
result_delinq_30 = pyblazing.run_query(query, {table.name: table.columns})
query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_90
FROM main.perf where current_loan_delinquency_status >= 3 group by loan_id"""
result_delinq_90 = pyblazing.run_query(query, {table.name: table.columns})
query = """SELECT loan_id,
min(monthly_reporting_period) as delinquency_180
FROM main.perf where current_loan_delinquency_status >= 6 group by loan_id"""
result_delinq_180 = pyblazing.run_query(query, {table.name: table.columns})
new_tables = {"delinq_30": result_delinq_30.columns, "delinq_90": result_delinq_90.columns, "delinq_180": result_delinq_180.columns}
query = """SELECT d30.loan_id, delinquency_30, COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90,
COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.delinq_30 as d30
LEFT OUTER JOIN main.delinq_90 as d90 ON d30.loan_id = d90.loan_id
LEFT OUTER JOIN main.delinq_180 as d180 ON d30.loan_id = d180.loan_id"""
result_merge = pyblazing.run_query(query, new_tables)
Chronometer.show(chronometer, 'Create deliquency features')
return result_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"everdf": everdf_tmp, "delinq": delinq_merge}
query = """SELECT everdf.loan_id as loan_id, ever_30, ever_90, ever_180,
COALESCE(delinquency_30, DATE '1970-01-01') as delinquency_30,
COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90,
COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.everdf as everdf
LEFT OUTER JOIN main.delinq as delinq ON everdf.loan_id = delinq.loan_id"""
result_merge = pyblazing.run_query(query, tables)
Chronometer.show(chronometer, 'Create ever deliquency features')
return result_merge
def create_joined_df(gdf, everdf, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"perf": gdf, "everdf": everdf}
query = """SELECT perf.loan_id as loan_id,
perf.monthly_reporting_period as mrp_timestamp,
EXTRACT(MONTH FROM perf.monthly_reporting_period) as timestamp_month,
EXTRACT(YEAR FROM perf.monthly_reporting_period) as timestamp_year,
COALESCE(perf.current_loan_delinquency_status, -1) as delinquency_12,
COALESCE(perf.current_actual_upb, 999999999.9) as upb_12,
everdf.ever_30 as ever_30,
everdf.ever_90 as ever_90,
everdf.ever_180 as ever_180,
COALESCE(everdf.delinquency_30, DATE '1970-01-01') as delinquency_30,
COALESCE(everdf.delinquency_90, DATE '1970-01-01') as delinquency_90,
COALESCE(everdf.delinquency_180, DATE '1970-01-01') as delinquency_180
FROM main.perf as perf
LEFT OUTER JOIN main.everdf as everdf ON perf.loan_id = everdf.loan_id"""
results = pyblazing.run_query(query, tables)
Chronometer.show(chronometer, 'Create Joined DF')
return results
def create_12_mon_features_union(joined_df, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"joined_df": joined_df}
josh_mody_n_str = "timestamp_year * 12 + timestamp_month - 24000.0"
query = "SELECT loan_id, " + josh_mody_n_str + " as josh_mody_n, max(delinquency_12) as max_d12, min(upb_12) as min_upb_12 FROM main.joined_df as joined_df GROUP BY loan_id, " + josh_mody_n_str
mastertemp = pyblazing.run_query(query, tables)
all_temps = []
all_tokens = []
tables = {"joined_df": mastertemp.columns}
n_months = 12
for y in range(1, n_months + 1):
josh_mody_n_str = "floor((josh_mody_n - " + str(y) + ")/12.0)"
query = "SELECT loan_id, " + josh_mody_n_str + " as josh_mody_n, max(max_d12) > 3 as max_d12_gt3, min(min_upb_12) = 0 as min_upb_12_eq0, min(min_upb_12) as upb_12 FROM main.joined_df as joined_df GROUP BY loan_id, " + josh_mody_n_str
metaToken = pyblazing.run_query_get_token(query, tables)
all_tokens.append(metaToken)
for metaToken in all_tokens:
temp = pyblazing.run_query_get_results(metaToken)
all_temps.append(temp)
y = 1
tables2 = {"temp1": all_temps[0].columns}
union_query = "(SELECT loan_id, max_d12_gt3 + min_upb_12_eq0 as delinquency_12, upb_12, floor(((josh_mody_n * 12) + " + str(
24000 + (y - 1)) + ")/12) as timestamp_year, josh_mody_n * 0 + " + str(
y) + " as timestamp_month from main.temp" + str(y) + ")"
for y in range(2, n_months + 1):
tables2["temp" + str(y)] = all_temps[y-1].columns
query = " UNION ALL (SELECT loan_id, max_d12_gt3 + min_upb_12_eq0 as delinquency_12, upb_12, floor(((josh_mody_n * 12) + " + str(
24000 + (y - 1)) + ")/12) as timestamp_year, josh_mody_n * 0 + " + str(
y) + " as timestamp_month from main.temp" + str(y) + ")"
union_query = union_query + query
results = pyblazing.run_query(union_query, tables2)
Chronometer.show(chronometer, 'Create 12 month features once')
return results
def combine_joined_12_mon(joined_df, testdf, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"joined_df": joined_df, "testdf": testdf}
query = """SELECT j.loan_id, j.mrp_timestamp, j.timestamp_month, j.timestamp_year,
j.ever_30, j.ever_90, j.ever_180, j.delinquency_30, j.delinquency_90, j.delinquency_180,
t.delinquency_12, t.upb_12
FROM main.joined_df as j LEFT OUTER JOIN main.testdf as t
ON j.loan_id = t.loan_id and j.timestamp_year = t.timestamp_year and j.timestamp_month = t.timestamp_month"""
results = pyblazing.run_query(query, tables)
Chronometer.show(chronometer, 'Combine joind 12 month')
return results
def final_performance_delinquency(gdf, joined_df, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"gdf": gdf, "joined_df": joined_df}
query = """SELECT g.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb
FROM main.gdf as g LEFT OUTER JOIN main.joined_df as j
ON g.loan_id = j.loan_id and EXTRACT(YEAR FROM g.monthly_reporting_period) = j.timestamp_year and EXTRACT(MONTH FROM g.monthly_reporting_period) = j.timestamp_month """
results = pyblazing.run_query(query, tables)
Chronometer.show(chronometer, 'Final performance delinquency')
return results
def join_perf_acq_gdfs(perf, acq, **kwargs):
chronometer = Chronometer.makeStarted()
tables = {"perf": perf, "acq": acq}
query = """SELECT p.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb,
borrower_credit_score, dti, first_home_buyer, loan_purpose, mortgage_insurance_percent, num_borrowers, num_units, occupancy_status,
orig_channel, orig_cltv, orig_date, orig_interest_rate, orig_loan_term, orig_ltv, orig_upb, product_type, property_state, property_type,
relocation_mortgage_indicator, seller_name, zip FROM main.perf as p LEFT OUTER JOIN main.acq as a ON p.loan_id = a.loan_id"""
results = pyblazing.run_query(query, tables)
Chronometer.show(chronometer, 'Join performance acquitistion gdfs')
return results
def last_mile_cleaning(df, **kwargs):
    """Convert the final feature frame into purely numeric ML-ready columns.

    - category columns are replaced by their integer codes,
    - every column is cast to float32,
    - delinquency_12 becomes a 0/1 int32 label (was the value ever > 0),
    - remaining NaNs are filled with -1.

    Mutates *df* in place and returns it.
    """
    chronometer = Chronometer.makeStarted()
    # FIX: use .items() — Series.iteritems() was deprecated and removed in
    # pandas 2.0; .items() is the supported spelling on the dtypes Series.
    for col, dtype in df.dtypes.items():
        if str(dtype) == 'category':
            df[col] = df[col].cat.codes
        df[col] = df[col].astype('float32')
    df['delinquency_12'] = df['delinquency_12'] > 0
    df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
    for column in df.columns:
        df[column] = df[column].fillna(-1)
    Chronometer.show(chronometer, 'Last mile cleaning')
    return df
use_registered_hdfs = False
use_registered_posix = True
if use_registered_hdfs:
register_hdfs()
elif use_registered_posix:
register_posix()
# to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly
acq_data_path = ""
perf_data_path = ""
col_names_path = ""
if use_registered_hdfs:
acq_data_path = "hdfs://myLocalHdfs/data/acq"
perf_data_path = "hdfs://myLocalHdfs/data/perf"
col_names_path = "hdfs://myLocalHdfs/data/names.csv"
elif use_registered_posix:
acq_data_path = "file://mortgage/data/acq"
perf_data_path = "file://mortgage/data/perf"
col_names_path = "file://mortgage/data/names.csv"
else:
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
acq_data_path = dir_path + "/data/acq"
perf_data_path = dir_path + "/data/perf"
col_names_path = dir_path + "/data/names.csv"
start_year = 2000
end_year = 2000 # end_year is inclusive
start_quarter = 1
end_quarter = 1
part_count = 1 # the number of data files to train against
import time
dxgb_gpu_params = {
'nround': 100,
'max_depth': 8,
'max_leaves': 2**8,
'alpha': 0.9,
'eta': 0.1,
'gamma': 0.1,
'learning_rate': 0.1,
'subsample': 1,
'reg_lambda': 1,
'scale_pos_weight': 2,
'min_child_weight': 30,
'tree_method': 'gpu_hist',
'n_gpus': 1,
'distributed_dask': True,
'loss': 'ls',
'objective': 'reg:linear',
'max_features': 'auto',
'criterion': 'friedman_mse',
'grow_policy': 'lossguide',
# 'nthread', ncores[worker], # WSM may want to set this
'verbose': True
}
def range1(start, end):
return range(start, end+1)
def use_file_type_suffix(year, quarter):
if year==2001 and quarter>=2:
return True
return False
def getChunks(year, quarter):
if use_file_type_suffix(year, quarter):
return range(0, 1+1)
return range(0, 0+1)
# Accumulate the ETL output of every (year, quarter, chunk) partition into
# one pandas label frame and one pandas feature frame.
final_cpu_df_label = None
final_cpu_df_data = None
all_load_times = []
all_etl_times = []
all_xgb_convert_times = []
for year in range1(start_year, end_year):
    for quarter in range1(start_quarter, end_quarter):
        for chunk in getChunks(year, quarter):
            chunk_sufix = "_{}".format(chunk) if use_file_type_suffix(year, quarter) else ""
            perf_file = perf_data_path + "/Performance_" + str(year) + "Q" + str(quarter) + ".txt" + chunk_sufix
            [gpu_df, load_time, etl_time] = run_gpu_workflow(quarter=quarter, year=year, perf_file=perf_file)
            all_load_times.append(load_time)
            all_etl_times.append(etl_time)
            xgb_convert_start_time = time.time()
            # Split into (label frame, feature frame) before leaving the GPU.
            gpu_df = (gpu_df[['delinquency_12']], gpu_df[list(gpu_df.columns.difference(['delinquency_12']))])
            cpu_df_label = gpu_df[0].to_pandas()
            cpu_df_data = gpu_df[1].to_pandas()
            del (gpu_df)  # release GPU memory promptly
            # FIX: the original tested ``year == start_year``, which
            # overwrote (instead of appending) every chunk/quarter processed
            # during the first year, silently dropping data whenever the
            # first year had more than one partition. Seed on first
            # iteration only, concatenate afterwards.
            if final_cpu_df_label is None:
                final_cpu_df_label = cpu_df_label
                final_cpu_df_data = cpu_df_data
            else:
                final_cpu_df_label = pd.concat([final_cpu_df_label, cpu_df_label])
                final_cpu_df_data = pd.concat([final_cpu_df_data, cpu_df_data])
            xgb_convert_end_time = time.time()
            all_xgb_convert_times.append(xgb_convert_end_time - xgb_convert_start_time)
data_train, data_test, label_train, label_test = train_test_split(final_cpu_df_data, final_cpu_df_label, test_size=0.20, random_state=42)
xgdf_train = xgb.DMatrix(data_train, label_train)
xgdf_test = xgb.DMatrix(data_test, label_test)
chronometerTrain1 = Chronometer.makeStarted()
startTime = time.time()
bst = xgb.train(dxgb_gpu_params, xgdf_train, num_boost_round=dxgb_gpu_params['nround'])
Chronometer.show(chronometerTrain1, 'Train 1')
chronometerPredict1 = Chronometer.makeStarted()
preds = bst.predict(xgdf_test)
Chronometer.show(chronometerPredict1, 'Predict 1')
labels = xgdf_test.get_label()
print('prediction error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))
endTime = time.time()
trainPredict_time = (endTime - startTime)
print("TIMES SUMMARY")
print('LOAD Time: %fs' % sum(all_load_times))
print('ETL Time: %fs' % sum(all_etl_times))
print('CONVERT Time: %fs' % sum(all_xgb_convert_times))
print('TRAIN/PREDICT Time: %fs' % trainPredict_time)
if use_registered_hdfs:
deregister_hdfs()
elif use_registered_posix:
deregister_posix()
Chronometer.show_resume() | en | 0.674004 | Loads performance data Returns ------- GPU DataFrame Loads acquisition data Returns ------- GPU DataFrame Loads names used for renaming the banks Returns ------- GPU DataFrame SELECT loan_id, orig_channel, orig_interest_rate, orig_upb, orig_loan_term, orig_date, first_pay_date, orig_ltv, orig_cltv, num_borrowers, dti, borrower_credit_score, first_home_buyer, loan_purpose, property_type, num_units, occupancy_status, property_state, zip, mortgage_insurance_percent, product_type, coborrow_credit_score, mortgage_insurance_type, relocation_mortgage_indicator, new_seller_name as seller_name FROM main.acq as a LEFT OUTER JOIN main.names as n ON a.seller_name = n.seller_name SELECT loan_id, max(current_loan_delinquency_status) >= 1 as ever_30, max(current_loan_delinquency_status) >= 3 as ever_90, max(current_loan_delinquency_status) >= 6 as ever_180 FROM main.perf group by loan_id SELECT loan_id, min(monthly_reporting_period) as delinquency_30 FROM main.perf where current_loan_delinquency_status >= 1 group by loan_id SELECT loan_id, min(monthly_reporting_period) as delinquency_90 FROM main.perf where current_loan_delinquency_status >= 3 group by loan_id SELECT loan_id, min(monthly_reporting_period) as delinquency_180 FROM main.perf where current_loan_delinquency_status >= 6 group by loan_id SELECT d30.loan_id, delinquency_30, COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90, COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.delinq_30 as d30 LEFT OUTER JOIN main.delinq_90 as d90 ON d30.loan_id = d90.loan_id LEFT OUTER JOIN main.delinq_180 as d180 ON d30.loan_id = d180.loan_id SELECT everdf.loan_id as loan_id, ever_30, ever_90, ever_180, COALESCE(delinquency_30, DATE '1970-01-01') as delinquency_30, COALESCE(delinquency_90, DATE '1970-01-01') as delinquency_90, COALESCE(delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.everdf as everdf LEFT OUTER JOIN main.delinq as delinq ON 
everdf.loan_id = delinq.loan_id SELECT perf.loan_id as loan_id, perf.monthly_reporting_period as mrp_timestamp, EXTRACT(MONTH FROM perf.monthly_reporting_period) as timestamp_month, EXTRACT(YEAR FROM perf.monthly_reporting_period) as timestamp_year, COALESCE(perf.current_loan_delinquency_status, -1) as delinquency_12, COALESCE(perf.current_actual_upb, 999999999.9) as upb_12, everdf.ever_30 as ever_30, everdf.ever_90 as ever_90, everdf.ever_180 as ever_180, COALESCE(everdf.delinquency_30, DATE '1970-01-01') as delinquency_30, COALESCE(everdf.delinquency_90, DATE '1970-01-01') as delinquency_90, COALESCE(everdf.delinquency_180, DATE '1970-01-01') as delinquency_180 FROM main.perf as perf LEFT OUTER JOIN main.everdf as everdf ON perf.loan_id = everdf.loan_id SELECT j.loan_id, j.mrp_timestamp, j.timestamp_month, j.timestamp_year, j.ever_30, j.ever_90, j.ever_180, j.delinquency_30, j.delinquency_90, j.delinquency_180, t.delinquency_12, t.upb_12 FROM main.joined_df as j LEFT OUTER JOIN main.testdf as t ON j.loan_id = t.loan_id and j.timestamp_year = t.timestamp_year and j.timestamp_month = t.timestamp_month SELECT g.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb FROM main.gdf as g LEFT OUTER JOIN main.joined_df as j ON g.loan_id = j.loan_id and EXTRACT(YEAR FROM g.monthly_reporting_period) = j.timestamp_year and EXTRACT(MONTH FROM g.monthly_reporting_period) = j.timestamp_month SELECT p.loan_id, current_actual_upb, current_loan_delinquency_status, delinquency_12, interest_rate, loan_age, mod_flag, msa, non_interest_bearing_upb, borrower_credit_score, dti, first_home_buyer, loan_purpose, mortgage_insurance_percent, num_borrowers, num_units, occupancy_status, orig_channel, orig_cltv, orig_date, orig_interest_rate, orig_loan_term, orig_ltv, orig_upb, product_type, property_state, property_type, relocation_mortgage_indicator, seller_name, zip FROM main.perf as p LEFT OUTER JOIN 
main.acq as a ON p.loan_id = a.loan_id # to download data for this notebook, visit https://rapidsai.github.io/demos/datasets/mortgage-data and update the following paths accordingly # end_year is inclusive # the number of data files to train against # 'nthread', ncores[worker], # WSM may want to set this | 2.261948 | 2 |
src/python/grpcio_tests/tests/interop/server.py | txl0591/grpc | 117 | 6624863 | <gh_stars>100-1000
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC interoperability test server."""
import argparse
from concurrent import futures
import logging
import time
import grpc
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import methods
from tests.interop import resources
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def _parse_args():
    """Parse the interop server's command-line flags."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', help='the port on which to serve', type=int)
    parser.add_argument(
        '--use_tls',
        help='require a secure connection',
        default=False,
        type=resources.parse_bool)
    return parser.parse_args()


def serve():
    """Run the gRPC interoperability TestService until interrupted.

    Binds to ``--port`` (TLS when ``--use_tls`` is given) and blocks
    forever; any exception — including KeyboardInterrupt — triggers an
    immediate, logged server stop.
    """
    args = _parse_args()
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                    server)
    if args.use_tls:
        # Secure endpoint: a single (private key, certificate chain) pair
        # taken from the bundled test resources.
        private_key = resources.private_key()
        certificate_chain = resources.certificate_chain()
        credentials = grpc.ssl_server_credentials((
            (private_key, certificate_chain),))
        server.add_secure_port('[::]:{}'.format(args.port), credentials)
    else:
        server.add_insecure_port('[::]:{}'.format(args.port))

    server.start()
    logging.info('Server serving.')
    try:
        # Sleep in long increments; the server threads do the real work.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except BaseException as e:
        # BaseException so Ctrl-C (KeyboardInterrupt) also shuts down cleanly.
        logging.info('Caught exception "%s"; stopping server...', e)
        server.stop(None)
        logging.info('Server stopped; exiting.')


if __name__ == '__main__':
    serve()
| # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC interoperability test server."""
import argparse
from concurrent import futures
import logging
import time
import grpc
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import methods
from tests.interop import resources
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def serve():
parser = argparse.ArgumentParser()
parser.add_argument('--port', help='the port on which to serve', type=int)
parser.add_argument(
'--use_tls',
help='require a secure connection',
default=False,
type=resources.parse_bool)
args = parser.parse_args()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
server)
if args.use_tls:
private_key = resources.private_key()
certificate_chain = resources.certificate_chain()
credentials = grpc.ssl_server_credentials((
(private_key, certificate_chain),))
server.add_secure_port('[::]:{}'.format(args.port), credentials)
else:
server.add_insecure_port('[::]:{}'.format(args.port))
server.start()
logging.info('Server serving.')
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except BaseException as e:
logging.info('Caught exception "%s"; stopping server...', e)
server.stop(None)
logging.info('Server stopped; exiting.')
if __name__ == '__main__':
serve() | en | 0.834055 | # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The Python implementation of the GRPC interoperability test server. | 2.199032 | 2 |
SemanticMask_generation.py | PingCheng-Wei/SD-MaskRCNN | 0 | 6624864 | '''
Given a root directory of dataset which contains the SegmentationMask folder,
this script will automatically generate the semantic segmentation masks for each image
'''
import numpy as np
import matplotlib.pyplot as plt
import skimage.io
import skimage.color
import os
import sys
import argparse
def semanticmask_generation(root_dir):
    """Generate binary semantic-segmentation masks for a dataset.

    For every ``.png``/``.jpg`` instance mask under
    ``<root_dir>/SegmentationMasks`` and
    ``<root_dir>/SegmentationMasks_800_800``, writes a 2-D binary mask
    (1 = object instance, 0 = background/box/package) with the same file
    name to ``<root_dir>/segmasks_filled`` and
    ``<root_dir>/segmasks_filled_800_800`` respectively.

    :param root_dir: dataset root directory; a relative path is resolved
        to an absolute one first.
    """
    # Label values that do NOT count as object instances.
    # NOTE(review): 0/13/21 are assumed to be the BG/Box/Package ids of
    # this dataset's label map -- confirm against the dataset definition.
    non_instance_labels = (0, 13, 21)

    def _to_2d(img):
        # Collapse an RGBA/RGB label image to a single 2-D label plane.
        # The ndim check guards 2-D masks whose *width* happens to be 3
        # or 4 (the original code would have mis-sliced those).
        if img.ndim == 3:
            if img.shape[-1] == 4:
                img = img[..., :3]  # drop the alpha channel
            if img.shape[-1] == 3:
                # Channels carry identical labels; keep one. SD-MaskRCNN
                # requires a 2-D mask.
                img = img[:, :, 0]
        return img

    def _convert_folder(src_dir, dst_dir):
        # Convert every instance mask in src_dir into a binary semantic
        # mask of the same name in dst_dir.
        if not os.path.exists(dst_dir):
            os.mkdir(dst_dir)
        for name in os.listdir(src_dir):
            ext = os.path.splitext(name)[1]
            if ext not in ('.png', '.jpg'):
                continue
            img = _to_2d(skimage.io.imread(os.path.join(src_dir, name)))
            # 1 where the pixel belongs to an instance, 0 elsewhere.
            semantic_mask = np.where(np.isin(img, non_instance_labels), 0, 1)
            # Save via an absolute path instead of os.chdir(): mutating the
            # process working directory is a side effect that could break
            # callers relying on relative paths.
            skimage.io.imsave(os.path.join(dst_dir, name), semantic_mask)

    # Make sure root_dir is an absolute path.
    if not os.path.isabs(root_dir):
        root_dir = os.path.abspath(root_dir)

    _convert_folder(os.path.join(root_dir, 'SegmentationMasks'),
                    os.path.join(root_dir, 'segmasks_filled'))
    _convert_folder(os.path.join(root_dir, 'SegmentationMasks_800_800'),
                    os.path.join(root_dir, 'segmasks_filled_800_800'))
if __name__ == '__main__':
    # Command-line entry point: --root_dir is the only (required) flag.
    arg_parser = argparse.ArgumentParser(
        description='Copying and Renaming all images for SD-MaskRCNN')
    arg_parser.add_argument(
        '--root_dir', required=True,
        metavar="/path/to/dataset",
        help='root directory of the dataset which contains instance masks folder')
    cli = arg_parser.parse_args()

    print('ROOT_DIR: {}'.format(cli.root_dir))
    print('Start generating the semantic masks ...')
    semanticmask_generation(cli.root_dir)
    print('done!')
Given a root directory of dataset which contains the SegmentationMask folder,
this script will automatically generate the semantic segmentation masks for each image
'''
import numpy as np
import matplotlib.pyplot as plt
import skimage.io
import skimage.color
import os
import sys
import argparse
def semanticmask_generation(root_dir):
# make sure root_dir is absolute path
if not os.path.isabs(root_dir):
root_dir = os.path.abspath(root_dir)
segmask_path = os.path.join(root_dir, 'SegmentationMasks')
segmask_800_path = os.path.join(root_dir, 'SegmentationMasks_800_800')
semanticmask_path = os.path.join(root_dir, 'segmasks_filled')
semanticmask_800_path = os.path.join(root_dir, 'segmasks_filled_800_800')
# segmask_path = os.path.join(root_dir, 'modal_segmasks')
# segmask_800_path = os.path.join(root_dir, 'modal_segmasks_800_800')
# semanticmask_path = os.path.join(root_dir, 'segmasks_filled')
# semanticmask_800_path = os.path.join(root_dir, 'segmasks_filled_800_800')
# make sure path exists
if not os.path.exists(semanticmask_path):
os.mkdir(semanticmask_path)
if not os.path.exists(semanticmask_800_path):
os.mkdir(semanticmask_800_path)
# iterate through the files in folder
for file in os.listdir(segmask_path):
# separate the root and the ext of the file
file_root, file_ext = os.path.splitext(file)
# search for .png and .jpg file
if file_ext == '.png' or file_ext == '.jpg':
# read the image
img_path = os.path.join(segmask_path, file)
img = skimage.io.imread(img_path)
# add this condition to make sure that the input mask is 2D
if img.shape[-1] == 4 and img.ndim == 3:
img = img[..., :3] # convert 4-channel to 3-channel
# convert 3D image to 2D image, because we must need 2D Mask for SD-MaskRCNN
if img.shape[-1] == 3:
img = img[:, :, 0]
# find the BG, Box, Package label
img_BG = img == 0
img_Box = img == 13
img_Package = img == 21
# create the filter to filter out BG, Box, Package
img_filter = img_BG + img_Box + img_Package
# filter instance(1) and no instance(0)
SemanticMask = np.where(img_filter, 0, 1)
# store the image in corresponding directory
os.chdir(semanticmask_path)
skimage.io.imsave(file, SemanticMask)
# iterate through the files in folder
for file in os.listdir(segmask_800_path):
# seperate the root and the ext of the file
file_root, file_ext = os.path.splitext(file)
# search for .png and .jpg file
if file_ext == '.png' or file_ext == '.jpg':
# read the image
img_path = os.path.join(segmask_800_path, file)
img = skimage.io.imread(img_path)
# add this condition to make sure that the input mask is 2D
if img.shape[-1] == 4 and img.ndim == 3:
img = img[..., :3] # convert 4-channel to 3-channel
# convert 3D image to 2D image, because we must need 2D Mask for SD-MaskRCNN
if img.shape[-1] == 3:
img = img[:, :, 0]
# find the BG, Box, Package label
img_BG = img == 0
img_Box = img == 13
img_Package = img == 21
# create the filter to filter out BG, Box, Package
img_filter = img_BG + img_Box + img_Package
# filter instance(1) and no instance(0)
SemanticMask = np.where(img_filter, 0, 1)
# store the image in corresponding directory
os.chdir(semanticmask_800_path)
skimage.io.imsave(file, SemanticMask)
if __name__ == '__main__':
# Parse comment line arguments
parser = argparse.ArgumentParser(
description='Copying and Renaming all images for SD-MaskRCNN')
parser.add_argument('--root_dir', required=True,
metavar="/path/to/dataset",
help='root directory of the dataset which contains instance masks folder')
args = parser.parse_args()
print('ROOT_DIR: {}'.format(args.root_dir))
print('Start generating the semantic masks ...')
semanticmask_generation(args.root_dir)
print('done!') | en | 0.735333 | Given a root directory of dataset which contains the SegmentationMask folder, this script will automatically generate the semantic segmentation masks for each image # make sure root_dir is absolute path # segmask_path = os.path.join(root_dir, 'modal_segmasks') # segmask_800_path = os.path.join(root_dir, 'modal_segmasks_800_800') # semanticmask_path = os.path.join(root_dir, 'segmasks_filled') # semanticmask_800_path = os.path.join(root_dir, 'segmasks_filled_800_800') # make sure path exists # iterate through the files in folder # separate the root and the ext of the file # search for .png and .jpg file # read the image # add this condition to make sure that the input mask is 2D # convert 4-channel to 3-channel # convert 3D image to 2D image, because we must need 2D Mask for SD-MaskRCNN # find the BG, Box, Package label # create the filter to filter out BG, Box, Package # filter instance(1) and no instance(0) # store the image in corresponding directory # iterate through the files in folder # seperate the root and the ext of the file # search for .png and .jpg file # read the image # add this condition to make sure that the input mask is 2D # convert 4-channel to 3-channel # convert 3D image to 2D image, because we must need 2D Mask for SD-MaskRCNN # find the BG, Box, Package label # create the filter to filter out BG, Box, Package # filter instance(1) and no instance(0) # store the image in corresponding directory # Parse comment line arguments | 3.070188 | 3 |
ringapp/migrations/0003_commlogic_theorem.py | rschwiebert/RingApp | 10 | 6624865 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a nullable ``theorem`` foreign key to the CommLogic model."""
    # Must run after 0002_logic_theorem (same app, declared below).
    dependencies = [
        ('ringapp', '0002_logic_theorem'),
    ]
    operations = [
        migrations.AddField(
            model_name='commlogic',
            name='theorem',
            # blank/null: the link to a supporting Theorem is optional.
            # db_column pins the column name so the schema is stable.
            field=models.ForeignKey(db_column='theorem_id', blank=True, to='ringapp.Theorem', null=True, on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0002_logic_theorem'),
]
operations = [
migrations.AddField(
model_name='commlogic',
name='theorem',
field=models.ForeignKey(db_column='theorem_id', blank=True, to='ringapp.Theorem', null=True, on_delete=models.CASCADE),
preserve_default=True,
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.462703 | 1 |
BTrees/tests/testBTrees.py | azmeuk/BTrees | 66 | 6624866 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import unittest
from BTrees.tests.common import permutations
class DegenerateBTree(unittest.TestCase):
    """Tests run against a hand-built, maximally unbalanced IITreeSet."""
    # Build a degenerate tree (set). Boxes are BTree nodes. There are
    # 5 leaf buckets, each containing a single int. Keys in the BTree
    # nodes don't appear in the buckets. Seven BTree nodes are purely
    # indirection nodes (no keys). Buckets aren't all at the same depth:
    #
    #     +------------------------+
    #     |          4             |
    #     +------------------------+
    #         |               |
    #         |               v
    #         |              +-+
    #         |              | |
    #         |              +-+
    #         |               |
    #         v               v
    #     +-------+    +-------------+
    #     |   2   |    |   6     10  |
    #     +-------+    +-------------+
    #      |     |      |     |     |
    #      v     v      v     v     v
    #     +-+   +-+    +-+   +-+   +-+
    #     | |   | |    | |   | |   | |
    #     +-+   +-+    +-+   +-+   +-+
    #      |     |      |     |     |
    #      v     v      v     v     v
    #      1     3     +-+    7     11
    #                  | |
    #                  +-+
    #                   |
    #                   v
    #                   5
    #
    # This is nasty for many algorithms. Consider a high-end range search
    # for 4. The BTree nodes direct it to the 5 bucket, but the correct
    # answer is the 3 bucket, which requires going in a different direction
    # at the very top node already. Consider a low-end range search for
    # 9. The BTree nodes direct it to the 7 bucket, but the correct answer
    # is the 11 bucket. This is also a nasty-case tree for deletions.

    def _build_degenerate_tree(self):
        """Return ``(tree, keys)``: the degenerate IITreeSet drawn above."""
        # Build the buckets and chain them together.
        from BTrees.IIBTree import IISet
        from BTrees.IIBTree import IITreeSet
        from BTrees.check import check
        bucket11 = IISet([11])

        bucket7 = IISet()
        bucket7.__setstate__(((7,), bucket11))

        bucket5 = IISet()
        bucket5.__setstate__(((5,), bucket7))

        bucket3 = IISet()
        bucket3.__setstate__(((3,), bucket5))

        bucket1 = IISet()
        bucket1.__setstate__(((1,), bucket3))

        # Build the deepest layers of indirection nodes.
        ts = IITreeSet
        tree1 = ts()
        tree1.__setstate__(((bucket1,), bucket1))

        tree3 = ts()
        tree3.__setstate__(((bucket3,), bucket3))

        tree5lower = ts()
        tree5lower.__setstate__(((bucket5,), bucket5))
        tree5 = ts()
        tree5.__setstate__(((tree5lower,), bucket5))

        tree7 = ts()
        tree7.__setstate__(((bucket7,), bucket7))

        tree11 = ts()
        tree11.__setstate__(((bucket11,), bucket11))

        # Paste together the middle layers.
        tree13 = ts()
        tree13.__setstate__(((tree1, 2, tree3), bucket1))

        tree5711lower = ts()
        tree5711lower.__setstate__(((tree5, 6, tree7, 10, tree11), bucket5))
        tree5711 = ts()
        tree5711.__setstate__(((tree5711lower,), bucket5))

        # One more.
        t = ts()
        t.__setstate__(((tree13, 4, tree5711), bucket1))

        t._check()
        check(t)
        return t, [1, 3, 5, 7, 11]

    def testBasicOps(self):
        t, keys = self._build_degenerate_tree()
        self.assertEqual(len(t), len(keys))
        self.assertEqual(list(t.keys()), keys)
        # has_key() is part of the BTrees API, so it is exercised
        # directly here rather than via the `in` operator.
        self.assertTrue(t.has_key(1))
        self.assertTrue(t.has_key(3))
        self.assertTrue(t.has_key(5))
        self.assertTrue(t.has_key(7))
        self.assertTrue(t.has_key(11))
        for i in 0, 2, 4, 6, 8, 9, 10, 12:
            self.assertNotIn(i, t)

    def _checkRanges(self, tree, keys):
        """Assert tree holds exactly ``keys`` and that every combination
        of (lo, hi, skipmin, skipmax) range search agrees with a
        brute-force filter over the expected keys.
        """
        self.assertEqual(len(tree), len(keys))
        sorted_keys = keys[:]
        sorted_keys.sort()
        self.assertEqual(list(tree.keys()), sorted_keys)
        for k in keys:
            # assertIn for consistency with the assertNotIn checks above.
            self.assertIn(k, tree)
        if keys:
            lokey = sorted_keys[0]
            hikey = sorted_keys[-1]
            self.assertEqual(lokey, tree.minKey())
            self.assertEqual(hikey, tree.maxKey())
        else:
            lokey = hikey = 42

        # Try all range searches.
        for lo in range(lokey - 1, hikey + 2):
            for hi in range(lo - 1, hikey + 2):
                for skipmin in False, True:
                    for skipmax in False, True:
                        wantlo, wanthi = lo, hi
                        if skipmin:
                            wantlo += 1
                        if skipmax:
                            wanthi -= 1
                        want = [k for k in keys if wantlo <= k <= wanthi]
                        got = list(tree.keys(lo, hi, skipmin, skipmax))
                        self.assertEqual(want, got)

    def testRanges(self):
        t, keys = self._build_degenerate_tree()
        self._checkRanges(t, keys)

    def testDeletes(self):
        # Delete keys in all possible orders, checking each tree along
        # the way.

        # This is a tough test. Previous failure modes included:
        # 1. A variety of assertion failures in _checkRanges.
        # 2. Assorted "Invalid firstbucket pointer" failures at
        #    seemingly random times, coming out of the BTree destructor.
        # 3. Under Python 2.3 CVS, some baffling
        #       RuntimeWarning: tp_compare didn't return -1 or -2 for exception
        #    warnings, possibly due to memory corruption after a BTree
        #    goes insane.
        # On CPython in PURE_PYTHON mode, this is a *slow* test, taking 15+s
        # on a 2015 laptop.
        from BTrees.check import check
        t, keys = self._build_degenerate_tree()
        for oneperm in permutations(keys):
            t, keys = self._build_degenerate_tree()
            for key in oneperm:
                t.remove(key)
                keys.remove(key)
                t._check()
                check(t)
                self._checkRanges(t, keys)
            # We removed all the keys, so the tree should be empty now.
            self.assertEqual(t.__getstate__(), None)

            # A damaged tree may trigger an "invalid firstbucket pointer"
            # failure at the time its destructor is invoked. Try to force
            # that to happen now, so it doesn't look like a baffling failure
            # at some unrelated line.
            del t   # trigger destructor
# Live-instance registry used by TestBugFixes.test_LP294788 to detect
# objects a BTree failed to release.
LP294788_ids = {}


class ToBeDeleted(object):
    """Comparable value object that tracks live instances in LP294788_ids.

    Each instance registers its integer id in the module-global
    ``LP294788_ids`` on creation and removes it again in ``__del__``,
    so anything left in the dict after a full GC is still referenced
    somewhere (i.e. leaked).
    """

    def __init__(self, id):
        assert isinstance(id, int) #we don't want to store any object ref here
        self.id = id

        global LP294788_ids
        LP294788_ids[id] = 1

    def __del__(self):
        global LP294788_ids
        LP294788_ids.pop(self.id, None)

    def __cmp__(self, other):
        # Python 2 legacy slot, kept for API compatibility. The builtin
        # cmp() no longer exists on Python 3 (the original body raised
        # NameError if called), so compute the -1/0/1 result directly.
        return (self.id > other.id) - (self.id < other.id)

    def __le__(self, other):
        return self.id <= other.id

    def __lt__(self, other):
        return self.id < other.id

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return self.id != other.id

    def __gt__(self, other):
        return self.id > other.id

    def __ge__(self, other):
        return self.id >= other.id

    def __hash__(self):
        # Hash like the underlying int so equal-id instances collide.
        return hash(self.id)
class TestBugFixes(unittest.TestCase):
    """Regression tests for historical BTrees bugs."""

    # Collector 1843. Error returns were effectively ignored in
    # Bucket_rangeSearch(), leading to "delayed" errors, or worse.
    def testFixed1843(self):
        from BTrees.IIBTree import IISet
        t = IISet()
        t.insert(1)
        # This one used to fail to raise the TypeError when it occurred.
        self.assertRaises(TypeError, t.keys, "")
        # This one used to segfault.
        self.assertRaises(TypeError, t.keys, 0, "")

    def test_LP294788(self):
        # https://bugs.launchpad.net/bugs/294788
        # BTree keeps some deleted objects referenced

        # The logic here together with the ToBeDeleted class is that
        # a separate reference dict is populated on object creation
        # and removed in __del__
        # That means what's left in the reference dict is never GC'ed
        # therefore referenced somewhere

        # To simulate real life, some random data is used to exercise the tree
        import gc
        import random

        from BTrees.OOBTree import OOBTree

        t = OOBTree()
        # Seeded generator so each run exercises the same sequence.
        trandom = random.Random('OOBTree')

        global LP294788_ids

        # /// BTree keys are integers, value is an object
        LP294788_ids = {}
        ids = {}
        for i in range(1024):
            if trandom.random() > 0.1 or not ids:
                #add
                id = None
                while id is None or id in ids:
                    id = trandom.randint(0, 1000000)
                ids[id] = 1
                t[id] = ToBeDeleted(id)
            else:
                #del
                keys = list(ids.keys())
                if keys:
                    id = trandom.choice(list(ids.keys()))
                    del t[id]
                    del ids[id]

        # Delete the remaining keys in random order. (Shuffle the list
        # itself -- shuffling a throwaway copy via shuffle(list(ids)),
        # as a Python-3 port once did, is a no-op.)
        ids = list(ids.keys())
        trandom.shuffle(ids)
        for id in ids:
            del t[id]
        ids = None

        #to be on the safe side run a full GC
        gc.collect()
        self.assertEqual(len(t), 0)
        self.assertEqual(len(LP294788_ids), 0)
        # \\\

        # /// BTree keys are integers, value is a tuple having an object
        LP294788_ids = {}
        ids = {}
        for i in range(1024):
            if trandom.random() > 0.1 or not ids:
                #add
                id = None
                while id is None or id in ids:
                    id = trandom.randint(0, 1000000)
                ids[id] = 1
                t[id] = (id, ToBeDeleted(id), u'somename')
            else:
                #del
                keys = list(ids.keys())
                if keys:
                    id = trandom.choice(keys)
                    del t[id]
                    del ids[id]
        ids = list(ids.keys())
        trandom.shuffle(ids)
        for id in ids:
            del t[id]
        ids = None
        #to be on the safe side run a full GC
        gc.collect()
        self.assertEqual(len(t), 0)
        self.assertEqual(len(LP294788_ids), 0)
        # \\\

        # /// BTree keys are objects, value is an int
        t = OOBTree()
        LP294788_ids = {}
        ids = {}
        for i in range(1024):
            if trandom.random() > 0.1 or not ids:
                #add
                id = None
                while id is None or id in ids:
                    id = ToBeDeleted(trandom.randint(0, 1000000))
                ids[id] = 1
                t[id] = 1
            else:
                #del
                id = trandom.choice(list(ids.keys()))
                del ids[id]
                del t[id]
        ids = list(ids.keys())
        trandom.shuffle(ids)
        for id in ids:
            del t[id]
        #release all refs
        ids = id = None
        #to be on the safe side run a full GC
        gc.collect()
        self.assertEqual(len(t), 0)
        self.assertEqual(len(LP294788_ids), 0)

        # /// BTree keys are tuples having objects, value is an int
        t = OOBTree()
        LP294788_ids = {}
        ids = {}
        for i in range(1024):
            if trandom.random() > 0.1 or not ids:
                #add
                id = None
                while id is None or id in ids:
                    id = trandom.randint(0, 1000000)
                    id = (id, ToBeDeleted(id), u'somename')
                ids[id] = 1
                t[id] = 1
            else:
                #del
                id = trandom.choice(list(ids.keys()))
                del ids[id]
                del t[id]
        ids = list(ids.keys())
        trandom.shuffle(ids)
        for id in ids:
            del t[id]
        #release all refs
        ids = id = key = None
        #to be on the safe side run a full GC
        gc.collect()
        self.assertEqual(len(t), 0)
        self.assertEqual(len(LP294788_ids), 0)
# cmp error propagation tests
class DoesntLikeBeingCompared:
    """Object whose every comparison raises ``ValueError('incomparable')``."""

    def _refuse(self, other):
        # Shared body for all rich-comparison hooks (and the legacy
        # Python 2 __cmp__ slot).
        raise ValueError('incomparable')

    __cmp__ = _refuse
    __lt__ = _refuse
    __le__ = _refuse
    __eq__ = _refuse
    __ne__ = _refuse
    __ge__ = _refuse
    __gt__ = _refuse
class TestCmpError(unittest.TestCase):
    """A comparison error raised by a key must propagate out of the BTree."""

    def testFoo(self):
        from BTrees.OOBTree import OOBTree
        t = OOBTree()
        t['hello world'] = None
        # Inserting an incomparable key must surface the key's own
        # ValueError instead of admitting the object into the tree.
        # (assertRaises context manager replaces the manual
        # try/except/else/self.fail dance.)
        with self.assertRaises(ValueError) as ctx:
            t[DoesntLikeBeingCompared()] = None
        self.assertEqual(str(ctx.exception), 'incomparable')
class FamilyTest(unittest.TestCase):
    """Checks the BTrees.family32/family64 singletons, their module
    aliases, their integer limits, and their pickling identity."""

    def test32(self):
        from zope.interface.verify import verifyObject
        import BTrees
        from BTrees.IOBTree import IOTreeSet
        verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family32)
        # Each family attribute must alias the matching 32-bit module.
        self.assertEqual(
            BTrees.family32.IO, BTrees.IOBTree)
        self.assertEqual(
            BTrees.family32.OI, BTrees.OIBTree)
        self.assertEqual(
            BTrees.family32.II, BTrees.IIBTree)
        self.assertEqual(
            BTrees.family32.IF, BTrees.IFBTree)
        self.assertEqual(
            BTrees.family32.UO, BTrees.UOBTree)
        self.assertEqual(
            BTrees.family32.OU, BTrees.OUBTree)
        self.assertEqual(
            BTrees.family32.UU, BTrees.UUBTree)
        self.assertEqual(
            BTrees.family32.UF, BTrees.UFBTree)
        self.assertEqual(
            BTrees.family32.OO, BTrees.OOBTree)
        # (OU is intentionally re-checked; kept for parity with test64.)
        self.assertEqual(
            BTrees.family32.OU, BTrees.OUBTree)
        s = IOTreeSet()
        s.insert(BTrees.family32.maxint)
        self.assertIn(BTrees.family32.maxint, s)
        s = IOTreeSet()
        s.insert(BTrees.family32.minint)
        self.assertIn(BTrees.family32.minint, s)
        s = IOTreeSet()
        # this next bit illustrates an, um, "interesting feature". If
        # the characteristics change to match the 64 bit version, please
        # feel free to change.
        with self.assertRaises((TypeError, OverflowError)):
            s.insert(BTrees.family32.maxint + 1)

        with self.assertRaises((TypeError, OverflowError)):
            s.insert(BTrees.family32.minint - 1)
        self.check_pickling(BTrees.family32)

    def test64(self):
        from zope.interface.verify import verifyObject
        import BTrees
        from BTrees.LOBTree import LOTreeSet
        verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family64)
        # Each family attribute must alias the matching 64-bit module.
        self.assertEqual(
            BTrees.family64.IO, BTrees.LOBTree)
        self.assertEqual(
            BTrees.family64.OI, BTrees.OLBTree)
        self.assertEqual(
            BTrees.family64.II, BTrees.LLBTree)
        self.assertEqual(
            BTrees.family64.IF, BTrees.LFBTree)
        self.assertEqual(
            BTrees.family64.UO, BTrees.QOBTree)
        self.assertEqual(
            BTrees.family64.OU, BTrees.OQBTree)
        self.assertEqual(
            BTrees.family64.UU, BTrees.QQBTree)
        self.assertEqual(
            BTrees.family64.UF, BTrees.QFBTree)
        self.assertEqual(
            BTrees.family64.OO, BTrees.OOBTree)
        self.assertEqual(
            BTrees.family64.OU, BTrees.OQBTree)
        s = LOTreeSet()
        s.insert(BTrees.family64.maxint)
        self.assertIn(BTrees.family64.maxint, s)
        s = LOTreeSet()
        s.insert(BTrees.family64.minint)
        self.assertIn(BTrees.family64.minint, s)
        s = LOTreeSet()
        # Out-of-range values are rejected, mirroring test32 (the exact
        # exception type differs between implementations, hence the tuple).
        with self.assertRaises((TypeError, OverflowError)):
            s.insert(BTrees.family64.maxint + 1)
        with self.assertRaises((TypeError, OverflowError)):
            s.insert(BTrees.family64.minint - 1)
        self.check_pickling(BTrees.family64)

    def check_pickling(self, family):
        """The "family" objects are singletons; they can be pickled and
        unpickled, and the same instances will always be returned on
        unpickling, whether from the same unpickler or different
        unpicklers.
        """
        import pickle
        from io import BytesIO

        s = pickle.dumps((family, family))
        (f1, f2) = pickle.loads(s)
        self.assertIs(f1, family)
        self.assertIs(f2, family)

        # Using a single memo across multiple pickles:
        sio = BytesIO()
        p = pickle.Pickler(sio)
        p.dump(family)
        p.dump([family])
        u = pickle.Unpickler(BytesIO(sio.getvalue()))
        f1 = u.load()
        f2, = u.load()
        self.assertIs(f1, family)
        self.assertIs(f2, family)

        # Using separate memos for each pickle:
        sio = BytesIO()
        p = pickle.Pickler(sio)
        p.dump(family)
        p.clear_memo()
        p.dump([family])
        u = pickle.Unpickler(BytesIO(sio.getvalue()))
        f1 = u.load()
        f2, = u.load()
        self.assertIs(f1, family)
        self.assertIs(f2, family)
| ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import unittest
from BTrees.tests.common import permutations
class DegenerateBTree(unittest.TestCase):
# Build a degenerate tree (set). Boxes are BTree nodes. There are
# 5 leaf buckets, each containing a single int. Keys in the BTree
# nodes don't appear in the buckets. Seven BTree nodes are purely
# indirection nodes (no keys). Buckets aren't all at the same depth:
#
# +------------------------+
# | 4 |
# +------------------------+
# | |
# | v
# | +-+
# | | |
# | +-+
# | |
# v v
# +-------+ +-------------+
# | 2 | | 6 10 |
# +-------+ +-------------+
# | | | | |
# v v v v v
# +-+ +-+ +-+ +-+ +-+
# | | | | | | | | | |
# +-+ +-+ +-+ +-+ +-+
# | | | | |
# v v v v v
# 1 3 +-+ 7 11
# | |
# +-+
# |
# v
# 5
#
# This is nasty for many algorithms. Consider a high-end range search
# for 4. The BTree nodes direct it to the 5 bucket, but the correct
# answer is the 3 bucket, which requires going in a different direction
# at the very top node already. Consider a low-end range search for
# 9. The BTree nodes direct it to the 7 bucket, but the correct answer
# is the 11 bucket. This is also a nasty-case tree for deletions.
def _build_degenerate_tree(self):
# Build the buckets and chain them together.
from BTrees.IIBTree import IISet
from BTrees.IIBTree import IITreeSet
from BTrees.check import check
bucket11 = IISet([11])
bucket7 = IISet()
bucket7.__setstate__(((7,), bucket11))
bucket5 = IISet()
bucket5.__setstate__(((5,), bucket7))
bucket3 = IISet()
bucket3.__setstate__(((3,), bucket5))
bucket1 = IISet()
bucket1.__setstate__(((1,), bucket3))
# Build the deepest layers of indirection nodes.
ts = IITreeSet
tree1 = ts()
tree1.__setstate__(((bucket1,), bucket1))
tree3 = ts()
tree3.__setstate__(((bucket3,), bucket3))
tree5lower = ts()
tree5lower.__setstate__(((bucket5,), bucket5))
tree5 = ts()
tree5.__setstate__(((tree5lower,), bucket5))
tree7 = ts()
tree7.__setstate__(((bucket7,), bucket7))
tree11 = ts()
tree11.__setstate__(((bucket11,), bucket11))
# Paste together the middle layers.
tree13 = ts()
tree13.__setstate__(((tree1, 2, tree3), bucket1))
tree5711lower = ts()
tree5711lower.__setstate__(((tree5, 6, tree7, 10, tree11), bucket5))
tree5711 = ts()
tree5711.__setstate__(((tree5711lower,), bucket5))
# One more.
t = ts()
t.__setstate__(((tree13, 4, tree5711), bucket1))
t._check()
check(t)
return t, [1, 3, 5, 7, 11]
def testBasicOps(self):
t, keys = self._build_degenerate_tree()
self.assertEqual(len(t), len(keys))
self.assertEqual(list(t.keys()), keys)
self.assertTrue(t.has_key(1))
self.assertTrue(t.has_key(3))
self.assertTrue(t.has_key(5))
self.assertTrue(t.has_key(7))
self.assertTrue(t.has_key(11))
for i in 0, 2, 4, 6, 8, 9, 10, 12:
self.assertNotIn(i, t)
def _checkRanges(self, tree, keys):
self.assertEqual(len(tree), len(keys))
sorted_keys = keys[:]
sorted_keys.sort()
self.assertEqual(list(tree.keys()), sorted_keys)
for k in keys:
self.assertTrue(k in tree)
if keys:
lokey = sorted_keys[0]
hikey = sorted_keys[-1]
self.assertEqual(lokey, tree.minKey())
self.assertEqual(hikey, tree.maxKey())
else:
lokey = hikey = 42
# Try all range searches.
for lo in range(lokey - 1, hikey + 2):
for hi in range(lo - 1, hikey + 2):
for skipmin in False, True:
for skipmax in False, True:
wantlo, wanthi = lo, hi
if skipmin:
wantlo += 1
if skipmax:
wanthi -= 1
want = [k for k in keys if wantlo <= k <= wanthi]
got = list(tree.keys(lo, hi, skipmin, skipmax))
self.assertEqual(want, got)
def testRanges(self):
t, keys = self._build_degenerate_tree()
self._checkRanges(t, keys)
def testDeletes(self):
# Delete keys in all possible orders, checking each tree along
# the way.
# This is a tough test. Previous failure modes included:
# 1. A variety of assertion failures in _checkRanges.
# 2. Assorted "Invalid firstbucket pointer" failures at
# seemingly random times, coming out of the BTree destructor.
# 3. Under Python 2.3 CVS, some baffling
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
# warnings, possibly due to memory corruption after a BTree
# goes insane.
# On CPython in PURE_PYTHON mode, this is a *slow* test, taking 15+s
# on a 2015 laptop.
from BTrees.check import check
t, keys = self._build_degenerate_tree()
for oneperm in permutations(keys):
t, keys = self._build_degenerate_tree()
for key in oneperm:
t.remove(key)
keys.remove(key)
t._check()
check(t)
self._checkRanges(t, keys)
# We removed all the keys, so the tree should be empty now.
self.assertEqual(t.__getstate__(), None)
# A damaged tree may trigger an "invalid firstbucket pointer"
# failure at the time its destructor is invoked. Try to force
# that to happen now, so it doesn't look like a baffling failure
# at some unrelated line.
del t # trigger destructor
LP294788_ids = {}
class ToBeDeleted(object):
def __init__(self, id):
assert isinstance(id, int) #we don't want to store any object ref here
self.id = id
global LP294788_ids
LP294788_ids[id] = 1
def __del__(self):
global LP294788_ids
LP294788_ids.pop(self.id, None)
def __cmp__(self, other):
return cmp(self.id, other.id)
def __le__(self, other):
return self.id <= other.id
def __lt__(self, other):
return self.id < other.id
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
def __gt__(self, other):
return self.id > other.id
def __ge__(self, other):
return self.id >= other.id
def __hash__(self):
return hash(self.id)
class TestBugFixes(unittest.TestCase):
# Collector 1843. Error returns were effectively ignored in
# Bucket_rangeSearch(), leading to "delayed" errors, or worse.
def testFixed1843(self):
from BTrees.IIBTree import IISet
t = IISet()
t.insert(1)
# This one used to fail to raise the TypeError when it occurred.
self.assertRaises(TypeError, t.keys, "")
# This one used to segfault.
self.assertRaises(TypeError, t.keys, 0, "")
def test_LP294788(self):
# https://bugs.launchpad.net/bugs/294788
# BTree keeps some deleted objects referenced
# The logic here together with the ToBeDeleted class is that
# a separate reference dict is populated on object creation
# and removed in __del__
# That means what's left in the reference dict is never GC'ed
# therefore referenced somewhere
# To simulate real life, some random data is used to exercise the tree
import gc
import random
from BTrees.OOBTree import OOBTree
t = OOBTree()
trandom = random.Random('OOBTree')
global LP294788_ids
# /// BTree keys are integers, value is an object
LP294788_ids = {}
ids = {}
for i in range(1024):
if trandom.random() > 0.1 or not ids:
#add
id = None
while id is None or id in ids:
id = trandom.randint(0, 1000000)
ids[id] = 1
t[id] = ToBeDeleted(id)
else:
#del
keys = list(ids.keys())
if keys:
id = trandom.choice(list(ids.keys()))
del t[id]
del ids[id]
ids = ids.keys()
trandom.shuffle(list(ids))
for id in ids:
del t[id]
ids = None
#to be on the safe side run a full GC
gc.collect()
#print LP294788_ids
self.assertEqual(len(t), 0)
self.assertEqual(len(LP294788_ids), 0)
# \\\
# /// BTree keys are integers, value is a tuple having an object
LP294788_ids = {}
ids = {}
for i in range(1024):
if trandom.random() > 0.1 or not ids:
#add
id = None
while id is None or id in ids:
id = trandom.randint(0, 1000000)
ids[id] = 1
t[id] = (id, ToBeDeleted(id), u'somename')
else:
#del
keys = list(ids.keys())
if keys:
id = trandom.choice(keys)
del t[id]
del ids[id]
ids = ids.keys()
trandom.shuffle(list(ids))
for id in ids:
del t[id]
ids = None
#to be on the safe side run a full GC
gc.collect()
#print LP294788_ids
self.assertEqual(len(t), 0)
self.assertEqual(len(LP294788_ids), 0)
# \\\
# /// BTree keys are objects, value is an int
t = OOBTree()
LP294788_ids = {}
ids = {}
for i in range(1024):
if trandom.random() > 0.1 or not ids:
#add
id = None
while id is None or id in ids:
id = ToBeDeleted(trandom.randint(0, 1000000))
ids[id] = 1
t[id] = 1
else:
#del
id = trandom.choice(list(ids.keys()))
del ids[id]
del t[id]
ids = ids.keys()
trandom.shuffle(list(ids))
for id in ids:
del t[id]
#release all refs
ids = id = None
#to be on the safe side run a full GC
gc.collect()
#print LP294788_ids
self.assertEqual(len(t), 0)
self.assertEqual(len(LP294788_ids), 0)
# /// BTree keys are tuples having objects, value is an int
t = OOBTree()
LP294788_ids = {}
ids = {}
for i in range(1024):
if trandom.random() > 0.1 or not ids:
#add
id = None
while id is None or id in ids:
id = trandom.randint(0, 1000000)
id = (id, ToBeDeleted(id), u'somename')
ids[id] = 1
t[id] = 1
else:
#del
id = trandom.choice(list(ids.keys()))
del ids[id]
del t[id]
ids = ids.keys()
trandom.shuffle(list(ids))
for id in ids:
del t[id]
#release all refs
ids = id = key = None
#to be on the safe side run a full GC
gc.collect()
#print LP294788_ids
self.assertEqual(len(t), 0)
self.assertEqual(len(LP294788_ids), 0)
# cmp error propagation tests
class DoesntLikeBeingCompared:
def __cmp__(self, other):
raise ValueError('incomparable')
__lt__ = __le__ = __eq__ = __ne__ = __ge__ = __gt__ = __cmp__
class TestCmpError(unittest.TestCase):
def testFoo(self):
from BTrees.OOBTree import OOBTree
t = OOBTree()
t['hello world'] = None
try:
t[DoesntLikeBeingCompared()] = None
except ValueError as e:
self.assertEqual(str(e), 'incomparable')
else:
self.fail('incomarable objects should not be allowed into '
'the tree')
class FamilyTest(unittest.TestCase):
def test32(self):
from zope.interface.verify import verifyObject
import BTrees
from BTrees.IOBTree import IOTreeSet
verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family32)
self.assertEqual(
BTrees.family32.IO, BTrees.IOBTree)
self.assertEqual(
BTrees.family32.OI, BTrees.OIBTree)
self.assertEqual(
BTrees.family32.II, BTrees.IIBTree)
self.assertEqual(
BTrees.family32.IF, BTrees.IFBTree)
self.assertEqual(
BTrees.family32.UO, BTrees.UOBTree)
self.assertEqual(
BTrees.family32.OU, BTrees.OUBTree)
self.assertEqual(
BTrees.family32.UU, BTrees.UUBTree)
self.assertEqual(
BTrees.family32.UF, BTrees.UFBTree)
self.assertEqual(
BTrees.family32.OO, BTrees.OOBTree)
self.assertEqual(
BTrees.family32.OU, BTrees.OUBTree)
s = IOTreeSet()
s.insert(BTrees.family32.maxint)
self.assertTrue(BTrees.family32.maxint in s)
s = IOTreeSet()
s.insert(BTrees.family32.minint)
self.assertTrue(BTrees.family32.minint in s)
s = IOTreeSet()
# this next bit illustrates an, um, "interesting feature". If
# the characteristics change to match the 64 bit version, please
# feel free to change.
with self.assertRaises((TypeError, OverflowError)):
s.insert(BTrees.family32.maxint + 1)
with self.assertRaises((TypeError, OverflowError)):
s.insert(BTrees.family32.minint - 1)
self.check_pickling(BTrees.family32)
def test64(self):
from zope.interface.verify import verifyObject
import BTrees
from BTrees.LOBTree import LOTreeSet
verifyObject(BTrees.Interfaces.IBTreeFamily, BTrees.family64)
self.assertEqual(
BTrees.family64.IO, BTrees.LOBTree)
self.assertEqual(
BTrees.family64.OI, BTrees.OLBTree)
self.assertEqual(
BTrees.family64.II, BTrees.LLBTree)
self.assertEqual(
BTrees.family64.IF, BTrees.LFBTree)
self.assertEqual(
BTrees.family64.UO, BTrees.QOBTree)
self.assertEqual(
BTrees.family64.OU, BTrees.OQBTree)
self.assertEqual(
BTrees.family64.UU, BTrees.QQBTree)
self.assertEqual(
BTrees.family64.UF, BTrees.QFBTree)
self.assertEqual(
BTrees.family64.OO, BTrees.OOBTree)
self.assertEqual(
BTrees.family64.OU, BTrees.OQBTree)
s = LOTreeSet()
s.insert(BTrees.family64.maxint)
self.assertTrue(BTrees.family64.maxint in s)
s = LOTreeSet()
s.insert(BTrees.family64.minint)
self.assertTrue(BTrees.family64.minint in s)
s = LOTreeSet()
# XXX why oh why do we expect ValueError here, but TypeError in test32?
self.assertRaises((TypeError, OverflowError), s.insert, BTrees.family64.maxint + 1)
self.assertRaises((TypeError, OverflowError), s.insert, BTrees.family64.minint - 1)
self.check_pickling(BTrees.family64)
def check_pickling(self, family):
# The "family" objects are singletons; they can be pickled and
# unpickled, and the same instances will always be returned on
# unpickling, whether from the same unpickler or different
# unpicklers.
import pickle
from io import BytesIO
s = pickle.dumps((family, family))
(f1, f2) = pickle.loads(s)
self.assertIs(f1, family)
self.assertIs(f2, family)
# Using a single memo across multiple pickles:
sio = BytesIO()
p = pickle.Pickler(sio)
p.dump(family)
p.dump([family])
u = pickle.Unpickler(BytesIO(sio.getvalue()))
f1 = u.load()
f2, = u.load()
self.assertTrue(f1 is family)
self.assertTrue(f2 is family)
# Using separate memos for each pickle:
sio = BytesIO()
p = pickle.Pickler(sio)
p.dump(family)
p.clear_memo()
p.dump([family])
u = pickle.Unpickler(BytesIO(sio.getvalue()))
f1 = u.load()
f2, = u.load()
self.assertTrue(f1 is family)
self.assertTrue(f2 is family)
| en | 0.826757 | ############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE # ############################################################################## # Build a degenerate tree (set). Boxes are BTree nodes. There are # 5 leaf buckets, each containing a single int. Keys in the BTree # nodes don't appear in the buckets. Seven BTree nodes are purely # indirection nodes (no keys). Buckets aren't all at the same depth: # # +------------------------+ # | 4 | # +------------------------+ # | | # | v # | +-+ # | | | # | +-+ # | | # v v # +-------+ +-------------+ # | 2 | | 6 10 | # +-------+ +-------------+ # | | | | | # v v v v v # +-+ +-+ +-+ +-+ +-+ # | | | | | | | | | | # +-+ +-+ +-+ +-+ +-+ # | | | | | # v v v v v # 1 3 +-+ 7 11 # | | # +-+ # | # v # 5 # # This is nasty for many algorithms. Consider a high-end range search # for 4. The BTree nodes direct it to the 5 bucket, but the correct # answer is the 3 bucket, which requires going in a different direction # at the very top node already. Consider a low-end range search for # 9. The BTree nodes direct it to the 7 bucket, but the correct answer # is the 11 bucket. This is also a nasty-case tree for deletions. # Build the buckets and chain them together. # Build the deepest layers of indirection nodes. # Paste together the middle layers. # One more. # Try all range searches. # Delete keys in all possible orders, checking each tree along # the way. # This is a tough test. Previous failure modes included: # 1. 
A variety of assertion failures in _checkRanges. # 2. Assorted "Invalid firstbucket pointer" failures at # seemingly random times, coming out of the BTree destructor. # 3. Under Python 2.3 CVS, some baffling # RuntimeWarning: tp_compare didn't return -1 or -2 for exception # warnings, possibly due to memory corruption after a BTree # goes insane. # On CPython in PURE_PYTHON mode, this is a *slow* test, taking 15+s # on a 2015 laptop. # We removed all the keys, so the tree should be empty now. # A damaged tree may trigger an "invalid firstbucket pointer" # failure at the time its destructor is invoked. Try to force # that to happen now, so it doesn't look like a baffling failure # at some unrelated line. # trigger destructor #we don't want to store any object ref here # Collector 1843. Error returns were effectively ignored in # Bucket_rangeSearch(), leading to "delayed" errors, or worse. # This one used to fail to raise the TypeError when it occurred. # This one used to segfault. # https://bugs.launchpad.net/bugs/294788 # BTree keeps some deleted objects referenced # The logic here together with the ToBeDeleted class is that # a separate reference dict is populated on object creation # and removed in __del__ # That means what's left in the reference dict is never GC'ed # therefore referenced somewhere # To simulate real life, some random data is used to exercise the tree # /// BTree keys are integers, value is an object #add #del #to be on the safe side run a full GC #print LP294788_ids # \\\ # /// BTree keys are integers, value is a tuple having an object #add #del #to be on the safe side run a full GC #print LP294788_ids # \\\ # /// BTree keys are objects, value is an int #add #del #release all refs #to be on the safe side run a full GC #print LP294788_ids # /// BTree keys are tuples having objects, value is an int #add #del #release all refs #to be on the safe side run a full GC #print LP294788_ids # cmp error propagation tests # this next bit illustrates an, 
um, "interesting feature". If # the characteristics change to match the 64 bit version, please # feel free to change. # XXX why oh why do we expect ValueError here, but TypeError in test32? # The "family" objects are singletons; they can be pickled and # unpickled, and the same instances will always be returned on # unpickling, whether from the same unpickler or different # unpicklers. # Using a single memo across multiple pickles: # Using separate memos for each pickle: | 2.084435 | 2 |
# flink-python/pyflink/table/tests/test_table_environment_api.py (rudikershaw/flink)
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import sys
import unittest
from py4j.protocol import Py4JJavaError
from typing import Iterable
from pyflink.common import RowKind, WatermarkStrategy, Configuration
from pyflink.common.serializer import TypeSerializer
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import MergingWindowAssigner, TimeWindow, Trigger, TriggerResult
from pyflink.datastream.functions import WindowFunction
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.datastream.window import TimeWindowSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes, CsvTableSink, StreamTableEnvironment, EnvironmentSettings, \
Module, ResultKind, ModuleEntry
from pyflink.table.catalog import ObjectPath, CatalogBaseTable
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.expressions import col, source_watermark
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.types import RowType, Row, UserDefinedType
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import (
PyFlinkBatchTableTestCase, PyFlinkStreamTableTestCase, PyFlinkTestCase,
_load_specific_flink_module_jars)
from pyflink.util.java_utils import get_j_env_configuration
class TableEnvironmentTest(object):
    """TableEnvironment tests shared by the concrete stream/batch test cases.

    This is a mixin: ``self.t_env`` and the assertion helpers (e.g.
    ``assert_equals``) are supplied by the test-case base class it is mixed
    into (see ``StreamTableEnvironmentTests`` below).
    """

    def test_set_sys_executable_for_local_mode(self):
        # In local mode the Python worker executable should default to the
        # interpreter that is running the tests.
        jvm = get_gateway().jvm
        actual_executable = get_j_env_configuration(self.t_env._get_j_env()) \
            .getString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
        self.assertEqual(sys.executable, actual_executable)

    def test_explain(self):
        # explain() without detail flags should still return a plan string.
        schema = RowType() \
            .add('a', DataTypes.INT()) \
            .add('b', DataTypes.STRING()) \
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select(t.a + 1, t.b, t.c)
        actual = result.explain()
        assert isinstance(actual, str)

    def test_explain_with_extended(self):
        # explain() with all detail flags (estimated cost, changelog mode,
        # JSON execution plan) should also return a plain string.
        schema = RowType() \
            .add('a', DataTypes.INT()) \
            .add('b', DataTypes.STRING()) \
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select(t.a + 1, t.b, t.c)
        actual = result.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
                                ExplainDetail.JSON_EXECUTION_PLAN)
        assert isinstance(actual, str)

    def test_register_functions(self):
        # Python and Java UDFs registered through the deprecated register_*
        # APIs should all be reported by list_user_defined_functions().
        t_env = self.t_env

        t_env.register_function(
            "python_scalar_func", udf(lambda i: i, result_type=DataTypes.INT()))

        t_env.register_java_function("scalar_func",
                                     "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.register_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.register_java_function("table_func", "org.apache.flink.table.legacyutils.TableFunc1")

        actual = t_env.list_user_defined_functions()
        expected = ['python_scalar_func', 'scalar_func', 'agg_func', 'table_func']
        self.assert_equals(actual, expected)

    def test_load_module_twice(self):
        # Loading a module under an already-taken name must raise.
        t_env = self.t_env

        self.check_list_modules('core')
        self.check_list_full_modules(1, 'core')

        self.assertRaisesRegex(
            Py4JJavaError, "A module with name 'core' already exists",
            t_env.load_module, 'core', Module(
                get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))

    def test_unload_module_twice(self):
        # Unloading an already-unloaded module must raise on the second call.
        t_env = self.t_env

        t_env.unload_module('core')
        self.check_list_modules()
        self.check_list_full_modules(0)

        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'core' exists",
            t_env.unload_module, 'core')

    def test_use_modules(self):
        # please do not change this order since ModuleMock depends on FunctionDefinitionMock
        _load_specific_flink_module_jars('/flink-table/flink-table-common')
        _load_specific_flink_module_jars('/flink-table/flink-table-api-java')

        t_env = self.t_env

        t_env.load_module('x', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("x")
        ))
        t_env.load_module('y', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("y")
        ))
        self.check_list_modules('core', 'x', 'y')
        self.check_list_full_modules(3, 'core', 'x', 'y')

        # use_modules() re-selects and re-orders the used modules; 'x' stays
        # loaded but becomes unused.
        t_env.use_modules('y', 'core')
        self.check_list_modules('y', 'core')
        self.check_list_full_modules(2, 'y', 'core', 'x')

    def check_list_modules(self, *expected_used_modules: str):
        # Helper: assert the currently *used* modules, in order.
        self.assert_equals(self.t_env.list_modules(), list(expected_used_modules))

    def check_list_full_modules(self, used_module_cnt: int, *expected_loaded_modules: str):
        # Helper: assert all *loaded* modules; the first used_module_cnt
        # entries of expected_loaded_modules are expected to be marked used.
        self.assert_equals(self.t_env.list_full_modules(),
                           [ModuleEntry(module,
                                        expected_loaded_modules.index(module) < used_module_cnt)
                            for module in expected_loaded_modules])

    def test_unload_and_load_module(self):
        # After unloading and re-loading the core module, built-in functions
        # such as concat must resolve again.
        t_env = self.t_env
        t_env.unload_module('core')
        t_env.load_module('core', Module(
            get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
        table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
        self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
        self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])

    def test_create_and_drop_java_function(self):
        # Java functions created via the create_* APIs should be listed, and
        # removable again via the matching drop_* APIs.
        t_env = self.t_env

        t_env.create_java_temporary_system_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.create_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.create_java_temporary_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        self.assert_equals(t_env.list_user_defined_functions(),
                           ['scalar_func', 'agg_func', 'table_func'])

        t_env.drop_temporary_system_function("scalar_func")
        t_env.drop_function("agg_func")
        t_env.drop_temporary_function("table_func")
        self.assert_equals(t_env.list_user_defined_functions(), [])

    def test_create_temporary_table_from_descriptor(self):
        # A temporary table must not appear in the catalog itself, but must be
        # resolvable through the catalog manager with schema/options intact.
        from pyflink.table.schema import Schema

        t_env = self.t_env
        catalog = t_env.get_current_catalog()
        database = t_env.get_current_database()

        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        t_env.create_temporary_table(
            "T",
            TableDescriptor.for_connector("fake")
            .schema(schema)
            .option("a", "Test")
            .build())

        # Not registered as a permanent catalog table.
        self.assertFalse(t_env.get_catalog(catalog).table_exists(ObjectPath(database, "T")))
        gateway = get_gateway()

        catalog_table = CatalogBaseTable(
            t_env._j_tenv.getCatalogManager()
            .getTable(gateway.jvm.ObjectIdentifier.of(catalog, database, "T"))
            .get()
            .getTable())
        self.assertEqual(schema, catalog_table.get_unresolved_schema())
        self.assertEqual("fake", catalog_table.get_options().get("connector"))
        self.assertEqual("Test", catalog_table.get_options().get("a"))

    def test_create_table_from_descriptor(self):
        # A permanent table created from a descriptor should exist in the
        # catalog with the descriptor's schema and options.
        from pyflink.table.schema import Schema

        catalog = self.t_env.get_current_catalog()
        database = self.t_env.get_current_database()

        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        self.t_env.create_table(
            "T",
            TableDescriptor.for_connector("fake")
            .schema(schema)
            .option("a", "Test")
            .build())
        object_path = ObjectPath(database, "T")
        self.assertTrue(self.t_env.get_catalog(catalog).table_exists(object_path))

        catalog_table = self.t_env.get_catalog(catalog).get_table(object_path)
        self.assertEqual(schema, catalog_table.get_unresolved_schema())
        self.assertEqual("fake", catalog_table.get_options().get("connector"))
        self.assertEqual("Test", catalog_table.get_options().get("a"))

    def test_table_from_descriptor(self):
        # from_descriptor() should yield an inline (anonymous) table whose
        # resolved schema and connector options match the descriptor.
        from pyflink.table.schema import Schema

        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        descriptor = TableDescriptor.for_connector("fake").schema(schema).build()

        table = self.t_env.from_descriptor(descriptor)
        self.assertEqual(schema,
                         Schema(Schema.new_builder()._j_builder
                                .fromResolvedSchema(table._j_table.getResolvedSchema()).build()))
        contextResolvedTable = table._j_table.getQueryOperation().getContextResolvedTable()
        options = contextResolvedTable.getTable().getOptions()
        self.assertEqual("fake", options.get("connector"))
class DataStreamConversionTestCases(PyFlinkTestCase):
    """Tests for converting between DataStream and Table (both directions)."""

    def setUp(self) -> None:
        from pyflink.datastream import StreamExecutionEnvironment

        super(DataStreamConversionTestCases, self).setUp()
        config = Configuration()
        # Generous RPC timeout so slow CI machines don't fail the ask calls.
        config.set_string("akka.ask.timeout", "20 s")
        self.env = StreamExecutionEnvironment.get_execution_environment(config)
        self.t_env = StreamTableEnvironment.create(self.env)

        self.env.set_parallelism(2)
        # Bundle size 1 flushes each element through the Python worker
        # immediately, making results observable without batching effects.
        self.t_env.get_config().set(
            "python.fn-execution.bundle.size", "1")
        self.test_sink = DataStreamTestSinkFunction()

    def test_from_data_stream_atomic(self):
        # A stream without explicit type info is treated as an opaque RAW
        # column by the planner, but the values still round-trip correctly.
        data_stream = self.env.from_collection([(1,), (2,), (3,), (4,), (5,)])
        result = self.t_env.from_data_stream(data_stream).execute()
        self.assertEqual("""(
  `f0` RAW('[B', '...')
)""",
                         result._j_table_result.getResolvedSchema().toString())
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in map(str, [Row(1), Row(2), Row(3), Row(4), Row(5)])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)

    def test_to_data_stream_atomic(self):
        # Table -> DataStream for a single-column table of ints.
        table = self.t_env.from_elements([(1,), (2,), (3,)], ["a"])
        ds = self.t_env.to_data_stream(table)
        ds.add_sink(self.test_sink)
        self.env.execute()
        results = self.test_sink.get_results(False)
        results.sort()
        expected = ['+I[1]', '+I[2]', '+I[3]']
        self.assertEqual(expected, results)

    def test_from_data_stream(self):
        self.env.set_parallelism(1)

        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
                                      type_info=Types.ROW([Types.INT(),
                                                           Types.STRING(),
                                                           Types.STRING()]))
        t_env = self.t_env
        # Conversion without expressions: field names default to the row's.
        table = t_env.from_data_stream(ds)
        field_names = ['a', 'b', 'c']
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink("Sink",
                                  source_sink_utils.TestAppendSink(field_names, field_types))
        table.execute_insert("Sink").wait()
        result = source_sink_utils.results()
        expected = ['+I[1, Hi, Hello]', '+I[2, Hello, Hi]']
        self.assert_equals(result, expected)

        # Conversion with explicit column expressions after identity maps.
        ds = ds.map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()])) \
               .map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()]))
        table = t_env.from_data_stream(ds, col('a'), col('b'), col('c'))
        t_env.register_table_sink("ExprSink",
                                  source_sink_utils.TestAppendSink(field_names, field_types))
        table.execute_insert("ExprSink").wait()
        result = source_sink_utils.results()
        self.assert_equals(result, expected)

    def test_from_data_stream_with_schema(self):
        from pyflink.table import Schema

        # Named row fields plus an explicit Schema should be preserved in the
        # resulting table.
        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
                                      type_info=Types.ROW_NAMED(
                                          ["a", "b", "c"],
                                          [Types.INT(), Types.STRING(), Types.STRING()]))

        table = self.t_env.from_data_stream(ds,
                                            Schema.new_builder()
                                                  .column("a", DataTypes.INT())
                                                  .column("b", DataTypes.STRING())
                                                  .column("c", DataTypes.STRING())
                                                  .build())
        result = table.execute()
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in
                               map(str, [Row(1, 'Hi', 'Hello'), Row(2, 'Hello', 'Hi')])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)

    @unittest.skip
    def test_from_and_to_data_stream_event_time(self):
        from pyflink.table import Schema

        # Round-trip with event-time: assign watermarks on the stream, expose
        # the rowtime as metadata in the table, aggregate with SQL windows,
        # then window again on the DataStream side.
        ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
                                      Types.ROW_NAMED(
                                          ["a", "b", "c"],
                                          [Types.LONG(), Types.INT(), Types.STRING()]))
        ds = ds.assign_timestamps_and_watermarks(
            WatermarkStrategy.for_monotonous_timestamps()
                             .with_timestamp_assigner(MyTimestampAssigner()))

        table = self.t_env.from_data_stream(ds,
                                            Schema.new_builder()
                                                  .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
                                                  .watermark("rowtime", "SOURCE_WATERMARK()")
                                                  .build())
        self.assertEqual("""(
  `a` BIGINT,
  `b` INT,
  `c` STRING,
  `rowtime` TIMESTAMP_LTZ(3) *ROWTIME* METADATA,
  WATERMARK FOR `rowtime`: TIMESTAMP_LTZ(3) AS SOURCE_WATERMARK()
)""",
                         table._j_table.getResolvedSchema().toString())
        self.t_env.create_temporary_view("t",
                                         ds,
                                         Schema.new_builder()
                                               .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
                                               .watermark("rowtime", "SOURCE_WATERMARK()")
                                               .build())

        result = self.t_env.execute_sql("SELECT "
                                        "c, SUM(b) "
                                        "FROM t "
                                        "GROUP BY c, TUMBLE(rowtime, INTERVAL '0.005' SECOND)")
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in
                               map(str, [Row('a', 47), Row('c', 1000), Row('c', 1000)])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)

        # Back to DataStream: the rowtime propagates into the window operator.
        ds = self.t_env.to_data_stream(table)
        ds.key_by(lambda k: k.c, key_type=Types.STRING()) \
          .window(MyTumblingEventTimeWindow()) \
          .apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
          .add_sink(self.test_sink)
        self.env.execute()
        expected_results = ['(a,47)', '(c,1000)', '(c,1000)']
        actual_results = self.test_sink.get_results(False)
        expected_results.sort()
        actual_results.sort()
        self.assertEqual(expected_results, actual_results)

    def test_from_and_to_changelog_stream_event_time(self):
        from pyflink.table import Schema

        self.env.set_parallelism(1)
        ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
                                      Types.ROW([Types.LONG(), Types.INT(), Types.STRING()]))
        ds = ds.assign_timestamps_and_watermarks(
            WatermarkStrategy.for_monotonous_timestamps()
                             .with_timestamp_assigner(MyTimestampAssigner()))

        changelog_stream = ds.map(lambda t: Row(t.f1, t.f2),
                                  Types.ROW([Types.INT(), Types.STRING()]))

        # derive physical columns and add a rowtime
        table = self.t_env.from_changelog_stream(
            changelog_stream,
            Schema.new_builder()
                  .column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
                  .column_by_expression("computed", str(col("f1").upper_case))
                  .watermark("rowtime", str(source_watermark()))
                  .build())
        self.t_env.create_temporary_view("t", table)

        # access and reorder columns
        reordered = self.t_env.sql_query("SELECT computed, rowtime, f0 FROM t")

        # write out the rowtime column with fully declared schema
        result = self.t_env.to_changelog_stream(
            reordered,
            Schema.new_builder()
                  .column("f1", DataTypes.STRING())
                  .column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
                  .column_by_expression("ignored", str(col("f1").upper_case))
                  .column("f0", DataTypes.INT())
                  .build()
        )

        # test event time window and field access
        result.key_by(lambda k: k.f1) \
              .window(MyTumblingEventTimeWindow()) \
              .apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
              .add_sink(self.test_sink)
        self.env.execute()
        expected_results = ['(A,47)', '(C,1000)', '(C,1000)']
        actual_results = self.test_sink.get_results(False)
        expected_results.sort()
        actual_results.sort()
        self.assertEqual(expected_results, actual_results)

    def test_to_append_stream(self):
        self.env.set_parallelism(1)
        t_env = StreamTableEnvironment.create(
            self.env,
            environment_settings=EnvironmentSettings.in_streaming_mode())
        table = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hi")], ["a", "b", "c"])
        new_table = table.select(table.a + 1, table.b + 'flink', table.c)
        ds = t_env.to_append_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
                                                                          Types.STRING(),
                                                                          Types.STRING()]))
        test_sink = DataStreamTestSinkFunction()
        ds.add_sink(test_sink)
        self.env.execute("test_to_append_stream")
        result = test_sink.get_results(False)
        expected = ['+I[2, Hiflink, Hello]', '+I[3, Helloflink, Hi]']
        self.assertEqual(result, expected)

    def test_to_retract_stream(self):
        self.env.set_parallelism(1)
        t_env = StreamTableEnvironment.create(
            self.env,
            environment_settings=EnvironmentSettings.in_streaming_mode())
        # Two identical rows grouped by c produce an (add, retract, add)
        # sequence as the running SUM is updated.
        table = t_env.from_elements([(1, "Hi", "Hello"), (1, "Hi", "Hello")], ["a", "b", "c"])
        new_table = table.group_by(table.c).select(table.a.sum, table.c.alias("b"))
        ds = t_env.to_retract_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
                                                                           Types.STRING()]))
        test_sink = DataStreamTestSinkFunction()
        ds.map(lambda x: x).add_sink(test_sink)
        self.env.execute("test_to_retract_stream")
        result = test_sink.get_results(True)
        expected = ["(True, Row(f0=1, f1='Hello'))", "(False, Row(f0=1, f1='Hello'))",
                    "(True, Row(f0=2, f1='Hello'))"]
        self.assertEqual(result, expected)
class StreamTableEnvironmentTests(TableEnvironmentTest, PyFlinkStreamTableTestCase):
    """Streaming-mode TableEnvironment tests (plus the shared mixin above)."""

    def test_collect_with_retract(self):
        # Collecting the result of a group-by aggregation should expose the
        # changelog: per key an INSERT followed by UPDATE_BEFORE/UPDATE_AFTER
        # pairs as the running SUM changes.
        expected_row_kinds = [RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER,
                              RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER]
        element_data = [(1, 2, 'a'),
                        (3, 4, 'b'),
                        (5, 6, 'a'),
                        (7, 8, 'b')]
        field_names = ['a', 'b', 'c']
        source = self.t_env.from_elements(element_data, field_names)
        table_result = self.t_env.execute_sql(
            "SELECT SUM(a), c FROM %s group by c" % source)
        with table_result.collect() as result:
            collected_result = []
            for i in result:
                collected_result.append(i)
            # Compare "row,row-kind" strings; sorted because arrival order
            # across keys is not deterministic.
            collected_result = [str(result) + ',' + str(result.get_row_kind())
                                for result in collected_result]
            expected_result = [Row(1, 'a'), Row(1, 'a'), Row(6, 'a'), Row(3, 'b'),
                               Row(3, 'b'), Row(10, 'b')]
            for i in range(len(expected_result)):
                expected_result[i] = str(expected_result[i]) + ',' + str(expected_row_kinds[i])
            expected_result.sort()
            collected_result.sort()

            self.assertEqual(expected_result, collected_result)

    def test_collect_for_all_data_types(self):
        # Round-trip one row containing every supported data type (including
        # NULL, nested rows, arrays, maps and DECIMAL(38, 18)) through
        # execute()/collect().
        expected_result = [Row(1, None, 1, True, 32767, -2147483648, 1.23,
                               1.98932, bytearray(b'pyflink'), 'pyflink',
                               datetime.date(2014, 9, 13), datetime.time(12, 0, 0, 123000),
                               datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
                               [Row(['[pyflink]']), Row(['[pyflink]']), Row(['[pyflink]'])],
                               {1: Row(['[flink]']), 2: Row(['[pyflink]'])},
                               decimal.Decimal('1000000000000000000.050000000000000000'),
                               decimal.Decimal('1000000000000000000.059999999999999999'))]
        source = self.t_env.from_elements(
            [(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932, bytearray(b'pyflink'), 'pyflink',
              datetime.date(2014, 9, 13), datetime.time(hour=12, minute=0, second=0,
                                                        microsecond=123000),
              datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
              [Row(['pyflink']), Row(['pyflink']), Row(['pyflink'])],
              {1: Row(['flink']), 2: Row(['pyflink'])}, decimal.Decimal('1000000000000000000.05'),
              decimal.Decimal('1000000000000000000.05999999999999999899999999999'))], DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.BIGINT()), DataTypes.FIELD("b", DataTypes.BIGINT()),
                 DataTypes.FIELD("c", DataTypes.TINYINT()),
                 DataTypes.FIELD("d", DataTypes.BOOLEAN()),
                 DataTypes.FIELD("e", DataTypes.SMALLINT()),
                 DataTypes.FIELD("f", DataTypes.INT()),
                 DataTypes.FIELD("g", DataTypes.FLOAT()),
                 DataTypes.FIELD("h", DataTypes.DOUBLE()),
                 DataTypes.FIELD("i", DataTypes.BYTES()),
                 DataTypes.FIELD("j", DataTypes.STRING()),
                 DataTypes.FIELD("k", DataTypes.DATE()),
                 DataTypes.FIELD("l", DataTypes.TIME()),
                 DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
                 DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ROW([DataTypes.FIELD('ss2',
                                                                     DataTypes.STRING())]))),
                 DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.ROW(
                     [DataTypes.FIELD('ss', DataTypes.STRING())]))),
                 DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("q",
                                                                  DataTypes.DECIMAL(38, 18))]))
        table_result = source.execute()
        with table_result.collect() as result:
            collected_result = []
            for i in result:
                collected_result.append(i)
            self.assertEqual(expected_result, collected_result)
class VectorUDT(UserDefinedType):
    """User-defined type describing how a :class:`DenseVector` is stored in a table.

    The SQL representation is ROW(type, size, indices, values); the extra
    size/indices fields allow a sparse layout to share the same physical type.
    """
    @classmethod
    def sql_type(cls):
        """Return the underlying SQL type used to persist vector instances."""
        return DataTypes.ROW(
            [
                DataTypes.FIELD("type", DataTypes.TINYINT()),
                DataTypes.FIELD("size", DataTypes.INT()),
                DataTypes.FIELD("indices", DataTypes.ARRAY(DataTypes.INT())),
                DataTypes.FIELD("values", DataTypes.ARRAY(DataTypes.DOUBLE())),
            ]
        )
    @classmethod
    def module(cls):
        """Return the module path that provides this UDT."""
        return "pyflink.ml.core.linalg"
    def serialize(self, obj):
        """Convert ``obj`` into the ROW layout declared by :meth:`sql_type`.

        :param obj: the value to serialize; must be a :class:`DenseVector`.
        :raises TypeError: if ``obj`` is not a :class:`DenseVector`.
        """
        if isinstance(obj, DenseVector):
            values = [float(v) for v in obj._values]
            # Type tag 1 marks a dense vector; size/indices are unused for dense.
            return 1, None, None, values
        else:
            # Bug fix: the original called str.format() on a %-style template,
            # so the message was raised with the "%r" placeholders unfilled.
            raise TypeError("Cannot serialize %r of type %r" % (obj, type(obj)))
    def deserialize(self, datum):
        """Deserialization is not exercised by these tests; intentionally a no-op."""
        pass
class DenseVector(object):
    """A minimal dense vector backed by a plain Python sequence of numbers."""
    # Associates this class with its user-defined table type.
    __UDT__ = VectorUDT()
    def __init__(self, values):
        self._values = values
    def size(self) -> int:
        """Number of entries in the vector."""
        return len(self._values)
    def get(self, i: int):
        """Entry at position *i*."""
        return self._values[i]
    def to_array(self):
        """The raw backing sequence."""
        return self._values
    @property
    def values(self):
        """The raw backing sequence, exposed read-only."""
        return self._values
    def __str__(self):
        return "[%s]" % ",".join(str(entry) for entry in self._values)
    def __repr__(self):
        return "DenseVector([%s])" % ", ".join(map(str, self._values))
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
    """TableEnvironment tests run in batch mode: UDT handling, multi-sink explain,
    Java function registration, and module (un)loading."""
    def test_udt(self):
        """Builds a table whose first column uses the VectorUDT user-defined type."""
        self.t_env.from_elements([
            (DenseVector([1, 2, 3, 4]), 0., 1.),
            (DenseVector([2, 2, 3, 4]), 0., 2.),
            (DenseVector([3, 2, 3, 4]), 0., 3.),
            (DenseVector([4, 2, 3, 4]), 0., 4.),
            (DenseVector([5, 2, 3, 4]), 0., 5.),
            (DenseVector([11, 2, 3, 4]), 1., 1.),
            (DenseVector([12, 2, 3, 4]), 1., 2.),
            (DenseVector([13, 2, 3, 4]), 1., 3.),
            (DenseVector([14, 2, 3, 4]), 1., 4.),
            (DenseVector([15, 2, 3, 4]), 1., 5.),
        ],
            DataTypes.ROW([
                DataTypes.FIELD("features", VectorUDT()),
                DataTypes.FIELD("label", DataTypes.DOUBLE()),
                DataTypes.FIELD("weight", DataTypes.DOUBLE())]))
    def test_explain_with_multi_sinks(self):
        """explain() on a statement set with two CSV sinks returns a plan string."""
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sink1",
            CsvTableSink(field_names, field_types, "path1"))
        t_env.register_table_sink(
            "sink2",
            CsvTableSink(field_names, field_types, "path2"))
        stmt_set = t_env.create_statement_set()
        stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
        stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
        actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
                                  ExplainDetail.JSON_EXECUTION_PLAN)
        self.assertIsInstance(actual, str)
    def test_register_java_function(self):
        """Registered Java scalar/aggregate/table functions appear in the UDF listing."""
        t_env = self.t_env
        t_env.register_java_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.register_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.register_java_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        actual = t_env.list_user_defined_functions()
        expected = ['scalar_func', 'agg_func', 'table_func']
        self.assert_equals(actual, expected)
    def test_load_module_twice(self):
        """Loading the already-present 'core' module raises a Java-side error."""
        self.check_list_modules('core')
        self.check_list_full_modules(1, 'core')
        self.assertRaisesRegex(
            Py4JJavaError, "A module with name 'core' already exists",
            self.t_env.load_module, 'core', Module(
                get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
    def test_unload_module_twice(self):
        """Unloading 'core' twice raises on the second attempt."""
        self.t_env.unload_module('core')
        self.check_list_modules()
        self.check_list_full_modules(0)
        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'core' exists",
            self.t_env.unload_module, 'core')
    def test_use_duplicated_modules(self):
        """use_modules() rejects a module name appearing twice."""
        self.assertRaisesRegex(
            Py4JJavaError, "Module 'core' appears more than once",
            self.t_env.use_modules, 'core', 'core')
    def test_use_nonexistent_module(self):
        """use_modules() rejects an unknown module name."""
        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'dummy' exists",
            self.t_env.use_modules, 'core', 'dummy')
    def test_use_modules(self):
        """use_modules() reorders which loaded modules are active (used vs. loaded)."""
        # please do not change this order since ModuleMock depends on FunctionDefinitionMock
        _load_specific_flink_module_jars('/flink-table/flink-table-common')
        _load_specific_flink_module_jars('/flink-table/flink-table-api-java')
        self.t_env.load_module('x', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("x")
        ))
        self.t_env.load_module('y', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("y")
        ))
        self.check_list_modules('core', 'x', 'y')
        self.check_list_full_modules(3, 'core', 'x', 'y')
        self.t_env.use_modules('y', 'core')
        self.check_list_modules('y', 'core')
        # 'x' stays loaded but is no longer used after use_modules('y', 'core').
        self.check_list_full_modules(2, 'y', 'core', 'x')
    def check_list_modules(self, *expected_used_modules: str):
        """Assert the currently *used* modules match the given names, in order."""
        self.assert_equals(self.t_env.list_modules(), list(expected_used_modules))
    def check_list_full_modules(self, used_module_cnt: int, *expected_loaded_modules: str):
        """Assert all *loaded* modules match, marking the first ``used_module_cnt`` as used."""
        self.assert_equals(self.t_env.list_full_modules(),
                           [ModuleEntry(module,
                                        expected_loaded_modules.index(module) < used_module_cnt)
                            for module in expected_loaded_modules])
    def test_unload_and_load_module(self):
        """Reloading 'core' after unloading restores built-in functions (concat)."""
        t_env = self.t_env
        t_env.unload_module('core')
        t_env.load_module('core', Module(
            get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
        table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
        self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
        self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
    def test_create_and_drop_java_function(self):
        """create_*/drop_* for Java functions add and remove entries from the UDF listing."""
        t_env = self.t_env
        t_env.create_java_temporary_system_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.create_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.create_java_temporary_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        self.assert_equals(t_env.list_user_defined_functions(),
                           ['scalar_func', 'agg_func', 'table_func'])
        t_env.drop_temporary_system_function("scalar_func")
        t_env.drop_function("agg_func")
        t_env.drop_temporary_function("table_func")
        self.assert_equals(t_env.list_user_defined_functions(), [])
class MyTimestampAssigner(TimestampAssigner):
    """Derives the event timestamp from the first field of each record."""
    def extract_timestamp(self, value, record_timestamp) -> int:
        first_field = value[0]
        return int(first_field)
class MyTumblingEventTimeWindow(MergingWindowAssigner[tuple, TimeWindow]):
    """Event-time window assigner creating 5-unit windows and merging overlaps."""
    def merge_windows(self,
                      windows,
                      callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
        """Reports every overlapping adjacent pair (in sorted order) as a merge."""
        ordered = sorted(windows)
        for left, right in zip(ordered, ordered[1:]):
            if left.end > right.start:
                callback.merge([left, right], TimeWindow(left.start, right.end))
    def assign_windows(self,
                       element: tuple,
                       timestamp: int,
                       context):
        """Each element gets one window of length 5 starting at its timestamp."""
        return [TimeWindow(timestamp, timestamp + 5)]
    def get_default_trigger(self, env) -> Trigger[tuple, TimeWindow]:
        return SimpleTimeWindowTrigger()
    def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
        return TimeWindowSerializer()
    def is_event_time(self) -> bool:
        return True
class SimpleTimeWindowTrigger(Trigger[tuple, TimeWindow]):
    """Event-time-only trigger: fires and purges once the watermark reaches the window end."""
    def on_element(self,
                   element: tuple,
                   timestamp: int,
                   window: TimeWindow,
                   ctx: 'Trigger.TriggerContext') -> TriggerResult:
        # Elements never fire the window by themselves.
        return TriggerResult.CONTINUE
    def on_processing_time(self,
                           time: int,
                           window: TimeWindow,
                           ctx: 'Trigger.TriggerContext') -> TriggerResult:
        # Processing time is ignored by this trigger.
        return TriggerResult.CONTINUE
    def on_event_time(self,
                      time: int,
                      window: TimeWindow,
                      ctx: 'Trigger.TriggerContext') -> TriggerResult:
        # Guard clause: keep waiting until the watermark passes the window end.
        if time < window.max_timestamp():
            return TriggerResult.CONTINUE
        return TriggerResult.FIRE_AND_PURGE
    def on_merge(self,
                 window: TimeWindow,
                 ctx: 'Trigger.OnMergeContext') -> None:
        # No per-window state to reconcile on merge.
        pass
    def clear(self,
              window: TimeWindow,
              ctx: 'Trigger.TriggerContext') -> None:
        # No timers or state registered, so nothing to clean up.
        pass
class SumWindowFunction(WindowFunction[tuple, tuple, str, TimeWindow]):
    """Emits one (key, sum of each record's second field) pair per fired window."""
    def apply(self, key: str, window: TimeWindow, inputs: Iterable[tuple]):
        total = sum(record[1] for record in inputs)
        return [(key, total)]
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import sys
import unittest
from py4j.protocol import Py4JJavaError
from typing import Iterable
from pyflink.common import RowKind, WatermarkStrategy, Configuration
from pyflink.common.serializer import TypeSerializer
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import MergingWindowAssigner, TimeWindow, Trigger, TriggerResult
from pyflink.datastream.functions import WindowFunction
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.datastream.window import TimeWindowSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes, CsvTableSink, StreamTableEnvironment, EnvironmentSettings, \
Module, ResultKind, ModuleEntry
from pyflink.table.catalog import ObjectPath, CatalogBaseTable
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.expressions import col, source_watermark
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.types import RowType, Row, UserDefinedType
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import (
PyFlinkBatchTableTestCase, PyFlinkStreamTableTestCase, PyFlinkTestCase,
_load_specific_flink_module_jars)
from pyflink.util.java_utils import get_j_env_configuration
class TableEnvironmentTest(object):
    """Mixin of TableEnvironment tests shared by stream/batch test cases; the
    concrete test case (providing ``self.t_env``) is mixed in by subclasses."""
    def test_set_sys_executable_for_local_mode(self):
        """Local mode picks up the current Python interpreter as the worker executable."""
        jvm = get_gateway().jvm
        actual_executable = get_j_env_configuration(self.t_env._get_j_env()) \
            .getString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
        self.assertEqual(sys.executable, actual_executable)
    def test_explain(self):
        """explain() on a simple projection returns a plan string."""
        schema = RowType() \
            .add('a', DataTypes.INT()) \
            .add('b', DataTypes.STRING()) \
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select(t.a + 1, t.b, t.c)
        actual = result.explain()
        assert isinstance(actual, str)
    def test_explain_with_extended(self):
        """explain() with extra detail flags still returns a plan string."""
        schema = RowType() \
            .add('a', DataTypes.INT()) \
            .add('b', DataTypes.STRING()) \
            .add('c', DataTypes.STRING())
        t_env = self.t_env
        t = t_env.from_elements([], schema)
        result = t.select(t.a + 1, t.b, t.c)
        actual = result.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
                                ExplainDetail.JSON_EXECUTION_PLAN)
        assert isinstance(actual, str)
    def test_register_functions(self):
        """Python and Java function registrations all show up in the UDF listing."""
        t_env = self.t_env
        t_env.register_function(
            "python_scalar_func", udf(lambda i: i, result_type=DataTypes.INT()))
        t_env.register_java_function("scalar_func",
                                     "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.register_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.register_java_function("table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        actual = t_env.list_user_defined_functions()
        expected = ['python_scalar_func', 'scalar_func', 'agg_func', 'table_func']
        self.assert_equals(actual, expected)
    def test_load_module_twice(self):
        """Loading the already-present 'core' module raises a Java-side error."""
        t_env = self.t_env
        self.check_list_modules('core')
        self.check_list_full_modules(1, 'core')
        self.assertRaisesRegex(
            Py4JJavaError, "A module with name 'core' already exists",
            t_env.load_module, 'core', Module(
                get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
    def test_unload_module_twice(self):
        """Unloading 'core' twice raises on the second attempt."""
        t_env = self.t_env
        t_env.unload_module('core')
        self.check_list_modules()
        self.check_list_full_modules(0)
        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'core' exists",
            t_env.unload_module, 'core')
    def test_use_modules(self):
        """use_modules() reorders which loaded modules are active (used vs. loaded)."""
        # please do not change this order since ModuleMock depends on FunctionDefinitionMock
        _load_specific_flink_module_jars('/flink-table/flink-table-common')
        _load_specific_flink_module_jars('/flink-table/flink-table-api-java')
        t_env = self.t_env
        t_env.load_module('x', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("x")
        ))
        t_env.load_module('y', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("y")
        ))
        self.check_list_modules('core', 'x', 'y')
        self.check_list_full_modules(3, 'core', 'x', 'y')
        t_env.use_modules('y', 'core')
        self.check_list_modules('y', 'core')
        # 'x' stays loaded but is no longer used after use_modules('y', 'core').
        self.check_list_full_modules(2, 'y', 'core', 'x')
    def check_list_modules(self, *expected_used_modules: str):
        """Assert the currently *used* modules match the given names, in order."""
        self.assert_equals(self.t_env.list_modules(), list(expected_used_modules))
    def check_list_full_modules(self, used_module_cnt: int, *expected_loaded_modules: str):
        """Assert all *loaded* modules match, marking the first ``used_module_cnt`` as used."""
        self.assert_equals(self.t_env.list_full_modules(),
                           [ModuleEntry(module,
                                        expected_loaded_modules.index(module) < used_module_cnt)
                            for module in expected_loaded_modules])
    def test_unload_and_load_module(self):
        """Reloading 'core' after unloading restores built-in functions (concat)."""
        t_env = self.t_env
        t_env.unload_module('core')
        t_env.load_module('core', Module(
            get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
        table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
        self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
        self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
    def test_create_and_drop_java_function(self):
        """create_*/drop_* for Java functions add and remove entries from the UDF listing."""
        t_env = self.t_env
        t_env.create_java_temporary_system_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.create_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.create_java_temporary_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        self.assert_equals(t_env.list_user_defined_functions(),
                           ['scalar_func', 'agg_func', 'table_func'])
        t_env.drop_temporary_system_function("scalar_func")
        t_env.drop_function("agg_func")
        t_env.drop_temporary_function("table_func")
        self.assert_equals(t_env.list_user_defined_functions(), [])
    def test_create_temporary_table_from_descriptor(self):
        """A temporary table created from a descriptor is not in the catalog but
        is resolvable through the catalog manager."""
        from pyflink.table.schema import Schema
        t_env = self.t_env
        catalog = t_env.get_current_catalog()
        database = t_env.get_current_database()
        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        t_env.create_temporary_table(
            "T",
            TableDescriptor.for_connector("fake")
            .schema(schema)
            .option("a", "Test")
            .build())
        # Temporary tables must not be registered in the (permanent) catalog.
        self.assertFalse(t_env.get_catalog(catalog).table_exists(ObjectPath(database, "T")))
        gateway = get_gateway()
        catalog_table = CatalogBaseTable(
            t_env._j_tenv.getCatalogManager()
            .getTable(gateway.jvm.ObjectIdentifier.of(catalog, database, "T"))
            .get()
            .getTable())
        self.assertEqual(schema, catalog_table.get_unresolved_schema())
        self.assertEqual("fake", catalog_table.get_options().get("connector"))
        self.assertEqual("Test", catalog_table.get_options().get("a"))
    def test_create_table_from_descriptor(self):
        """A permanent table created from a descriptor is registered in the catalog."""
        from pyflink.table.schema import Schema
        catalog = self.t_env.get_current_catalog()
        database = self.t_env.get_current_database()
        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        self.t_env.create_table(
            "T",
            TableDescriptor.for_connector("fake")
            .schema(schema)
            .option("a", "Test")
            .build())
        object_path = ObjectPath(database, "T")
        self.assertTrue(self.t_env.get_catalog(catalog).table_exists(object_path))
        catalog_table = self.t_env.get_catalog(catalog).get_table(object_path)
        self.assertEqual(schema, catalog_table.get_unresolved_schema())
        self.assertEqual("fake", catalog_table.get_options().get("connector"))
        self.assertEqual("Test", catalog_table.get_options().get("a"))
    def test_table_from_descriptor(self):
        """from_descriptor() yields a table whose resolved schema and options match."""
        from pyflink.table.schema import Schema
        schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
        descriptor = TableDescriptor.for_connector("fake").schema(schema).build()
        table = self.t_env.from_descriptor(descriptor)
        self.assertEqual(schema,
                         Schema(Schema.new_builder()._j_builder
                                .fromResolvedSchema(table._j_table.getResolvedSchema()).build()))
        contextResolvedTable = table._j_table.getQueryOperation().getContextResolvedTable()
        options = contextResolvedTable.getTable().getOptions()
        self.assertEqual("fake", options.get("connector"))
class DataStreamConversionTestCases(PyFlinkTestCase):
    """Round-trip conversion tests between the DataStream and Table APIs."""
    def setUp(self) -> None:
        """Creates a stream environment, a table environment on top of it, and a
        capturing test sink; bundle size 1 forces per-record Python execution."""
        from pyflink.datastream import StreamExecutionEnvironment
        super(DataStreamConversionTestCases, self).setUp()
        config = Configuration()
        config.set_string("akka.ask.timeout", "20 s")
        self.env = StreamExecutionEnvironment.get_execution_environment(config)
        self.t_env = StreamTableEnvironment.create(self.env)
        self.env.set_parallelism(2)
        self.t_env.get_config().set(
            "python.fn-execution.bundle.size", "1")
        self.test_sink = DataStreamTestSinkFunction()
    def test_from_data_stream_atomic(self):
        """A stream without declared type info becomes a single RAW column table."""
        data_stream = self.env.from_collection([(1,), (2,), (3,), (4,), (5,)])
        result = self.t_env.from_data_stream(data_stream).execute()
        self.assertEqual("""(
  `f0` RAW('[B', '...')
)""",
                         result._j_table_result.getResolvedSchema().toString())
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in map(str, [Row(1), Row(2), Row(3), Row(4), Row(5)])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)
    def test_to_data_stream_atomic(self):
        """to_data_stream() on a single-column table produces +I records."""
        table = self.t_env.from_elements([(1,), (2,), (3,)], ["a"])
        ds = self.t_env.to_data_stream(table)
        ds.add_sink(self.test_sink)
        self.env.execute()
        results = self.test_sink.get_results(False)
        results.sort()
        expected = ['+I[1]', '+I[2]', '+I[3]']
        self.assertEqual(expected, results)
    def test_from_data_stream(self):
        """from_data_stream() with ROW type info, with and without column expressions."""
        self.env.set_parallelism(1)
        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
                                      type_info=Types.ROW([Types.INT(),
                                                           Types.STRING(),
                                                           Types.STRING()]))
        t_env = self.t_env
        table = t_env.from_data_stream(ds)
        field_names = ['a', 'b', 'c']
        field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink("Sink",
                                  source_sink_utils.TestAppendSink(field_names, field_types))
        table.execute_insert("Sink").wait()
        result = source_sink_utils.results()
        expected = ['+I[1, Hi, Hello]', '+I[2, Hello, Hi]']
        self.assert_equals(result, expected)
        # Same conversion after identity maps, selecting columns explicitly.
        ds = ds.map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()])) \
               .map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()]))
        table = t_env.from_data_stream(ds, col('a'), col('b'), col('c'))
        t_env.register_table_sink("ExprSink",
                                  source_sink_utils.TestAppendSink(field_names, field_types))
        table.execute_insert("ExprSink").wait()
        result = source_sink_utils.results()
        self.assert_equals(result, expected)
    def test_from_data_stream_with_schema(self):
        """from_data_stream() with an explicit Schema keeps the named columns."""
        from pyflink.table import Schema
        ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
                                      type_info=Types.ROW_NAMED(
                                          ["a", "b", "c"],
                                          [Types.INT(), Types.STRING(), Types.STRING()]))
        table = self.t_env.from_data_stream(ds,
                                            Schema.new_builder()
                                                  .column("a", DataTypes.INT())
                                                  .column("b", DataTypes.STRING())
                                                  .column("c", DataTypes.STRING())
                                                  .build())
        result = table.execute()
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in
                               map(str, [Row(1, 'Hi', 'Hello'), Row(2, 'Hello', 'Hi')])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)
    @unittest.skip
    def test_from_and_to_data_stream_event_time(self):
        """Event-time round trip: rowtime metadata column, SQL windowing, and a
        custom merging window back on the DataStream side (currently skipped)."""
        from pyflink.table import Schema
        ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
                                      Types.ROW_NAMED(
                                          ["a", "b", "c"],
                                          [Types.LONG(), Types.INT(), Types.STRING()]))
        ds = ds.assign_timestamps_and_watermarks(
            WatermarkStrategy.for_monotonous_timestamps()
            .with_timestamp_assigner(MyTimestampAssigner()))
        table = self.t_env.from_data_stream(ds,
                                            Schema.new_builder()
                                                  .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
                                                  .watermark("rowtime", "SOURCE_WATERMARK()")
                                                  .build())
        self.assertEqual("""(
  `a` BIGINT,
  `b` INT,
  `c` STRING,
  `rowtime` TIMESTAMP_LTZ(3) *ROWTIME* METADATA,
  WATERMARK FOR `rowtime`: TIMESTAMP_LTZ(3) AS SOURCE_WATERMARK()
)""",
                         table._j_table.getResolvedSchema().toString())
        self.t_env.create_temporary_view("t",
                                         ds,
                                         Schema.new_builder()
                                               .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
                                               .watermark("rowtime", "SOURCE_WATERMARK()")
                                               .build())
        result = self.t_env.execute_sql("SELECT "
                                        "c, SUM(b) "
                                        "FROM t "
                                        "GROUP BY c, TUMBLE(rowtime, INTERVAL '0.005' SECOND)")
        with result.collect() as result:
            collected_result = [str(item) for item in result]
            expected_result = [item for item in
                               map(str, [Row('a', 47), Row('c', 1000), Row('c', 1000)])]
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)
        ds = self.t_env.to_data_stream(table)
        ds.key_by(lambda k: k.c, key_type=Types.STRING()) \
            .window(MyTumblingEventTimeWindow()) \
            .apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
            .add_sink(self.test_sink)
        self.env.execute()
        expected_results = ['(a,47)', '(c,1000)', '(c,1000)']
        actual_results = self.test_sink.get_results(False)
        expected_results.sort()
        actual_results.sort()
        self.assertEqual(expected_results, actual_results)
    def test_from_and_to_changelog_stream_event_time(self):
        """Changelog round trip with rowtime metadata, computed columns, and a
        custom event-time window on the resulting stream."""
        from pyflink.table import Schema
        self.env.set_parallelism(1)
        ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
                                      Types.ROW([Types.LONG(), Types.INT(), Types.STRING()]))
        ds = ds.assign_timestamps_and_watermarks(
            WatermarkStrategy.for_monotonous_timestamps()
            .with_timestamp_assigner(MyTimestampAssigner()))
        changelog_stream = ds.map(lambda t: Row(t.f1, t.f2),
                                  Types.ROW([Types.INT(), Types.STRING()]))
        # derive physical columns and add a rowtime
        table = self.t_env.from_changelog_stream(
            changelog_stream,
            Schema.new_builder()
                  .column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
                  .column_by_expression("computed", str(col("f1").upper_case))
                  .watermark("rowtime", str(source_watermark()))
                  .build())
        self.t_env.create_temporary_view("t", table)
        # access and reorder columns
        reordered = self.t_env.sql_query("SELECT computed, rowtime, f0 FROM t")
        # write out the rowtime column with fully declared schema
        result = self.t_env.to_changelog_stream(
            reordered,
            Schema.new_builder()
                  .column("f1", DataTypes.STRING())
                  .column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
                  .column_by_expression("ignored", str(col("f1").upper_case))
                  .column("f0", DataTypes.INT())
                  .build()
        )
        # test event time window and field access
        result.key_by(lambda k: k.f1) \
            .window(MyTumblingEventTimeWindow()) \
            .apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
            .add_sink(self.test_sink)
        self.env.execute()
        expected_results = ['(A,47)', '(C,1000)', '(C,1000)']
        actual_results = self.test_sink.get_results(False)
        expected_results.sort()
        actual_results.sort()
        self.assertEqual(expected_results, actual_results)
    def test_to_append_stream(self):
        """to_append_stream() emits one insert-only record per table row."""
        self.env.set_parallelism(1)
        t_env = StreamTableEnvironment.create(
            self.env,
            environment_settings=EnvironmentSettings.in_streaming_mode())
        table = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hi")], ["a", "b", "c"])
        new_table = table.select(table.a + 1, table.b + 'flink', table.c)
        ds = t_env.to_append_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
                                                                          Types.STRING(),
                                                                          Types.STRING()]))
        test_sink = DataStreamTestSinkFunction()
        ds.add_sink(test_sink)
        self.env.execute("test_to_append_stream")
        result = test_sink.get_results(False)
        expected = ['+I[2, Hiflink, Hello]', '+I[3, Helloflink, Hi]']
        self.assertEqual(result, expected)
    def test_to_retract_stream(self):
        """to_retract_stream() emits (flag, row) pairs with retractions for updates."""
        self.env.set_parallelism(1)
        t_env = StreamTableEnvironment.create(
            self.env,
            environment_settings=EnvironmentSettings.in_streaming_mode())
        table = t_env.from_elements([(1, "Hi", "Hello"), (1, "Hi", "Hello")], ["a", "b", "c"])
        new_table = table.group_by(table.c).select(table.a.sum, table.c.alias("b"))
        ds = t_env.to_retract_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
                                                                           Types.STRING()]))
        test_sink = DataStreamTestSinkFunction()
        ds.map(lambda x: x).add_sink(test_sink)
        self.env.execute("test_to_retract_stream")
        result = test_sink.get_results(True)
        expected = ["(True, Row(f0=1, f1='Hello'))", "(False, Row(f0=1, f1='Hello'))",
                    "(True, Row(f0=2, f1='Hello'))"]
        self.assertEqual(result, expected)
class StreamTableEnvironmentTests(TableEnvironmentTest, PyFlinkStreamTableTestCase):
    """Streaming-mode TableEnvironment tests: collect() with changelog row kinds
    and collect() across all supported data types."""
    def test_collect_with_retract(self):
        """A grouped aggregation produces INSERT/UPDATE_BEFORE/UPDATE_AFTER row kinds."""
        expected_row_kinds = [RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER,
                              RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER]
        element_data = [(1, 2, 'a'),
                        (3, 4, 'b'),
                        (5, 6, 'a'),
                        (7, 8, 'b')]
        field_names = ['a', 'b', 'c']
        source = self.t_env.from_elements(element_data, field_names)
        table_result = self.t_env.execute_sql(
            "SELECT SUM(a), c FROM %s group by c" % source)
        with table_result.collect() as result:
            collected_result = []
            for i in result:
                collected_result.append(i)
            # Pair each row with its row kind so both value and kind are compared.
            collected_result = [str(result) + ',' + str(result.get_row_kind())
                                for result in collected_result]
            expected_result = [Row(1, 'a'), Row(1, 'a'), Row(6, 'a'), Row(3, 'b'),
                               Row(3, 'b'), Row(10, 'b')]
            for i in range(len(expected_result)):
                expected_result[i] = str(expected_result[i]) + ',' + str(expected_row_kinds[i])
            expected_result.sort()
            collected_result.sort()
            self.assertEqual(expected_result, collected_result)
    def test_collect_for_all_data_types(self):
        """End-to-end collect() of a row covering every supported data type,
        checking the client-side Python conversion of each value."""
        expected_result = [Row(1, None, 1, True, 32767, -2147483648, 1.23,
                               1.98932, bytearray(b'pyflink'), 'pyflink',
                               datetime.date(2014, 9, 13), datetime.time(12, 0, 0, 123000),
                               datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
                               [Row(['[pyflink]']), Row(['[pyflink]']), Row(['[pyflink]'])],
                               {1: Row(['[flink]']), 2: Row(['[pyflink]'])},
                               decimal.Decimal('1000000000000000000.050000000000000000'),
                               decimal.Decimal('1000000000000000000.059999999999999999'))]
        source = self.t_env.from_elements(
            [(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932, bytearray(b'pyflink'), 'pyflink',
              datetime.date(2014, 9, 13), datetime.time(hour=12, minute=0, second=0,
                                                        microsecond=123000),
              datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
              [Row(['pyflink']), Row(['pyflink']), Row(['pyflink'])],
              {1: Row(['flink']), 2: Row(['pyflink'])}, decimal.Decimal('1000000000000000000.05'),
              decimal.Decimal('1000000000000000000.05999999999999999899999999999'))], DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.BIGINT()), DataTypes.FIELD("b", DataTypes.BIGINT()),
                 DataTypes.FIELD("c", DataTypes.TINYINT()),
                 DataTypes.FIELD("d", DataTypes.BOOLEAN()),
                 DataTypes.FIELD("e", DataTypes.SMALLINT()),
                 DataTypes.FIELD("f", DataTypes.INT()),
                 DataTypes.FIELD("g", DataTypes.FLOAT()),
                 DataTypes.FIELD("h", DataTypes.DOUBLE()),
                 DataTypes.FIELD("i", DataTypes.BYTES()),
                 DataTypes.FIELD("j", DataTypes.STRING()),
                 DataTypes.FIELD("k", DataTypes.DATE()),
                 DataTypes.FIELD("l", DataTypes.TIME()),
                 DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
                 DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ROW([DataTypes.FIELD('ss2',
                                 DataTypes.STRING())]))),
                 DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.ROW(
                     [DataTypes.FIELD('ss', DataTypes.STRING())]))),
                 DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("q",
                 DataTypes.DECIMAL(38, 18))]))
        table_result = source.execute()
        with table_result.collect() as result:
            collected_result = []
            for i in result:
                collected_result.append(i)
            self.assertEqual(expected_result, collected_result)
class VectorUDT(UserDefinedType):
    """User-defined type describing how a :class:`DenseVector` is stored in a table.

    The SQL representation is ROW(type, size, indices, values); the extra
    size/indices fields allow a sparse layout to share the same physical type.
    """
    @classmethod
    def sql_type(cls):
        """Return the underlying SQL type used to persist vector instances."""
        return DataTypes.ROW(
            [
                DataTypes.FIELD("type", DataTypes.TINYINT()),
                DataTypes.FIELD("size", DataTypes.INT()),
                DataTypes.FIELD("indices", DataTypes.ARRAY(DataTypes.INT())),
                DataTypes.FIELD("values", DataTypes.ARRAY(DataTypes.DOUBLE())),
            ]
        )
    @classmethod
    def module(cls):
        """Return the module path that provides this UDT."""
        return "pyflink.ml.core.linalg"
    def serialize(self, obj):
        """Convert ``obj`` into the ROW layout declared by :meth:`sql_type`.

        :param obj: the value to serialize; must be a :class:`DenseVector`.
        :raises TypeError: if ``obj`` is not a :class:`DenseVector`.
        """
        if isinstance(obj, DenseVector):
            values = [float(v) for v in obj._values]
            # Type tag 1 marks a dense vector; size/indices are unused for dense.
            return 1, None, None, values
        else:
            # Bug fix: the original called str.format() on a %-style template,
            # so the message was raised with the "%r" placeholders unfilled.
            raise TypeError("Cannot serialize %r of type %r" % (obj, type(obj)))
    def deserialize(self, datum):
        """Deserialization is not exercised by these tests; intentionally a no-op."""
        pass
class DenseVector(object):
    """A minimal dense vector backed by a plain Python sequence of numbers."""
    # Associates this class with its user-defined table type.
    __UDT__ = VectorUDT()
    def __init__(self, values):
        self._values = values
    def size(self) -> int:
        """Number of entries in the vector."""
        return len(self._values)
    def get(self, i: int):
        """Entry at position *i*."""
        return self._values[i]
    def to_array(self):
        """The raw backing sequence."""
        return self._values
    @property
    def values(self):
        """The raw backing sequence, exposed read-only."""
        return self._values
    def __str__(self):
        return "[%s]" % ",".join(str(entry) for entry in self._values)
    def __repr__(self):
        return "DenseVector([%s])" % ", ".join(map(str, self._values))
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
    """TableEnvironment tests run in batch mode: UDT handling, multi-sink explain,
    Java function registration, and module (un)loading."""
    def test_udt(self):
        """Builds a table whose first column uses the VectorUDT user-defined type."""
        self.t_env.from_elements([
            (DenseVector([1, 2, 3, 4]), 0., 1.),
            (DenseVector([2, 2, 3, 4]), 0., 2.),
            (DenseVector([3, 2, 3, 4]), 0., 3.),
            (DenseVector([4, 2, 3, 4]), 0., 4.),
            (DenseVector([5, 2, 3, 4]), 0., 5.),
            (DenseVector([11, 2, 3, 4]), 1., 1.),
            (DenseVector([12, 2, 3, 4]), 1., 2.),
            (DenseVector([13, 2, 3, 4]), 1., 3.),
            (DenseVector([14, 2, 3, 4]), 1., 4.),
            (DenseVector([15, 2, 3, 4]), 1., 5.),
        ],
            DataTypes.ROW([
                DataTypes.FIELD("features", VectorUDT()),
                DataTypes.FIELD("label", DataTypes.DOUBLE()),
                DataTypes.FIELD("weight", DataTypes.DOUBLE())]))
    def test_explain_with_multi_sinks(self):
        """explain() on a statement set with two CSV sinks returns a plan string."""
        t_env = self.t_env
        source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
        field_names = ["a", "b", "c"]
        field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
        t_env.register_table_sink(
            "sink1",
            CsvTableSink(field_names, field_types, "path1"))
        t_env.register_table_sink(
            "sink2",
            CsvTableSink(field_names, field_types, "path2"))
        stmt_set = t_env.create_statement_set()
        stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
        stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
        actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
                                  ExplainDetail.JSON_EXECUTION_PLAN)
        self.assertIsInstance(actual, str)
    def test_register_java_function(self):
        """Registered Java scalar/aggregate/table functions appear in the UDF listing."""
        t_env = self.t_env
        t_env.register_java_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.register_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.register_java_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        actual = t_env.list_user_defined_functions()
        expected = ['scalar_func', 'agg_func', 'table_func']
        self.assert_equals(actual, expected)
    def test_load_module_twice(self):
        """Loading the already-present 'core' module raises a Java-side error."""
        self.check_list_modules('core')
        self.check_list_full_modules(1, 'core')
        self.assertRaisesRegex(
            Py4JJavaError, "A module with name 'core' already exists",
            self.t_env.load_module, 'core', Module(
                get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
    def test_unload_module_twice(self):
        """Unloading 'core' twice raises on the second attempt."""
        self.t_env.unload_module('core')
        self.check_list_modules()
        self.check_list_full_modules(0)
        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'core' exists",
            self.t_env.unload_module, 'core')
    def test_use_duplicated_modules(self):
        """use_modules() rejects a module name appearing twice."""
        self.assertRaisesRegex(
            Py4JJavaError, "Module 'core' appears more than once",
            self.t_env.use_modules, 'core', 'core')
    def test_use_nonexistent_module(self):
        """use_modules() rejects an unknown module name."""
        self.assertRaisesRegex(
            Py4JJavaError, "No module with name 'dummy' exists",
            self.t_env.use_modules, 'core', 'dummy')
    def test_use_modules(self):
        """use_modules() reorders which loaded modules are active (used vs. loaded)."""
        # please do not change this order since ModuleMock depends on FunctionDefinitionMock
        _load_specific_flink_module_jars('/flink-table/flink-table-common')
        _load_specific_flink_module_jars('/flink-table/flink-table-api-java')
        self.t_env.load_module('x', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("x")
        ))
        self.t_env.load_module('y', Module(
            get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("y")
        ))
        self.check_list_modules('core', 'x', 'y')
        self.check_list_full_modules(3, 'core', 'x', 'y')
        self.t_env.use_modules('y', 'core')
        self.check_list_modules('y', 'core')
        # 'x' stays loaded but is no longer used after use_modules('y', 'core').
        self.check_list_full_modules(2, 'y', 'core', 'x')
    def check_list_modules(self, *expected_used_modules: str):
        """Assert the currently *used* modules match the given names, in order."""
        self.assert_equals(self.t_env.list_modules(), list(expected_used_modules))
    def check_list_full_modules(self, used_module_cnt: int, *expected_loaded_modules: str):
        """Assert all *loaded* modules match, marking the first ``used_module_cnt`` as used."""
        self.assert_equals(self.t_env.list_full_modules(),
                           [ModuleEntry(module,
                                        expected_loaded_modules.index(module) < used_module_cnt)
                            for module in expected_loaded_modules])
    def test_unload_and_load_module(self):
        """Reloading 'core' after unloading restores built-in functions (concat)."""
        t_env = self.t_env
        t_env.unload_module('core')
        t_env.load_module('core', Module(
            get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
        table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
        self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
        self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
    def test_create_and_drop_java_function(self):
        """create_*/drop_* for Java functions add and remove entries from the UDF listing."""
        t_env = self.t_env
        t_env.create_java_temporary_system_function(
            "scalar_func", "org.apache.flink.table.legacyutils.RichFunc0")
        t_env.create_java_function(
            "agg_func", "org.apache.flink.table.legacyutils.ByteMaxAggFunction")
        t_env.create_java_temporary_function(
            "table_func", "org.apache.flink.table.legacyutils.TableFunc1")
        self.assert_equals(t_env.list_user_defined_functions(),
                           ['scalar_func', 'agg_func', 'table_func'])
        t_env.drop_temporary_system_function("scalar_func")
        t_env.drop_function("agg_func")
        t_env.drop_temporary_function("table_func")
        self.assert_equals(t_env.list_user_defined_functions(), [])
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[0])
class MyTumblingEventTimeWindow(MergingWindowAssigner[tuple, TimeWindow]):
def merge_windows(self,
windows,
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
window_list = [w for w in windows]
window_list.sort()
for i in range(1, len(window_list)):
if window_list[i - 1].end > window_list[i].start:
callback.merge([window_list[i - 1], window_list[i]],
TimeWindow(window_list[i - 1].start, window_list[i].end))
def assign_windows(self,
element: tuple,
timestamp: int,
context):
return [TimeWindow(timestamp, timestamp + 5)]
def get_default_trigger(self, env) -> Trigger[tuple, TimeWindow]:
return SimpleTimeWindowTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
class SimpleTimeWindowTrigger(Trigger[tuple, TimeWindow]):
def on_element(self,
element: tuple,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if time >= window.max_timestamp():
return TriggerResult.FIRE_AND_PURGE
else:
return TriggerResult.CONTINUE
def on_merge(self,
window: TimeWindow,
ctx: 'Trigger.OnMergeContext') -> None:
pass
def clear(self,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> None:
pass
class SumWindowFunction(WindowFunction[tuple, tuple, str, TimeWindow]):
def apply(self, key: str, window: TimeWindow, inputs: Iterable[tuple]):
result = 0
for i in inputs:
result += i[1]
return [(key, result)]
| en | 0.685581 | ################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. ################################################################################ # please do not change this order since ModuleMock depends on FunctionDefinitionMock ( `f0` RAW('[B', '...') ) ( `a` BIGINT, `b` INT, `c` STRING, `rowtime` TIMESTAMP_LTZ(3) *ROWTIME* METADATA, WATERMARK FOR `rowtime`: TIMESTAMP_LTZ(3) AS SOURCE_WATERMARK() ) # derive physical columns and add a rowtime # access and reorder columns # write out the rowtime column with fully declared schema # test event time window and field access # please do not change this order since ModuleMock depends on FunctionDefinitionMock | 1.585718 | 2 |
discriminator.py | Dcoder99/CycleGAN-Unpaired-Image-translation | 0 | 6624868 | import tensorflow as tf
def convLayer(input, k, slope=0.2, stride=2, reuse=False, is_training=True, name=None):
with tf.variable_scope(name, reuse=reuse):
weights_shape = shape=[4, 4, input.get_shape()[3], k]
W_var = tf.get_variable("W_var", weights_shape,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
conv = tf.nn.conv2d(input, W_var, strides=[1, stride, stride, 1], padding='SAME')
normalized = instance_norm(conv)
output = tf.maximum(slope*normalized, normalized)#leakyRelu
return output
def lastLayer(input, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights_shape = [4, 4, input.get_shape()[3], 1]
W_var = tf.get_variable("W_var", weights_shape,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
bias_shape = [1]
b_var = tf.get_variable("b_var", bias_shape, initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input, W_var, strides=[1, 1, 1, 1], padding='SAME')
output = conv + b_var
return output
class Discriminator:
def __init__(self, name, is_training):
self.name = name
self.is_training = is_training
self.reuse = False
def __call__(self, input):
with tf.variable_scope(self.name):
C64 = convLayer(input, 64, reuse=self.reuse,
is_training=self.is_training, name='C64')
C128 = convLayer(C64, 128, reuse=self.reuse,
is_training=self.is_training, name='C128')
C256 = convLayer(C128, 256, reuse=self.reuse,
is_training=self.is_training, name='C256')
C512 = convLayer(C256, 512,reuse=self.reuse,
is_training=self.is_training, name='C512')
output = lastLayer(C512, reuse=self.reuse, name='output')
self.reuse = True
self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
return output
def instance_norm(input):
with tf.variable_scope("instance_norm"):
depth = input.get_shape()[3]
scale = tf.get_variable("scale", [depth], \
initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02, dtype=tf.float32))
offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0))
mean, variance = tf.nn.moments(input, axes=[1,2], keep_dims=True)
epsilon = 1e-5
inv = tf.rsqrt(variance + epsilon)
normalized = (input-mean)*inv
return scale*normalized + offset | import tensorflow as tf
def convLayer(input, k, slope=0.2, stride=2, reuse=False, is_training=True, name=None):
with tf.variable_scope(name, reuse=reuse):
weights_shape = shape=[4, 4, input.get_shape()[3], k]
W_var = tf.get_variable("W_var", weights_shape,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
conv = tf.nn.conv2d(input, W_var, strides=[1, stride, stride, 1], padding='SAME')
normalized = instance_norm(conv)
output = tf.maximum(slope*normalized, normalized)#leakyRelu
return output
def lastLayer(input, reuse=False, name=None):
with tf.variable_scope(name, reuse=reuse):
weights_shape = [4, 4, input.get_shape()[3], 1]
W_var = tf.get_variable("W_var", weights_shape,
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02, dtype=tf.float32))
bias_shape = [1]
b_var = tf.get_variable("b_var", bias_shape, initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input, W_var, strides=[1, 1, 1, 1], padding='SAME')
output = conv + b_var
return output
class Discriminator:
def __init__(self, name, is_training):
self.name = name
self.is_training = is_training
self.reuse = False
def __call__(self, input):
with tf.variable_scope(self.name):
C64 = convLayer(input, 64, reuse=self.reuse,
is_training=self.is_training, name='C64')
C128 = convLayer(C64, 128, reuse=self.reuse,
is_training=self.is_training, name='C128')
C256 = convLayer(C128, 256, reuse=self.reuse,
is_training=self.is_training, name='C256')
C512 = convLayer(C256, 512,reuse=self.reuse,
is_training=self.is_training, name='C512')
output = lastLayer(C512, reuse=self.reuse, name='output')
self.reuse = True
self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
return output
def instance_norm(input):
with tf.variable_scope("instance_norm"):
depth = input.get_shape()[3]
scale = tf.get_variable("scale", [depth], \
initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02, dtype=tf.float32))
offset = tf.get_variable("offset", [depth], initializer=tf.constant_initializer(0.0))
mean, variance = tf.nn.moments(input, axes=[1,2], keep_dims=True)
epsilon = 1e-5
inv = tf.rsqrt(variance + epsilon)
normalized = (input-mean)*inv
return scale*normalized + offset | ms | 0.213645 | #leakyRelu | 2.665155 | 3 |
guestbook.py | kishalayraj/kishalay-sudoku | 0 | 6624869 | <reponame>kishalayraj/kishalay-sudoku<gh_stars>0
import os
import urllib
import Generator
import Solver
import random
from google.appengine.api import users
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
DEFAULT_GUESTBOOK_NAME = 'default_guestbook'
sent_cells = ['0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0']
sol_cells = ['0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0']
# We set a parent key on the 'Greetings' to ensure that they are all
# in the same entity group. Queries across the single entity group
# will be consistent. However, the write rate should be limited to
# ~1/second.
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
"""Constructs a Datastore key for a Guestbook entity.
We use guestbook_name as the key.
"""
return ndb.Key('Guestbook', guestbook_name)
class Author(ndb.Model):
"""Sub model for representing an author."""
identity = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
cells = ndb.StringProperty(repeated=True,indexed=False)
class Greeting(ndb.Model):
"""A main model for representing an individual Guestbook entry."""
author = ndb.StructuredProperty(Author)
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
sudoku_id = ndb.IntegerProperty(indexed=False)
class MainPage(webapp2.RequestHandler):
def get(self):
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greetings_query = Greeting.query(
ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
greetings = greetings_query.fetch(1)
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'user': user,
'greetings': greetings,
'guestbook_name': urllib.quote_plus(guestbook_name),
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class Guestbook(webapp2.RequestHandler):
def post(self):
# We set the same parent key on the 'Greeting' to ensure each
# Greeting is in the same entity group. Queries across the
# single entity group will be consistent. However, the write
# rate to a single entity group should be limited to
# ~1/second.
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greeting = Greeting(parent=guestbook_key(guestbook_name))
greeting.content = self.request.get('content')
level = 0
#greeting.sudoku_id = random.randint(0,99)
greeting.sudoku_id = 0
if greeting.content == "Easy":
level = 1
if greeting.content == "Medium":
level = 2
if greeting.content == "Difficult":
level = 3
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
s1 = [0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0]
Generator.fill_sudoku(s,0,0)
Generator.reduce_sudoku(s,level)
for i in range(0,9):
for j in range(0,9):
sent_cells[9*i+j]=str(s[i][j])
"""fname = 'SudokuPuzzles.txt'
with open(fname) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
s1 = content[greeting.sudoku_id]
for i in range(0,81):
sent_cells[i] = str(s1[i])"""
if users.get_current_user():
greeting.author = Author(
identity= users.get_current_user().user_id(),
email= users.get_current_user().email(),
cells= sent_cells)
greeting.put()
query_params = {'guestbook_name': guestbook_name}
#taskqueue.add(url='/genSudoku', params={'key': greeting.content})
self.redirect('/?' + urllib.urlencode(query_params))
class CheckSudoku(webapp2.RequestHandler):
def post(self):
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greetings_query = Greeting.query(
ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
greetings = greetings_query.fetch(1)
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
for i in range(0,81):
row = i // 9
col = i % 9
if sent_cells[i] != '':
s[row][col] = int(sent_cells[i])
else:
s[row][col] = 0
for greeting in greetings:
for i in range(0,81):
if sent_cells[i] == '0':
sent_cells[i] = str(self.request.get('e'+str(i/9)+str(i%9)))
"""fname = 'Solutions.txt'
with open(fname) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
s = content[greeting.sudoku_id]"""
Solver.initial_fill(s)
for line in s:
if 0 in line:
Solver.solve(s, 0, 0)
break
for i in range(0,9):
for j in range(0,9):
sol_cells[9*i+j] = str(s[i][j])
flag = True
for i in range(0,81):
if sol_cells[i] != sent_cells[i]:
flag = False
break
template_values = {
'flag': flag,
'sol_cells': sol_cells,
}
template = JINJA_ENVIRONMENT.get_template('result.html')
self.response.write(template.render(template_values))
"""class GenSudoku(webapp2.RequestHandler):
def post(self):
key = self.request.get('key')
@ndb.transactional
def gen():
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
Generator.populate_board(s,0,0)
Generator.reduce_sudoku(s,1)
fname = 'SudokuPuzzles.txt'
f = open(fname, "w")
output = ""
for i in range(9):
for j in range(9):
output += str(s[i][j])
output = output + "\n"
f.write(output)
gen()"""
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook),
('/checkSudoku', CheckSudoku),
], debug=True)
| import os
import urllib
import Generator
import Solver
import random
from google.appengine.api import users
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
DEFAULT_GUESTBOOK_NAME = 'default_guestbook'
sent_cells = ['0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0']
sol_cells = ['0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0',
'0','1','0','1','0','1','0','1','0']
# We set a parent key on the 'Greetings' to ensure that they are all
# in the same entity group. Queries across the single entity group
# will be consistent. However, the write rate should be limited to
# ~1/second.
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
"""Constructs a Datastore key for a Guestbook entity.
We use guestbook_name as the key.
"""
return ndb.Key('Guestbook', guestbook_name)
class Author(ndb.Model):
"""Sub model for representing an author."""
identity = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
cells = ndb.StringProperty(repeated=True,indexed=False)
class Greeting(ndb.Model):
"""A main model for representing an individual Guestbook entry."""
author = ndb.StructuredProperty(Author)
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
sudoku_id = ndb.IntegerProperty(indexed=False)
class MainPage(webapp2.RequestHandler):
def get(self):
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greetings_query = Greeting.query(
ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
greetings = greetings_query.fetch(1)
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'user': user,
'greetings': greetings,
'guestbook_name': urllib.quote_plus(guestbook_name),
'url': url,
'url_linktext': url_linktext,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class Guestbook(webapp2.RequestHandler):
def post(self):
# We set the same parent key on the 'Greeting' to ensure each
# Greeting is in the same entity group. Queries across the
# single entity group will be consistent. However, the write
# rate to a single entity group should be limited to
# ~1/second.
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greeting = Greeting(parent=guestbook_key(guestbook_name))
greeting.content = self.request.get('content')
level = 0
#greeting.sudoku_id = random.randint(0,99)
greeting.sudoku_id = 0
if greeting.content == "Easy":
level = 1
if greeting.content == "Medium":
level = 2
if greeting.content == "Difficult":
level = 3
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
s1 = [0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0]
Generator.fill_sudoku(s,0,0)
Generator.reduce_sudoku(s,level)
for i in range(0,9):
for j in range(0,9):
sent_cells[9*i+j]=str(s[i][j])
"""fname = 'SudokuPuzzles.txt'
with open(fname) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
s1 = content[greeting.sudoku_id]
for i in range(0,81):
sent_cells[i] = str(s1[i])"""
if users.get_current_user():
greeting.author = Author(
identity= users.get_current_user().user_id(),
email= users.get_current_user().email(),
cells= sent_cells)
greeting.put()
query_params = {'guestbook_name': guestbook_name}
#taskqueue.add(url='/genSudoku', params={'key': greeting.content})
self.redirect('/?' + urllib.urlencode(query_params))
class CheckSudoku(webapp2.RequestHandler):
def post(self):
guestbook_name = self.request.get('guestbook_name',
DEFAULT_GUESTBOOK_NAME)
greetings_query = Greeting.query(
ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
greetings = greetings_query.fetch(1)
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
for i in range(0,81):
row = i // 9
col = i % 9
if sent_cells[i] != '':
s[row][col] = int(sent_cells[i])
else:
s[row][col] = 0
for greeting in greetings:
for i in range(0,81):
if sent_cells[i] == '0':
sent_cells[i] = str(self.request.get('e'+str(i/9)+str(i%9)))
"""fname = 'Solutions.txt'
with open(fname) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
s = content[greeting.sudoku_id]"""
Solver.initial_fill(s)
for line in s:
if 0 in line:
Solver.solve(s, 0, 0)
break
for i in range(0,9):
for j in range(0,9):
sol_cells[9*i+j] = str(s[i][j])
flag = True
for i in range(0,81):
if sol_cells[i] != sent_cells[i]:
flag = False
break
template_values = {
'flag': flag,
'sol_cells': sol_cells,
}
template = JINJA_ENVIRONMENT.get_template('result.html')
self.response.write(template.render(template_values))
"""class GenSudoku(webapp2.RequestHandler):
def post(self):
key = self.request.get('key')
@ndb.transactional
def gen():
s = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
Generator.populate_board(s,0,0)
Generator.reduce_sudoku(s,1)
fname = 'SudokuPuzzles.txt'
f = open(fname, "w")
output = ""
for i in range(9):
for j in range(9):
output += str(s[i][j])
output = output + "\n"
f.write(output)
gen()"""
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook),
('/checkSudoku', CheckSudoku),
], debug=True) | en | 0.761065 | # We set a parent key on the 'Greetings' to ensure that they are all # in the same entity group. Queries across the single entity group # will be consistent. However, the write rate should be limited to # ~1/second. Constructs a Datastore key for a Guestbook entity. We use guestbook_name as the key. Sub model for representing an author. A main model for representing an individual Guestbook entry. # We set the same parent key on the 'Greeting' to ensure each # Greeting is in the same entity group. Queries across the # single entity group will be consistent. However, the write # rate to a single entity group should be limited to # ~1/second. #greeting.sudoku_id = random.randint(0,99) fname = 'SudokuPuzzles.txt' with open(fname) as f: content = f.readlines() content = [x.strip('\n') for x in content] s1 = content[greeting.sudoku_id] for i in range(0,81): sent_cells[i] = str(s1[i]) #taskqueue.add(url='/genSudoku', params={'key': greeting.content}) fname = 'Solutions.txt' with open(fname) as f: content = f.readlines() content = [x.strip('\n') for x in content] s = content[greeting.sudoku_id] class GenSudoku(webapp2.RequestHandler): def post(self): key = self.request.get('key') @ndb.transactional def gen(): s = [[0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0], [0,0,0,0,0,0,0,0,0]] Generator.populate_board(s,0,0) Generator.reduce_sudoku(s,1) fname = 'SudokuPuzzles.txt' f = open(fname, "w") output = "" for i in range(9): for j in range(9): output += str(s[i][j]) output = output + "\n" f.write(output) gen() | 1.964292 | 2 |
Code/neuroconnect/plot.py | seankmartin/SKMNeuralConnections | 0 | 6624870 | <filename>Code/neuroconnect/plot.py
"""Plotting functions."""
import os
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import MaxNLocator
here = os.path.dirname(os.path.realpath(__file__))
PALETTE = "dark"
LABELSIZE = 12
def load_df(name):
"""Load a pandas dataframe from csv file at results/name."""
load_name = os.path.join(here, "..", "results", name)
df = pd.read_csv(load_name)
return df
def despine():
"""Despine the current plot with trimming."""
sns.despine(offset=0, trim=True)
def set_p():
"""Set the seaborn palette."""
sns.set_palette(PALETTE)
# sns.set_context(
# "paper",
# rc={
# "axes.titlesize": 18,
# "axes.labelsize": 14,
# "lines.linewidth": 2,
# },
# )
sns.set_context(
"paper",
font_scale=1.4,
rc={"lines.linewidth": 3.2},
)
def set_m():
sns.set_context(
"paper",
font_scale=1.4,
rc={"lines.linewidth": 1.5},
)
def save(fig, out_name):
"""Save the figure to figures/out_name."""
out_path = os.path.abspath(os.path.join(here, "..", "figures", out_name))
print("Saving figure to {}".format(out_path))
os.makedirs(os.path.dirname(out_path), exist_ok=True)
if fig is not None:
fig.savefig(out_path, dpi=400)
else:
plt.savefig(out_path, dpi=400)
plt.close(fig)
def plot_samples_v_prop(df, out_name="depth_plot.pdf"):
"""Plot the number of samples against proportion of connections."""
fig, ax = plt.subplots()
set_p()
sns.lineplot(
x="Number of samples",
y="Proportion of connections",
data=df,
style="Max distance",
hue="Max distance",
ax=ax,
)
ax.set_xlabel("Number of samples", fontsize=LABELSIZE)
ax.set_ylabel("Expected proportion connected", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_pmf(df, out_name, full=False):
"""Plot the pmf from the given dataframe."""
fig, ax = plt.subplots()
set_m()
x = df["Number of sampled connected neurons"]
y = df["Probability"]
ax.plot(x, y, "ko", ms=2.5)
y_vals_min = [0 for _ in x]
y_vals_max = y
colors = ["k" for _ in x]
if len(x) < 30:
ax.set_xticks([i for i in range(len(x))])
ax.set_xticklabels([i for i in range(len(x))])
if full:
ax.set_ylim([0, 1])
ax.vlines(x, y_vals_min, y_vals_max, colors=colors)
plt.xlabel("Number of sampled connected neurons", fontsize=LABELSIZE)
plt.ylabel("Probability", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_connection_samples(df, out_name):
"""Plot the connection samples from the dataframe."""
fig, ax = plt.subplots()
set_p()
sns.lineplot(
x="Number of samples",
y="Proportion of connections",
data=df,
style="Max distance",
hue="Max distance",
ax=ax,
)
plt.xlabel("Number of samples", fontsize=LABELSIZE)
plt.ylabel("Proportion of connections", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_pmf_accuracy(df, out_name):
"""Plot the accuracy of the PMF from the dataframe."""
fig, ax = plt.subplots()
set_p()
calculation_vals = df["Calculation"].values
has_numbers = False
greater_than_one = False
hue = "Calculation"
for val in calculation_vals:
if val.endswith("1"):
has_numbers = True
if val.endswith("2"):
greater_than_one = True
if has_numbers:
df["Max geodesic distance"] = df.apply(
lambda row: str(row.Calculation.split(" ")[-1]), axis=1
)
df["Calculation"] = df.apply(lambda row: row.Calculation[:-2], axis=1)
hue = "Max geodesic distance"
df = df.sort_values(by=["Calculation"], ascending=False)
if greater_than_one:
sns.lineplot(
x="Number of connected neurons",
y="Probability",
hue=hue,
style="Calculation",
ci=None,
data=df,
ax=ax,
)
else:
sns.lineplot(
x="Number of connected neurons",
y="Probability",
hue="Calculation",
style="Calculation",
ci=None,
data=df,
ax=ax,
)
plt.xlabel("Number of connected neurons", fontsize=LABELSIZE)
plt.ylabel("Probability", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_pmf_comp(dfs, names, out_name):
"""Plot the rate of convergence of the dataframe."""
df = None
for df_i, name_i in zip(dfs, names):
df_i["Connectivity"] = [name_i for _ in range(len(df_i))]
if df is None:
df = df_i
else:
df = df.append(df_i)
fig, ax = plt.subplots()
set_p()
sns.lineplot(
x="Number of sampled connected neurons",
y="Probability",
hue="Connectivity",
style="Connectivity",
data=df,
)
ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
plt.xlabel("Number of sampled connected neurons", fontsize=LABELSIZE)
plt.ylabel("Probability", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_exp_comp(dfs, names, out_name, prop=False):
"""Plot the accuracy of the expected value."""
df = None
for df_i, name_i in zip(dfs, names):
df_i["Connectivity"] = [name_i for _ in range(len(df_i))]
if df is None:
df = df_i
else:
df = df.append(df_i)
if prop:
y_name = "Expected proportion connected"
else:
y_name = "Expected connected"
fig, ax = plt.subplots()
set_p()
sns.lineplot(
x="Number of samples",
y=y_name,
data=df,
style="Connectivity",
hue="Connectivity",
ax=ax,
)
ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
plt.xlabel("Number of samples", fontsize=LABELSIZE)
plt.ylabel(y_name, fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_exp_accuracy(df, out_name, prop=False, split=True):
"""Plot the accuracy of the expected value."""
if prop:
y_name = "Expected proportion connected"
else:
y_name = "Expected connected"
if split:
hue = "Max distance"
else:
hue = "Calculation"
fig, ax = plt.subplots()
set_p()
sns.lineplot(
x="Number of samples",
y=y_name,
data=df,
style="Calculation",
hue=hue,
ax=ax,
)
ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
plt.xlabel("Number of samples", fontsize=LABELSIZE)
plt.ylabel(y_name, fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_region_vals(df, out_name, x_name="Connectivity", scale=(10, 4)):
"""Plot region specific values from the dataframe."""
fig, ax = plt.subplots(figsize=scale)
set_p()
sns.barplot(
x=x_name,
y="Expected proportion connected",
hue="Calculation",
data=df,
ax=ax,
)
plt.xlabel(x_name, fontsize=LABELSIZE)
plt.ylabel("Expected proportion connected", fontsize=LABELSIZE)
despine()
save(fig=None, out_name=out_name)
def plot_region_sim(df, out_name, x_name="Connectivity", scale=(10, 4)):
"""Plot region specific values from the dataframe."""
fig, ax = plt.subplots(figsize=scale)
set_p()
sns.barplot(
x=x_name,
y="Bhattacharyya distance",
data=df,
ax=ax,
)
plt.xlabel(x_name, fontsize=LABELSIZE)
plt.ylabel("Bhattacharyya distance", fontsize=LABELSIZE)
despine()
save(fig=None, out_name=out_name)
def plot_distribution(dist, out_name):
"""Plot the pmf given by the distribution."""
fig, ax = plt.subplots()
set_p()
x = list(dist.keys())
y = list(dist.values())
ax.plot(x, y, "ko", ms=2.5)
y_vals_min = [0 for _ in x]
y_vals_max = y
colors = ["k" for _ in x]
ax.vlines(x, y_vals_min, y_vals_max, colors=colors)
plt.xlabel("Value", fontsize=LABELSIZE)
plt.ylabel("Probability", fontsize=LABELSIZE)
despine()
save(fig, out_name)
def plot_acc_interp(x_samps, interped_vals, xvals, yvals, out_name, true_y=None):
"""Plot the accuracy of interpolation."""
fig, ax = plt.subplots()
set_p()
ax.plot(x_samps, interped_vals, c="b", linestyle="-", label="interp")
ax.plot(xvals, yvals, "gx", label="samples", markersize="3.0")
if true_y is not None:
ax.plot(x_samps, true_y, c="r", linestyle="--", label="true")
plt.legend()
plt.xlabel("Number of receivers in B", fontsize=LABELSIZE)
plt.ylabel("Weighted probability", fontsize=LABELSIZE)
save(fig, out_name)
def plot_dist_explain(dfs, out_names):
    """Plot the explanation of computing the distributions.

    Expects four dataframes and four matching output names, producing in
    order: sampled-sender probabilities, receiver probabilities, receiver
    probabilities conditioned on the number of sampled A, and sampled
    receiver probabilities.
    """
    # Figure 1: distribution over the number of sampled senders.
    fig, ax = plt.subplots()
    set_p()
    df = dfs[0]
    sns.lineplot(x="Number of sampled senders", y="Probability", ax=ax, data=df)
    # Force roughly ten integer ticks on the x axis.
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    plt.xlabel("Number of sampled senders", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[0])
    # Figure 2: distribution over the number of receivers.
    fig, ax = plt.subplots(figsize=(7, 5.2))
    set_p()
    df = dfs[1]
    sns.lineplot(x="Number of receivers", y="Probability", ax=ax, data=df)
    plt.xlabel("Number of receivers", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[1])
    # Figure 3: receiver distribution for selected counts of sampled A.
    fig, ax = plt.subplots(figsize=(7, 5.2))
    set_p()
    df = dfs[2]
    # Only plot a subset of "Number of sampled A" values to keep it readable.
    st = [0, 4, 8, 12, 16, 20]
    conds = []
    for val in st:
        conds.append(df["Number of sampled A"] == val)
    # OR the per-value masks together into one row filter.
    final_filt = conds[0]
    for val in conds[1:]:
        final_filt = final_filt | val
    sns.lineplot(
        x="Number of receivers",
        y="Probability",
        ax=ax,
        data=df[final_filt].astype({"Number of sampled A": "str"}),
        style="Number of sampled A",
        hue="Number of sampled A",
    )
    plt.xlabel("Number of receivers", fontsize=LABELSIZE)
    plt.ylabel("Weighted probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[2])
    # Figure 4: distribution over the number of sampled receivers.
    fig, ax = plt.subplots()
    set_p()
    df = dfs[3]
    sns.lineplot(x="Number of sampled receivers", y="Probability", ax=ax, data=df)
    plt.xlabel("Number of sampled receivers", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    despine()
    save(fig, out_names[3])
def main():
    """Defines the plots performed in produce_figures - without performing analysis.

    Reads pre-computed CSVs from results/ via load_df and writes PDFs to
    figures/ via the individual plot_* helpers.
    """
    print("Starting main plotting")
    # Mouse plots
    plot_samples_v_prop(load_df("MOp_to_SSP-ll_depth.csv"), "mouse_samps.pdf")
    plot_region_vals(
        load_df("mouse_region_exp_fig.csv"),
        "mouse_region_exp.pdf",
        x_name="Regions",
        scale=(12, 5),
    )
    # Accuracy plots
    plot_exp_accuracy(
        load_df("connection_samples_fig.csv"), "samples_acc.pdf", prop=True
    )
    plot_pmf_accuracy(load_df("pmf_comp_fig.csv"), "d3_acc.pdf")
    plot_pmf_accuracy(load_df("pmf_comp_pmf.csv"), "pmf_acc.pdf")
    plot_region_vals(load_df("exp_man.csv"), "region_acc_man.pdf", scale=(12, 5))
    plot_pmf_accuracy(load_df("MOp_to_SSP-ll_pmf_final_1_79.csv"), "pmf_mouse_acc.pdf")
    plot_exp_accuracy(
        load_df("total_b_exp_fig.csv"), "exp_total_b.pdf", prop=True, split=False
    )
    # Example plots - HC/SUB
    plot_pmf(load_df("tetrode_man.csv"), "tetrode_pmf.pdf")
    plot_pmf(load_df("npix_man.csv"), "npix_pmf.pdf")
    # NOTE(review): plot_exp_comp/plot_pmf_comp are declared as
    # (dfs, names, out_name); passing two bare dataframes positionally makes
    # zip(dfs, names) iterate column labels, not frames - verify these calls.
    plot_exp_comp(
        load_df("connection_samples_hc_high.csv"),
        load_df("connection_samples_hc_low.csv"),
        "samples_hc_both.pdf",
        prop=True,
    )
    plot_pmf_comp(
        load_df("tetrode_sub_high.csv"),
        load_df("tetrode_sub_low.csv"),
        "ca1_sub_tet_comp.pdf",
    )
    # Explanation figures - mostly done in other function
    plot_dist_explain(
        [
            load_df("a_prob_eg.csv"),
            load_df("b_prob_eg.csv"),
            load_df("b_each_eg.csv"),
            load_df("b_fin_eg.csv"),
        ],
        [
            "a_prob_eg.pdf",
            "b_prob_eg.pdf",
            "b_each_eg.pdf",
            "b_fin_eg.pdf",
        ],
    )
if __name__ == "__main__":
main()
| <filename>Code/neuroconnect/plot.py
"""Plotting functions."""
import os
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import MaxNLocator
here = os.path.dirname(os.path.realpath(__file__))
PALETTE = "dark"
LABELSIZE = 12
def load_df(name):
    """Read the CSV file results/<name> into a pandas dataframe."""
    # Result CSVs live in a fixed directory relative to this module.
    return pd.read_csv(os.path.join(here, "..", "results", name))
def despine():
    """Remove the top/right spines of the current plot, trimming to the data."""
    sns.despine(trim=True, offset=0)
def set_p():
    """Set the seaborn palette and the paper plotting context.

    Uses thick lines (3.2) and slightly enlarged fonts for figure output.
    """
    sns.set_palette(PALETTE)
    # Removed a stale commented-out set_context block that duplicated this call.
    sns.set_context(
        "paper",
        font_scale=1.4,
        rc={"lines.linewidth": 3.2},
    )
def set_m():
    """Set a paper plotting context with thinner lines (used by plot_pmf)."""
    sns.set_context(
        "paper",
        font_scale=1.4,
        rc={"lines.linewidth": 1.5},
    )
def save(fig, out_name):
    """Save fig (or the current figure when fig is None) to figures/out_name."""
    out_path = os.path.abspath(os.path.join(here, "..", "figures", out_name))
    print("Saving figure to {}".format(out_path))
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    # Fall back to the pyplot state machine when no figure handle was given.
    target = fig if fig is not None else plt
    target.savefig(out_path, dpi=400)
    plt.close(fig)
def plot_samples_v_prop(df, out_name="depth_plot.pdf"):
    """Plot the number of samples against proportion of connections."""
    fig, ax = plt.subplots()
    set_p()
    sns.lineplot(
        data=df,
        x="Number of samples",
        y="Proportion of connections",
        hue="Max distance",
        style="Max distance",
        ax=ax,
    )
    # The y axis is relabelled with the "expected proportion" wording.
    ax.set_xlabel("Number of samples", fontsize=LABELSIZE)
    ax.set_ylabel("Expected proportion connected", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_pmf(df, out_name, full=False):
    """Stem-plot the pmf stored in the given dataframe.

    When full is True the y axis is fixed to [0, 1].
    """
    fig, ax = plt.subplots()
    set_m()
    xs = df["Number of sampled connected neurons"]
    ys = df["Probability"]
    # Black dots with vertical stems down to zero.
    ax.plot(xs, ys, "ko", ms=2.5)
    if len(xs) < 30:
        # With few points, label every integer position explicitly.
        ticks = list(range(len(xs)))
        ax.set_xticks(ticks)
        ax.set_xticklabels(ticks)
    if full:
        ax.set_ylim([0, 1])
    ax.vlines(xs, [0 for _ in xs], ys, colors=["k" for _ in xs])
    plt.xlabel("Number of sampled connected neurons", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_connection_samples(df, out_name):
    """Plot the proportion of connections against the number of samples."""
    fig, ax = plt.subplots()
    set_p()
    sns.lineplot(
        data=df,
        x="Number of samples",
        y="Proportion of connections",
        hue="Max distance",
        style="Max distance",
        ax=ax,
    )
    plt.xlabel("Number of samples", fontsize=LABELSIZE)
    plt.ylabel("Proportion of connections", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_pmf_accuracy(df, out_name):
    """Plot the accuracy of the PMF from the dataframe.

    If the "Calculation" entries end in a digit suffix (e.g. "mc 1"),
    the digit is split into a "Max geodesic distance" column; it is used
    as the hue only when more than one distance value is present.
    """
    fig, ax = plt.subplots()
    set_p()
    calculation_vals = df["Calculation"].values
    # Detect whether calculations carry a distance suffix, and whether
    # there is more than one distance value among them.
    has_numbers = any(val.endswith("1") for val in calculation_vals)
    greater_than_one = any(val.endswith("2") for val in calculation_vals)
    hue = "Calculation"
    if has_numbers:
        df["Max geodesic distance"] = df.apply(
            lambda row: str(row.Calculation.split(" ")[-1]), axis=1
        )
        # Strip the " N" suffix from the calculation names.
        df["Calculation"] = df.apply(lambda row: row.Calculation[:-2], axis=1)
        df = df.sort_values(by=["Calculation"], ascending=False)
        if greater_than_one:
            hue = "Max geodesic distance"
    # The two original branches differed only in the hue column; a single
    # call with the computed hue is behaviourally identical.
    sns.lineplot(
        x="Number of connected neurons",
        y="Probability",
        hue=hue,
        style="Calculation",
        ci=None,
        data=df,
        ax=ax,
    )
    plt.xlabel("Number of connected neurons", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_pmf_comp(dfs, names, out_name):
    """Plot the rate of convergence of the dataframes.

    Each dataframe in dfs is tagged with the matching entry of names in a
    new "Connectivity" column, then the frames are combined and plotted.
    """
    frames = []
    for df_i, name_i in zip(dfs, names):
        df_i["Connectivity"] = [name_i for _ in range(len(df_i))]
        frames.append(df_i)
    # DataFrame.append was removed in pandas 2.0; concat is the supported API.
    df = pd.concat(frames)
    fig, ax = plt.subplots()
    set_p()
    sns.lineplot(
        x="Number of sampled connected neurons",
        y="Probability",
        hue="Connectivity",
        style="Connectivity",
        data=df,
    )
    # Force roughly ten integer ticks on the x axis.
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    plt.xlabel("Number of sampled connected neurons", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_exp_comp(dfs, names, out_name, prop=False):
    """Plot the accuracy of the expected value for several dataframes.

    Each dataframe in dfs is tagged with the matching entry of names in a
    new "Connectivity" column, then the frames are combined and plotted.
    When prop is True the y axis is the expected proportion connected.
    """
    frames = []
    for df_i, name_i in zip(dfs, names):
        df_i["Connectivity"] = [name_i for _ in range(len(df_i))]
        frames.append(df_i)
    # DataFrame.append was removed in pandas 2.0; concat is the supported API.
    df = pd.concat(frames)
    y_name = "Expected proportion connected" if prop else "Expected connected"
    fig, ax = plt.subplots()
    set_p()
    sns.lineplot(
        x="Number of samples",
        y=y_name,
        data=df,
        style="Connectivity",
        hue="Connectivity",
        ax=ax,
    )
    # Force roughly ten integer ticks on the x axis.
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    plt.xlabel("Number of samples", fontsize=LABELSIZE)
    plt.ylabel(y_name, fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_exp_accuracy(df, out_name, prop=False, split=True):
    """Plot the accuracy of the expected value.

    When prop is True the y axis is the expected proportion connected;
    when split is True lines are coloured by max distance instead of
    by calculation.
    """
    y_name = "Expected proportion connected" if prop else "Expected connected"
    hue = "Max distance" if split else "Calculation"
    fig, ax = plt.subplots()
    set_p()
    sns.lineplot(
        data=df,
        x="Number of samples",
        y=y_name,
        hue=hue,
        style="Calculation",
        ax=ax,
    )
    # Force roughly ten integer ticks on the x axis.
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    plt.xlabel("Number of samples", fontsize=LABELSIZE)
    plt.ylabel(y_name, fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_region_vals(df, out_name, x_name="Connectivity", scale=(10, 4)):
    """Bar-plot region specific expected proportions from the dataframe."""
    fig, ax = plt.subplots(figsize=scale)
    set_p()
    sns.barplot(
        data=df,
        x=x_name,
        y="Expected proportion connected",
        hue="Calculation",
        ax=ax,
    )
    plt.xlabel(x_name, fontsize=LABELSIZE)
    plt.ylabel("Expected proportion connected", fontsize=LABELSIZE)
    despine()
    # Save via pyplot (current figure) rather than the figure handle.
    save(fig=None, out_name=out_name)
def plot_region_sim(df, out_name, x_name="Connectivity", scale=(10, 4)):
    """Bar-plot region specific Bhattacharyya distances from the dataframe."""
    fig, ax = plt.subplots(figsize=scale)
    set_p()
    sns.barplot(
        data=df,
        x=x_name,
        y="Bhattacharyya distance",
        ax=ax,
    )
    plt.xlabel(x_name, fontsize=LABELSIZE)
    plt.ylabel("Bhattacharyya distance", fontsize=LABELSIZE)
    despine()
    # Save via pyplot (current figure) rather than the figure handle.
    save(fig=None, out_name=out_name)
def plot_distribution(dist, out_name):
    """Stem-plot the pmf given by the distribution mapping value -> probability."""
    fig, ax = plt.subplots()
    set_p()
    xs = list(dist.keys())
    ys = list(dist.values())
    # Black dots with vertical stems down to zero.
    ax.plot(xs, ys, "ko", ms=2.5)
    ax.vlines(xs, [0] * len(xs), ys, colors=["k"] * len(xs))
    plt.xlabel("Value", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_name)
def plot_acc_interp(x_samps, interped_vals, xvals, yvals, out_name, true_y=None):
    """Plot the accuracy of interpolation.

    Args:
        x_samps: x values at which the interpolation was evaluated.
        interped_vals: interpolated y values at x_samps.
        xvals, yvals: the sample points the interpolation was built from.
        out_name: output file name under figures/.
        true_y: optional ground-truth y values at x_samps for comparison.
    """
    fig, ax = plt.subplots()
    set_p()
    # Blue line: interpolation; green crosses: the underlying samples.
    ax.plot(x_samps, interped_vals, c="b", linestyle="-", label="interp")
    ax.plot(xvals, yvals, "gx", label="samples", markersize="3.0")
    if true_y is not None:
        # Red dashed line: the true curve, when known.
        ax.plot(x_samps, true_y, c="r", linestyle="--", label="true")
    plt.legend()
    plt.xlabel("Number of receivers in B", fontsize=LABELSIZE)
    plt.ylabel("Weighted probability", fontsize=LABELSIZE)
    save(fig, out_name)
def plot_dist_explain(dfs, out_names):
    """Plot the explanation of computing the distributions.

    Expects four dataframes and four matching output names, producing in
    order: sampled-sender probabilities, receiver probabilities, receiver
    probabilities conditioned on the number of sampled A, and sampled
    receiver probabilities.
    """
    # Figure 1: distribution over the number of sampled senders.
    fig, ax = plt.subplots()
    set_p()
    df = dfs[0]
    sns.lineplot(x="Number of sampled senders", y="Probability", ax=ax, data=df)
    # Force roughly ten integer ticks on the x axis.
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    plt.xlabel("Number of sampled senders", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[0])
    # Figure 2: distribution over the number of receivers.
    fig, ax = plt.subplots(figsize=(7, 5.2))
    set_p()
    df = dfs[1]
    sns.lineplot(x="Number of receivers", y="Probability", ax=ax, data=df)
    plt.xlabel("Number of receivers", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[1])
    # Figure 3: receiver distribution for selected counts of sampled A.
    fig, ax = plt.subplots(figsize=(7, 5.2))
    set_p()
    df = dfs[2]
    # Only plot a subset of "Number of sampled A" values to keep it readable.
    st = [0, 4, 8, 12, 16, 20]
    conds = []
    for val in st:
        conds.append(df["Number of sampled A"] == val)
    # OR the per-value masks together into one row filter.
    final_filt = conds[0]
    for val in conds[1:]:
        final_filt = final_filt | val
    sns.lineplot(
        x="Number of receivers",
        y="Probability",
        ax=ax,
        data=df[final_filt].astype({"Number of sampled A": "str"}),
        style="Number of sampled A",
        hue="Number of sampled A",
    )
    plt.xlabel("Number of receivers", fontsize=LABELSIZE)
    plt.ylabel("Weighted probability", fontsize=LABELSIZE)
    despine()
    save(fig, out_names[2])
    # Figure 4: distribution over the number of sampled receivers.
    fig, ax = plt.subplots()
    set_p()
    df = dfs[3]
    sns.lineplot(x="Number of sampled receivers", y="Probability", ax=ax, data=df)
    plt.xlabel("Number of sampled receivers", fontsize=LABELSIZE)
    plt.ylabel("Probability", fontsize=LABELSIZE)
    ax.xaxis.set_major_locator(MaxNLocator(nbins=11, integer=True, min_n_ticks=10))
    despine()
    save(fig, out_names[3])
def main():
    """Defines the plots performed in produce_figures - without performing analysis.

    Reads pre-computed CSVs from results/ via load_df and writes PDFs to
    figures/ via the individual plot_* helpers.
    """
    print("Starting main plotting")
    # Mouse plots
    plot_samples_v_prop(load_df("MOp_to_SSP-ll_depth.csv"), "mouse_samps.pdf")
    plot_region_vals(
        load_df("mouse_region_exp_fig.csv"),
        "mouse_region_exp.pdf",
        x_name="Regions",
        scale=(12, 5),
    )
    # Accuracy plots
    plot_exp_accuracy(
        load_df("connection_samples_fig.csv"), "samples_acc.pdf", prop=True
    )
    plot_pmf_accuracy(load_df("pmf_comp_fig.csv"), "d3_acc.pdf")
    plot_pmf_accuracy(load_df("pmf_comp_pmf.csv"), "pmf_acc.pdf")
    plot_region_vals(load_df("exp_man.csv"), "region_acc_man.pdf", scale=(12, 5))
    plot_pmf_accuracy(load_df("MOp_to_SSP-ll_pmf_final_1_79.csv"), "pmf_mouse_acc.pdf")
    plot_exp_accuracy(
        load_df("total_b_exp_fig.csv"), "exp_total_b.pdf", prop=True, split=False
    )
    # Example plots - HC/SUB
    plot_pmf(load_df("tetrode_man.csv"), "tetrode_pmf.pdf")
    plot_pmf(load_df("npix_man.csv"), "npix_pmf.pdf")
    # NOTE(review): plot_exp_comp/plot_pmf_comp are declared as
    # (dfs, names, out_name); passing two bare dataframes positionally makes
    # zip(dfs, names) iterate column labels, not frames - verify these calls.
    plot_exp_comp(
        load_df("connection_samples_hc_high.csv"),
        load_df("connection_samples_hc_low.csv"),
        "samples_hc_both.pdf",
        prop=True,
    )
    plot_pmf_comp(
        load_df("tetrode_sub_high.csv"),
        load_df("tetrode_sub_low.csv"),
        "ca1_sub_tet_comp.pdf",
    )
    # Explanation figures - mostly done in other function
    plot_dist_explain(
        [
            load_df("a_prob_eg.csv"),
            load_df("b_prob_eg.csv"),
            load_df("b_each_eg.csv"),
            load_df("b_fin_eg.csv"),
        ],
        [
            "a_prob_eg.pdf",
            "b_prob_eg.pdf",
            "b_each_eg.pdf",
            "b_fin_eg.pdf",
        ],
    )
if __name__ == "__main__":
main()
| en | 0.76552 | Plotting functions. Load a pandas dataframe from csv file at results/name. Despine the current plot with trimming. Set the seaborn palette. # sns.set_context( # "paper", # rc={ # "axes.titlesize": 18, # "axes.labelsize": 14, # "lines.linewidth": 2, # }, # ) Save the figure to figures/out_name. Plot the number of samples against proportion of connections. Plot the pmf from the given dataframe. Plot the connection samples from the dataframe. Plot the accuracy of the PMF from the dataframe. Plot the rate of convergence of the dataframe. Plot the accuracy of the expected value. Plot the accuracy of the expected value. Plot region specific values from the dataframe. Plot region specific values from the dataframe. Plot the pmf given by the distribution. Plot the accuracy of interpolation. Plot the explanation of computing the distributions. Defines the plots performed in produce_figures - without performing analysis. # Mouse plots # Accuracy plots # Example plots - HC/SUB # Explanation figures - mostly done in other function | 3.020045 | 3 |
kubernetes_typed/client/models/v1_job.py | nikhiljha/kubernetes-typed | 22 | 6624871 | <reponame>nikhiljha/kubernetes-typed<gh_stars>10-100
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1JobDict generated type."""
from typing import TypedDict
from kubernetes_typed.client import V1JobSpecDict, V1JobStatusDict, V1ObjectMetaDict
# TypedDict for a Kubernetes Job resource (apiVersion/kind/metadata/spec/status).
# total=False makes every key optional, matching partially-specified payloads.
V1JobDict = TypedDict(
    "V1JobDict",
    {
        "apiVersion": str,
        "kind": str,
        "metadata": V1ObjectMetaDict,
        "spec": V1JobSpecDict,
        "status": V1JobStatusDict,
    },
    total=False,
)
| # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1JobDict generated type."""
from typing import TypedDict
from kubernetes_typed.client import V1JobSpecDict, V1JobStatusDict, V1ObjectMetaDict
V1JobDict = TypedDict(
"V1JobDict",
{
"apiVersion": str,
"kind": str,
"metadata": V1ObjectMetaDict,
"spec": V1JobSpecDict,
"status": V1JobStatusDict,
},
total=False,
) | en | 0.397533 | # Code generated by `typeddictgen`. DO NOT EDIT. V1JobDict generated type. | 1.188471 | 1 |
clevrtex-gen/blender_utils.py | karazijal/clevrtex-generation | 17 | 6624872 | from pathlib import Path
import bpy
import bpy_extras
def get_camera_coords(cam, pos):
    """
    For a specified point, get both the 3D coordinates and 2D pixel-space
    coordinates of the point from the perspective of the camera.

    Inputs:
    - cam: Camera object
    - pos: Vector giving 3D world-space position

    Returns a tuple of:
    - (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
      in the range [-1, 1]
    """
    scene = bpy.context.scene
    x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
    # Convert normalised camera-view coords to pixels at the render resolution.
    scale = scene.render.resolution_percentage / 100.0
    width = int(scale * scene.render.resolution_x)
    height = int(scale * scene.render.resolution_y)
    px = int(round(x * width))
    # Image y runs downward while camera-view y runs upward, so flip it.
    py = int(round(height - y * height))
    return (px, py, z)
def configure_cycles(output_path, width, height, tile_size, num_samples, min_bounces, max_bounces, use_gpu=False):
    """Configure the Cycles render engine for the current scene.

    Sets the output path, resolution, tile size, sample count and the
    transparent bounce limits; optionally enables CUDA rendering on
    every available compute device.
    """
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.render.filepath = str(output_path)
    bpy.context.scene.render.resolution_x = width
    bpy.context.scene.render.resolution_y = height
    bpy.context.scene.render.resolution_percentage = 100
    bpy.context.scene.render.tile_x = tile_size
    bpy.context.scene.render.tile_y = tile_size
    if use_gpu:
        bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
        bpy.context.scene.cycles.device = 'GPU'
        # Enable every detected CUDA device.
        for d in bpy.context.preferences.addons["cycles"].preferences.devices:
            d["use"] = 1
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = num_samples
    bpy.context.scene.cycles.transparent_min_bounces = min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = max_bounces
def compositor_output(out_socket, node_tree, output_path_prefix, set_bw=False, mkdir=True, frame_id=1):
    """Wire out_socket to a new compositor file-output node.

    Returns the path of the file expected for frame_id.
    """
    prefix = Path(output_path_prefix)
    out_dir = prefix.parent
    if mkdir:
        out_dir.mkdir(parents=True, exist_ok=True)
    elif not (out_dir.exists() and out_dir.is_dir()):
        raise RuntimeError(f'Cannot output to {out_dir}')
    out_name = prefix.name
    output_node = node_tree.nodes.new('CompositorNodeOutputFile')
    output_node.base_path = str(out_dir)
    output_node.file_slots[0].path = str(out_name)
    if set_bw:
        # Greyscale output both on the node and its file slot.
        output_node.format.color_mode = 'BW'
        output_node.file_slots[0].format.color_mode = 'BW'
    node_tree.links.new(out_socket, output_node.inputs['Image'])
    return out_dir / f"{out_name}{frame_id:0>4d}.png"
def compositor_obj_mask_output(out_socket, node_tree, obj_index, output_path_prefix, mkdir=True, frame_id=1):
    """Wire out_socket through an ID-mask node for obj_index and write it to file."""
    mask_node = node_tree.nodes.new('CompositorNodeIDMask')
    mask_node.index = obj_index
    mask_node.use_antialiasing = False
    node_tree.links.new(out_socket, mask_node.inputs['ID value'])
    prefix = str(output_path_prefix)
    # Strip any trailing extension and tag the name with the object index.
    if prefix.endswith('.png'):
        prefix = prefix[:-4]
    prefix += f'_o{obj_index:0>2d}_'
    return compositor_output(
        mask_node.outputs['Alpha'], node_tree, prefix,
        set_bw=True, mkdir=mkdir, frame_id=frame_id,
    )
def add_object_geometry(object_path, scale, loc, rotation=0):
    """
    Load an object from a file. object_path points to .blend file with
    object_path.stem object inside the scene, with scale 0 and positions at the origin
    - scale: scalar giving the size that the object should be in the scene
    - loc: tuple (x, y) location on the ground plane
    - rotation: scalar rotation to apply to the object
    """
    # First figure out how many of this object are already in the scene so we can
    # give the new object a unique name
    object_path = Path(object_path)
    name = object_path.stem
    count = 0
    for obj in bpy.data.objects:
        if obj.name.startswith(name):
            count += 1
    # Blender appends datablocks from a .blend via the internal 'Object' path.
    filename = object_path / 'Object' / name
    bpy.ops.wm.append(filename=str(filename))
    # Give it a new name to avoid conflicts
    new_name = f'{name}_{count}'
    bpy.data.objects[name].name = new_name
    # Set the new object as active, then rotate, scale, and translate it
    x, y = loc
    o = bpy.data.objects[new_name]
    o.select_set(state=True, view_layer=bpy.context.view_layer)
    bpy.context.view_layer.objects.active = o
    bpy.context.object.rotation_euler[2] = rotation  # Rotate around z
    bpy.ops.transform.resize(value=(scale, scale, scale))
    # Lift by `scale` so the object sits on the ground plane at z = scale.
    bpy.ops.transform.translate(value=(x, y, scale))
    o.select_set(state=False, view_layer=bpy.context.view_layer)
def load_material(path):
    """Append the node group named after the .blend file at path into bpy.data."""
    blend = Path(path)
    bpy.ops.wm.append(filename=str(blend / 'NodeTree' / blend.stem))
def add_material(obj, mat_path, **properties):
    """
    Create a new material from the node group at mat_path and assign it to obj.

    mat_path.stem names a node group previously loaded (or loadable) with
    load_material. Keyword properties are copied onto matching group inputs.
    """
    # Sometime Displacement is called Displacement Strength
    if 'Displacement' in properties:
        properties['Displacement Strength'] = properties['Displacement']
    # Pick a unique material name if one with this stem already exists.
    # (Removed an unused `mat_count` local from the original.)
    names = {m.name for m in bpy.data.materials}
    name = mat_path.stem
    mat_name = mat_path.stem
    if name in names:
        idx = sum(1 for m in bpy.data.materials if m.name.startswith(name))
        mat_name = name + f'_{idx + 1}'
    # Create a new material
    mat = bpy.data.materials.new(mat_name)
    mat.name = mat_name
    mat.use_nodes = True
    mat.cycles.displacement_method = 'BOTH'
    # Attach the new material to the object
    # Make sure it doesn't already have materials
    assert len(
        obj.data.materials) == 0, f"{obj.name} has multiple materials ({', '.join(m.name for m in obj.data.materials if m is not None)}), adding {name} will fail"
    obj.data.materials.append(mat)
    mat.node_tree.links.clear()
    mat.node_tree.nodes.clear()
    output_node = mat.node_tree.nodes.new('ShaderNodeOutputMaterial')
    output_node.is_active_output = True
    # Add a new GroupNode to the node tree of the active material,
    group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
    if name not in bpy.data.node_groups:
        load_material(mat_path)
    group_node.node_tree = bpy.data.node_groups[name]
    # Also this seems to be the only way to copy a node tree in the headless mode
    # Wire first by-name then by preset names, to the group outputs to the material output
    for out_socket in group_node.outputs:
        if out_socket.name in output_node.inputs:
            mat.node_tree.links.new(
                group_node.outputs[out_socket.name],
                output_node.inputs[out_socket.name],
            )
    # Fall back to conventional socket names if Surface is still unlinked.
    if not output_node.inputs['Surface'].is_linked:
        if 'Shader' in group_node.outputs and not group_node.outputs['Shader'].is_linked:
            mat.node_tree.links.new(
                group_node.outputs["Shader"],
                output_node.inputs["Surface"],
            )
        elif 'BSDF' in group_node.outputs and not group_node.outputs['BSDF'].is_linked:
            mat.node_tree.links.new(
                group_node.outputs["BSDF"],
                output_node.inputs["Surface"],
            )
        else:
            raise ValueError(f"Cannot resolve material output for {mat.name}")
    # Copy requested property values onto matching group inputs.
    for inp in group_node.inputs:
        if inp.name in properties:
            inp.default_value = properties[inp.name]
    return mat
def add_shadeless_nodes_to_material(mat, shadeless_clr):
    """
    Inject nodes to the material tree required for rendering solid colour output.

    Returns (mix_node, injected_nodes, injected_links) so a caller can
    later remove everything that was added.
    """
    # Mix shader driven by 'Is Camera Ray': camera rays get the flat emission
    # colour, all other rays get the diffuse BSDF.
    mix_node = mat.node_tree.nodes.new('ShaderNodeMixShader')
    mix_node.name = 'InjectedShadelessMix'
    dif_node = mat.node_tree.nodes.new('ShaderNodeBsdfDiffuse')
    dif_node.name = 'InjectedShadelessDif'
    lit_node = mat.node_tree.nodes.new('ShaderNodeLightPath')
    lit_node.name = 'InjectedShadelessLit'
    emi_node = mat.node_tree.nodes.new('ShaderNodeEmission')
    emi_node.name = 'InjectedShadelessEmission'
    emi_node.inputs['Color'].default_value = shadeless_clr
    # Fac = Is Camera Ray selects between mix inputs 1 (diffuse) and 2 (emission).
    l1 = mat.node_tree.links.new(
        lit_node.outputs['Is Camera Ray'],
        mix_node.inputs['Fac'],
    )
    l2 = mat.node_tree.links.new(
        dif_node.outputs['BSDF'],
        mix_node.inputs[1],
    )
    l3 = mat.node_tree.links.new(
        emi_node.outputs['Emission'],
        mix_node.inputs[2],
    )
    return mix_node, (mix_node, dif_node, lit_node, emi_node), (l1, l2, l3)
def set_to_shadeless(mat, shadeless_clr):
    """
    Rewire the material to output solid colour <shadeless_clr>.
    Returns a callback that returns the material to original state.
    """
    # Locate output node
    output_node = None
    for n in mat.node_tree.nodes:
        if n.name == 'Material Output':
            output_node = n
            break
    else:
        raise ValueError(f"Could not locate output node in {mat.name} Material")
    # Remember (and disconnect) whatever currently feeds the Surface input so
    # the undo callback can restore it.
    socket = None
    if output_node.inputs['Surface'].is_linked:
        l = None
        for link in mat.node_tree.links:
            if link.to_socket == output_node.inputs['Surface']:
                l = link
                break
        else:
            raise ValueError(f"Could not locate output node Surface link in {mat.name} Material")
        socket = l.from_socket.node.outputs[l.from_socket.name]  # Lets hope there not multiple with the same name
        mat.node_tree.links.remove(l)
    # Check that shadeless rendering nodes have not already been injected to this material
    mix_node = None
    for n in mat.node_tree.nodes:
        if n.name == 'InjectedShadelessMix':
            mix_node = n
            break
    if mix_node is None:
        # Inject the nodes
        mix_node, nodes, links = add_shadeless_nodes_to_material(mat, shadeless_clr)
    else:
        # If they already exist; just set the colour to the correct value
        nodes = []
        for n in mat.node_tree.nodes:
            if n.name == 'InjectedShadelessEmission':
                n.inputs['Color'].default_value = shadeless_clr
            if n.name.startswith('InjectedShadeless'):
                nodes.append(n)
        links = set()
        for n in nodes:
            for s in n.inputs:
                if s.is_linked:
                    for l in s.links:
                        links.add(l)
    # Check and correct the node connection
    if mix_node.outputs['Shader'].is_linked:
        # Check and reset the node links between Shadeless mix node and the material output
        offending_link = None
        for link in mat.node_tree.links:
            # BUG FIX: was `link.from_socker` (typo), which raised
            # AttributeError whenever this branch executed.
            if link.from_socket.node == mix_node:
                offending_link = link
                break
        else:
            raise ValueError(f"Could not locate offending mix_shader link in the {mat.name} Material")
        mat.node_tree.links.remove(offending_link)
    temp_link = mat.node_tree.links.new(
        mix_node.outputs['Shader'],
        output_node.inputs['Surface'],
    )
    def undo_callback():
        """Remove the injected nodes/links and restore the original Surface link."""
        mat.node_tree.links.remove(temp_link)
        if socket:
            mat.node_tree.links.new(socket, output_node.inputs['Surface'])
        for l in links:
            mat.node_tree.links.remove(l)
        for n in nodes:
            mat.node_tree.nodes.remove(n)
    return undo_callback
def dump_mat(mat):
    """Print a material's node tree: every node and the source of each linked input."""
    print(f"Material {mat.name}")
    for node in mat.node_tree.nodes:
        print('\t', node.name, node.label, )
        for inp in node.inputs:
            link_desc = ''
            if len(inp.links):
                src = inp.links[0].from_socket
                link_desc = f'<{src.node.name}:{src.name}'
            print('\t\t>', inp.name, link_desc)
| from pathlib import Path
import bpy
import bpy_extras
def get_camera_coords(cam, pos):
    """
    For a specified point, get both the 3D coordinates and 2D pixel-space
    coordinates of the point from the perspective of the camera.

    Inputs:
    - cam: Camera object
    - pos: Vector giving 3D world-space position

    Returns a tuple of:
    - (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
      in the range [-1, 1]
    """
    scene = bpy.context.scene
    x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
    # Convert normalised camera-view coords to pixels at the render resolution.
    scale = scene.render.resolution_percentage / 100.0
    width = int(scale * scene.render.resolution_x)
    height = int(scale * scene.render.resolution_y)
    px = int(round(x * width))
    # Image y runs downward while camera-view y runs upward, so flip it.
    py = int(round(height - y * height))
    return (px, py, z)
def configure_cycles(output_path, width, height, tile_size, num_samples, min_bounces, max_bounces, use_gpu=False):
    """Configure the Cycles render engine for the current scene.

    Sets the output path, resolution, tile size, sample count and the
    transparent bounce limits; optionally enables CUDA rendering on
    every available compute device.
    """
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.render.filepath = str(output_path)
    bpy.context.scene.render.resolution_x = width
    bpy.context.scene.render.resolution_y = height
    bpy.context.scene.render.resolution_percentage = 100
    bpy.context.scene.render.tile_x = tile_size
    bpy.context.scene.render.tile_y = tile_size
    if use_gpu:
        bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
        bpy.context.scene.cycles.device = 'GPU'
        # Enable every detected CUDA device.
        for d in bpy.context.preferences.addons["cycles"].preferences.devices:
            d["use"] = 1
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = num_samples
    bpy.context.scene.cycles.transparent_min_bounces = min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = max_bounces
def compositor_output(out_socket, node_tree, output_path_prefix, set_bw=False, mkdir=True, frame_id=1):
    """Wire out_socket to a new compositor file-output node.

    Returns the path of the file expected for frame_id.
    """
    prefix = Path(output_path_prefix)
    out_dir = prefix.parent
    if mkdir:
        out_dir.mkdir(parents=True, exist_ok=True)
    elif not (out_dir.exists() and out_dir.is_dir()):
        raise RuntimeError(f'Cannot output to {out_dir}')
    out_name = prefix.name
    output_node = node_tree.nodes.new('CompositorNodeOutputFile')
    output_node.base_path = str(out_dir)
    output_node.file_slots[0].path = str(out_name)
    if set_bw:
        # Greyscale output both on the node and its file slot.
        output_node.format.color_mode = 'BW'
        output_node.file_slots[0].format.color_mode = 'BW'
    node_tree.links.new(out_socket, output_node.inputs['Image'])
    return out_dir / f"{out_name}{frame_id:0>4d}.png"
def compositor_obj_mask_output(out_socket, node_tree, obj_index, output_path_prefix, mkdir=True, frame_id=1):
    """Wire out_socket through an ID-mask node for obj_index and write it to file."""
    mask_node = node_tree.nodes.new('CompositorNodeIDMask')
    mask_node.index = obj_index
    mask_node.use_antialiasing = False
    node_tree.links.new(out_socket, mask_node.inputs['ID value'])
    prefix = str(output_path_prefix)
    # Strip any trailing extension and tag the name with the object index.
    if prefix.endswith('.png'):
        prefix = prefix[:-4]
    prefix += f'_o{obj_index:0>2d}_'
    return compositor_output(
        mask_node.outputs['Alpha'], node_tree, prefix,
        set_bw=True, mkdir=mkdir, frame_id=frame_id,
    )
def add_object_geometry(object_path, scale, loc, rotation=0):
    """
    Load an object from a file. object_path points to .blend file with
    object_path.stem object inside the scene, with scale 0 and positions at the origin
    - scale: scalar giving the size that the object should be in the scene
    - loc: tuple (x, y) location on the ground plane
    - rotation: scalar rotation to apply to the object
    """
    # First figure out how many of this object are already in the scene so we can
    # give the new object a unique name
    object_path = Path(object_path)
    name = object_path.stem
    count = 0
    for obj in bpy.data.objects:
        if obj.name.startswith(name):
            count += 1
    # Blender appends datablocks from a .blend via the internal 'Object' path.
    filename = object_path / 'Object' / name
    bpy.ops.wm.append(filename=str(filename))
    # Give it a new name to avoid conflicts
    new_name = f'{name}_{count}'
    bpy.data.objects[name].name = new_name
    # Set the new object as active, then rotate, scale, and translate it
    x, y = loc
    o = bpy.data.objects[new_name]
    o.select_set(state=True, view_layer=bpy.context.view_layer)
    bpy.context.view_layer.objects.active = o
    bpy.context.object.rotation_euler[2] = rotation  # Rotate around z
    bpy.ops.transform.resize(value=(scale, scale, scale))
    # Lift by `scale` so the object sits on the ground plane at z = scale.
    bpy.ops.transform.translate(value=(x, y, scale))
    o.select_set(state=False, view_layer=bpy.context.view_layer)
def load_material(path):
    """Append the node group named after the .blend file at path into bpy.data."""
    blend = Path(path)
    bpy.ops.wm.append(filename=str(blend / 'NodeTree' / blend.stem))
def add_material(obj, mat_path, **properties):
    """
    Create a new material from the node group stored in *mat_path* and assign
    it to *obj* (which must not already carry a material).

    The node group is appended via load_material on first use; named group
    inputs can be initialised through **properties.  Returns the new material.
    """
    # Some node groups name their displacement input "Displacement Strength".
    if 'Displacement' in properties:
        properties['Displacement Strength'] = properties['Displacement']
    # Pick a material name that is unique within the scene.
    # (Removed an unused `mat_count` local that was never read.)
    names = {m.name for m in bpy.data.materials}
    name = mat_path.stem
    mat_name = mat_path.stem
    if name in names:
        idx = sum(1 for m in bpy.data.materials if m.name.startswith(name))
        mat_name = name + f'_{idx + 1}'
    # Create a new material
    mat = bpy.data.materials.new(mat_name)
    mat.name = mat_name
    mat.use_nodes = True
    mat.cycles.displacement_method = 'BOTH'
    # Attach the new material to the object
    # Make sure it doesn't already have materials
    assert len(
        obj.data.materials) == 0, f"{obj.name} has multiple materials ({', '.join(m.name for m in obj.data.materials if m is not None)}), adding {name} will fail"
    obj.data.materials.append(mat)
    mat.node_tree.links.clear()
    mat.node_tree.nodes.clear()
    output_node = mat.node_tree.nodes.new('ShaderNodeOutputMaterial')
    output_node.is_active_output = True
    # Add a new GroupNode to the node tree of the active material,
    group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
    if name not in bpy.data.node_groups:
        load_material(mat_path)
    group_node.node_tree = bpy.data.node_groups[name]
    # Wire group outputs to same-named material-output inputs first.
    for out_socket in group_node.outputs:
        if out_socket.name in output_node.inputs:
            mat.node_tree.links.new(
                group_node.outputs[out_socket.name],
                output_node.inputs[out_socket.name],
            )
    # Fall back to conventional socket names for the Surface connection.
    if not output_node.inputs['Surface'].is_linked:
        if 'Shader' in group_node.outputs and not group_node.outputs['Shader'].is_linked:
            mat.node_tree.links.new(
                group_node.outputs["Shader"],
                output_node.inputs["Surface"],
            )
        elif 'BSDF' in group_node.outputs and not group_node.outputs['BSDF'].is_linked:
            mat.node_tree.links.new(
                group_node.outputs["BSDF"],
                output_node.inputs["Surface"],
            )
        else:
            raise ValueError(f"Cannot resolve material output for {mat.name}")
    # Initialise any group inputs the caller supplied values for.
    for inp in group_node.inputs:
        if inp.name in properties:
            inp.default_value = properties[inp.name]
    return mat
def add_shadeless_nodes_to_material(mat, shadeless_clr):
    """
    Inject nodes to the material tree required for rendering solid colour output.

    Adds a Mix Shader driven by the light-path "Is Camera Ray" flag so camera
    rays see a flat emission of *shadeless_clr* while secondary rays still hit
    a diffuse BSDF.  Returns (mix_node, injected_nodes, created_links) so the
    injection can later be undone.
    """
    # Node names below act as markers so set_to_shadeless can find/remove them.
    mix_node = mat.node_tree.nodes.new('ShaderNodeMixShader')
    mix_node.name = 'InjectedShadelessMix'
    dif_node = mat.node_tree.nodes.new('ShaderNodeBsdfDiffuse')
    dif_node.name = 'InjectedShadelessDif'
    lit_node = mat.node_tree.nodes.new('ShaderNodeLightPath')
    lit_node.name = 'InjectedShadelessLit'
    emi_node = mat.node_tree.nodes.new('ShaderNodeEmission')
    emi_node.name = 'InjectedShadelessEmission'
    emi_node.inputs['Color'].default_value = shadeless_clr
    # Camera rays (Fac=1) take input 2 (emission); all other rays input 1 (diffuse).
    l1 = mat.node_tree.links.new(
        lit_node.outputs['Is Camera Ray'],
        mix_node.inputs['Fac'],
    )
    l2 = mat.node_tree.links.new(
        dif_node.outputs['BSDF'],
        mix_node.inputs[1],
    )
    l3 = mat.node_tree.links.new(
        emi_node.outputs['Emission'],
        mix_node.inputs[2],
    )
    return mix_node, (mix_node, dif_node, lit_node, emi_node), (l1, l2, l3)
def set_to_shadeless(mat, shadeless_clr):
    """
    Rewire the material to output solid colour <shadeless_clr>.
    Returns a callback that returns the material to its original state.
    """
    # Locate output node
    output_node = None
    for n in mat.node_tree.nodes:
        if n.name == 'Material Output':
            output_node = n
            break
    else:
        raise ValueError(f"Could not locate output node in {mat.name} Material")
    # Detach (and remember) whatever currently feeds the Surface input.
    socket = None
    if output_node.inputs['Surface'].is_linked:
        l = None
        for link in mat.node_tree.links:
            if link.to_socket == output_node.inputs['Surface']:
                l = link
                break
        else:
            raise ValueError(f"Could not locate output node Surface link in {mat.name} Material")
        socket = l.from_socket.node.outputs[l.from_socket.name]  # Lets hope there are not multiple with the same name
        mat.node_tree.links.remove(l)
    # Check that shadeless rendering nodes have not already been injected to this material
    mix_node = None
    for n in mat.node_tree.nodes:
        if n.name == 'InjectedShadelessMix':
            mix_node = n
            break
    if mix_node is None:
        # Inject the nodes
        mix_node, nodes, links = add_shadeless_nodes_to_material(mat, shadeless_clr)
    else:
        # If they already exist, just set the colour to the correct value
        nodes = []
        for n in mat.node_tree.nodes:
            if n.name == 'InjectedShadelessEmission':
                n.inputs['Color'].default_value = shadeless_clr
            if n.name.startswith('InjectedShadeless'):
                nodes.append(n)
        links = set()
        for n in nodes:
            for s in n.inputs:
                if s.is_linked:
                    for l in s.links:
                        links.add(l)
    # Check and correct the node connection
    if mix_node.outputs['Shader'].is_linked:
        # Check and reset the node links between Shadeless mix node and the material output
        offending_link = None
        for link in mat.node_tree.links:
            # BUGFIX: was `link.from_socker` -- an attribute that does not exist
            # on Blender NodeLink objects, so this branch always raised
            # AttributeError instead of finding the link.
            if link.from_socket.node == mix_node:
                offending_link = link
                break
        else:
            raise ValueError(f"Could not locate offending mix_shader link in the {mat.name} Material")
        mat.node_tree.links.remove(offending_link)
    temp_link = mat.node_tree.links.new(
        mix_node.outputs['Shader'],
        output_node.inputs['Surface'],
    )

    def undo_callback():
        """Remove the injected nodes/links and restore the original Surface link."""
        mat.node_tree.links.remove(temp_link)
        if socket:
            mat.node_tree.links.new(socket, output_node.inputs['Surface'])
        for l in links:
            mat.node_tree.links.remove(l)
        for n in nodes:
            mat.node_tree.nodes.remove(n)
    return undo_callback
def dump_mat(mat):
    """Print a debug listing of a material's nodes and their input links."""
    print(f"Material {mat.name}")
    nt = mat.node_tree
    for n in nt.nodes:
        print('\t', n.name, n.label, )
        for i in n.inputs:
            # For each input socket, show which node:socket feeds it (if any).
            print('\t\t>', i.name,
                  f'<{i.links[0].from_socket.node.name}:{i.links[0].from_socket.name}' if len(i.links) else '')
| en | 0.819026 | For a specified point, get both the 3D coordinates and 2D pixel-space coordinates of the point from the perspective of the camera. Inputs: - cam: Camera object - pos: Vector giving 3D world-space position Returns a tuple of: - (px, py, pz): px and py give 2D image-space coordinates; pz gives depth in the range [-1, 1] Load an object from a file. object_path points to .blend file with object_path.stem object inside the scene, with scale 0 and positions at the origin - scale: scalar giving the size that the object should be in the scene - loc: tuple (x, y) location on the ground plane - rotation: scalar rotation to apply to the object # First figure out how many of this object are already in the scene so we can # give the new object a unique name # Give it a new name to avoid conflicts # Set the new object as active, then rotate, scale, and translate it # Rotate around z Create a new material and assign it to the active object. "name" should be the name of a material that has been previously loaded using load_materials. # Sometime Displacement is called Displacement Strength # Figure out how many materials are already in the scene # Create a new material # Attach the new material to the object # Make sure it doesn't already have materials # Add a new GroupNode to the node tree of the active material, # Also this seems to be the only way to copy a node tree in the headless mode # Wire first by-name then by preset names, to the group outputs to the material output # print(f"{out_socket.name} not found in the output of the material") # print(f"Unlinked Surface socket in the material output; trying to fill with Shader socket of the group") # print(f"Unlinked Surface socket in the material output; trying to fill with BSDF socket of the group") Inject nodes to the material tree required for rendering solid colour output Rewire the material to output solid colour <shadeless_clr>. 
Returns a callback that returns the material to original state # print(f"Setting {mat.name} to shadeless") # Locate output node # Lets hope there not multiple with the same name # print(f"Will try to restore link between {l.from_socket.node.name}:{l.from_socket.name} {socket}") # Check that shadeless rendering nodes have not already been injected to this material # Inject the nodes # If they already exist; just set the colour to the correct value # Check and correct the node connection # Check and reset the node links between Shadeless mix node and the material output # print(f"Reverting {mat.name} to the original") | 3.035457 | 3 |
asn/urls.py | wh8983298/GreaterWMS | 1,063 | 6624873 | from django.urls import path, re_path
from . import views
# URL routes for the `asn` app: list/detail CRUD endpoints, a print view,
# the receiving workflow steps (preload -> presort -> sorted -> movetobin)
# and file-download endpoints.  Detail routes capture the primary key as <pk>.
urlpatterns = [
    path(r'list/', views.AsnListViewSet.as_view({"get": "list", "post": "create"}), name="asnlist"),
    re_path(r'^list/(?P<pk>\d+)/$', views.AsnListViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy'
    }), name="asnlist_1"),
    path(r'detail/', views.AsnDetailViewSet.as_view({"get": "list", "post": "create", 'put': 'update'}), name="asndetail"),
    re_path(r'^detail/(?P<pk>\d+)/$', views.AsnDetailViewSet.as_view({
        'get': 'retrieve',
    }), name="asndetail_1"),
    re_path(r'^viewprint/(?P<pk>\d+)/$', views.AsnViewPrintViewSet.as_view({
        'get': 'retrieve',
    }), name="asnviewprint_1"),
    re_path(r'^preload/(?P<pk>\d+)/$', views.AsnPreLoadViewSet.as_view({
        'post': 'create',
    }), name="preload_1"),
    re_path(r'^presort/(?P<pk>\d+)/$', views.AsnPreSortViewSet.as_view({
        'post': 'create',
    }), name="presort_1"),
    re_path(r'^sorted/(?P<pk>\d+)/$', views.AsnSortedViewSet.as_view({
        'post': 'create',
    }), name="sorted_1"),
    re_path(r'^movetobin/(?P<pk>\d+)/$', views.MoveToBinViewSet.as_view({
        'post': 'create',
    }), name="movetobin_1"),
    path(r'filelist/', views.FileListDownloadView.as_view({"get": "list"}), name="asnfilelistdownload"),
    path(r'filedetail/', views.FileDetailDownloadView.as_view({"get": "list"}), name="asnfiledetaildownload")
]
| from django.urls import path, re_path
from . import views
urlpatterns = [
path(r'list/', views.AsnListViewSet.as_view({"get": "list", "post": "create"}), name="asnlist"),
re_path(r'^list/(?P<pk>\d+)/$', views.AsnListViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="asnlist_1"),
path(r'detail/', views.AsnDetailViewSet.as_view({"get": "list", "post": "create", 'put': 'update'}), name="asndetail"),
re_path(r'^detail/(?P<pk>\d+)/$', views.AsnDetailViewSet.as_view({
'get': 'retrieve',
}), name="asndetail_1"),
re_path(r'^viewprint/(?P<pk>\d+)/$', views.AsnViewPrintViewSet.as_view({
'get': 'retrieve',
}), name="asnviewprint_1"),
re_path(r'^preload/(?P<pk>\d+)/$', views.AsnPreLoadViewSet.as_view({
'post': 'create',
}), name="preload_1"),
re_path(r'^presort/(?P<pk>\d+)/$', views.AsnPreSortViewSet.as_view({
'post': 'create',
}), name="presort_1"),
re_path(r'^sorted/(?P<pk>\d+)/$', views.AsnSortedViewSet.as_view({
'post': 'create',
}), name="sorted_1"),
re_path(r'^movetobin/(?P<pk>\d+)/$', views.MoveToBinViewSet.as_view({
'post': 'create',
}), name="movetobin_1"),
path(r'filelist/', views.FileListDownloadView.as_view({"get": "list"}), name="asnfilelistdownload"),
path(r'filedetail/', views.FileDetailDownloadView.as_view({"get": "list"}), name="asnfiledetaildownload")
]
| none | 1 | 2.071627 | 2 | |
opensitua_core/strings.py | valluzzi/opensitua_core | 0 | 6624874 | # -----------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2019 <NAME>
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: strings.py
# Purpose:
#
# Author: <NAME>
#
# Created: 27/12/2012
# -----------------------------------------------------------------------------
import re
import six
import random
def isstring(var):
    """
    isstring - Returns True if the variable is a string
    """
    #return isinstance(var, (str, unicode)) #Python2
    return isinstance(var, six.string_types) #Python2 Python3

def isarray(var):
    """
    isarray - Returns True if the variable is a list (tuples count too)
    """
    return isinstance(var, (list, tuple))
def isnumeric(text):
    """
    isnumeric - say yes if it's a number.

    Accepts an optional sign, decimal point and exponent (e.g. "-1.5e-3");
    surrounding whitespace is ignored.
    """
    # Raw string: the original non-raw literal contained invalid escape
    # sequences ("\d", "\."), which is a warning on modern interpreters and
    # slated to become a syntax error.
    match = re.match(r"^[-+]?((\d+(\.\d*)?)|(\d*\.\d+))([eE][-+]?\d+)?$", text.strip())
    return match is not None
def isquery(text):
    """
    isquery - True if *text* starts like a SQL statement
    (SELECT/PRAGMA/INSERT/DELETE/REPLACE/UPDATE/CREATE, case-insensitive,
    leading whitespace allowed).
    """
    pattern = r'^\s*((SELECT|PRAGMA|INSERT|DELETE|REPLACE|UPDATE|CREATE).*)'
    res = re.match(pattern, text, re.IGNORECASE)
    return True if res else False

def sformat(text, args):
    """
    sformat - replace each "{key}" placeholder in *text* with args[key].
    *args* may be None/empty (treated as no substitutions).
    """
    args = args if args else {}
    for key in args:
        text = text.replace("{%s}" % key, "%s" % (args[key]))
    return text

def lower(text):
    """
    lower - lowercase a string, or each item of a list/tuple (recursively).
    Any other input type yields "".
    """
    if isstring(text):
        return text.lower()
    elif isarray(text):
        return [lower(item) for item in text]
    return ""

def upper(text):
    """
    upper - uppercase a string, or each item of a list/tuple (recursively).
    Any other input type yields "".
    """
    if isstring(text):
        return text.upper()
    elif isarray(text):
        return [upper(item) for item in text]
    return ""
def padr(text, n, c):
    """
    padr - right pad of text with character c up to total length n.
    No-op when text is already n chars or longer; if c has more than one
    character the result can overshoot n.
    """
    text = str(text)
    return text + str(c) * (n - len(text))

def padl(text, n, c):
    """
    left pad of text with character c (same semantics as padr).
    """
    text = str(text)
    return str(c) * (n - len(text)) + text

def trim(text, toremove=' '):
    """
    trim - strip a character from both ends of a string, or of every item
    of a list/tuple.
    NOTE: only the FIRST character of *toremove* is used, and in the array
    branch items that are empty BEFORE stripping are dropped.
    """
    toremove = toremove[0]
    if isstring(text):
        return text.strip(toremove)
    elif isarray(text):
        return [trim(item, toremove) for item in text if len(item) > 0]
    return text

def ltrim(text, toremove):
    """
    ltrim - left trim (same single-character / array semantics as trim).
    """
    toremove = toremove[0]
    if isstring(text):
        return text.lstrip(toremove)
    elif isarray(text):
        return [ltrim(item, toremove) for item in text if len(item) > 0]
    return text
def chrtran(text, tosearch, toreplace):
    """Translate characters: the j-th character of *tosearch* becomes the
    j-th character of *toreplace*, or is deleted when *toreplace* is shorter."""
    for pos, old_char in enumerate(tosearch):
        new_char = toreplace[pos] if pos < len(toreplace) else ""
        text = text.replace(old_char, new_char)
    return text
def startswith(text, elenco, casesensitive=True):
    """
    startswith - Returns True if *text* starts with one of the candidates
    in *elenco* (a comma-separated string or a list).
    """
    for item in listify(elenco, ","):
        if casesensitive:
            if text.startswith(item):
                return True
        else:
            if text.lower().startswith(item.lower()):
                return True
    return False

def endswith(text, elenco, casesensitive=True):
    """
    endswith - Returns True if *text* ends with one of the candidates
    in *elenco* (a comma-separated string or a list).
    """
    for item in listify(elenco, ","):
        if casesensitive:
            if text.endswith(item):
                return True
        else:
            if text.lower().endswith(item.lower()):
                return True
    return False
def leftpart(text, sep, included=False):
    """
    leftpart - everything before the first occurrence of *sep* (the whole
    string when *sep* is absent).  With included=True the separator is
    appended to the result (even when *sep* was absent -- historical quirk).
    Lists are mapped element-wise; any other input type yields "".
    """
    if isstring(text):
        arr = text.split(sep, 1)
        if len(arr) >= 1:
            return arr[0] + sep if included else arr[0]
    elif isarray(text):
        return [leftpart(item, sep, included) for item in text]
    # Consistency fix: previously fell through returning None here,
    # unlike rightpart which returns "" for unsupported input types.
    return ""
def rightpart(text, sep, included=False):
    """rightpart - everything after the first occurrence of *sep*
    ("" when *sep* is absent); included=True keeps the separator.
    Lists are mapped element-wise; other input types yield ""."""
    if isstring(text):
        head_tail = text.split(sep, 1)
        if len(head_tail) > 1:
            tail = head_tail[1]
            return sep + tail if included else tail
    elif isarray(text):
        return [rightpart(element, sep, included) for element in text]
    return ""
def tempname(prefix="", postfix="", ext=""):
    """
    tempname - returns a pseudo-random temporary name
    "<prefix><uid><postfix>[.<ext>]" with uid in [0, 10**6].
    NOTE: built on random(), so the name is neither guaranteed unique nor
    suitable for security-sensitive use.
    """
    # BUGFIX: randint(0, 1e6) passed a float bound; non-integer arguments to
    # randrange/randint were removed in Python 3.12 and raise TypeError.
    uid = random.randint(0, 10**6)
    ext = "." + ext if ext else ""
    return "%s%s%s%s" % (prefix, uid, postfix, ext)
def textin(text, prefix, postfix, casesensitive=True):
    """textin - the text found between *prefix* and *postfix* (both excluded);
    the delimiters are used as regex fragments.  Returns "" on no match."""
    flags = 0 if casesensitive else re.IGNORECASE
    found = re.search(r'(?<=' + prefix + ')(.*?)(?=' + postfix + ')', text, flags)
    return found.group() if found else ""
def textbetween(text, prefix, postfix, casesensitive=True):
    """
    textbetween - the text matched from *prefix* through *postfix*,
    delimiters INCLUDED (g.group() is the whole match).  The previous
    docstring, copied from textin, wrongly claimed they were excluded.
    Delimiters are regex fragments; DOTALL lets the span cross newlines.
    Returns "" on no match.
    """
    if casesensitive:
        g = re.search(r'' + prefix + '(.*?)' + postfix, text, re.DOTALL)
    else:
        g = re.search(r'' + prefix + '(.*?)' + postfix, text, re.IGNORECASE | re.DOTALL)
    return g.group() if g else ""
def normalizestring(text):
    """normalizestring - collapse every whitespace run into a single space."""
    return re.sub(r"\s+", " ", text)
def wrap(text, leftc, rightc=None):
    """
    wrap - surround a string (or each item of a list) with *leftc* and
    *rightc*; *rightc* defaults to *leftc*.  Other input types return None.
    """
    if isstring(text):
        rightc = leftc if rightc is None else rightc
        return leftc + text + rightc
    elif isarray(text):
        return [wrap(item, leftc, rightc) for item in text]

def unwrap(text, leftc, rightc=None):
    """
    unwrap - repeatedly strip a leftc...rightc wrapping from a string
    (or from each item of a list) while both delimiters are present.
    """
    if isstring(text):
        rightc = leftc if rightc is None else rightc
        start = len(leftc)
        end = len(rightc)
        while text.startswith(leftc) and text.endswith(rightc):
            text = text[start:-end]
        return text
    elif isarray(text):
        return [unwrap(item, leftc, rightc) for item in text]
def split(text, sep=" ", glue="'", removeEmpty=False):
    """
    split - like str.split(sep), except separators appearing between an
    (unbalanced) pair of *glue* characters do not split; glue characters
    are kept in the output words.
    With removeEmpty, a blank TRAILING word is dropped (intermediate empty
    words are kept -- preserved behaviour of the original implementation).
    """
    res = []
    word = ""
    dontsplit = False
    step = len(sep)
    j = 0
    while j < len(text):
        c = text[j]
        if c in glue:
            dontsplit = not dontsplit
        if text[j:j + step] == sep and not dontsplit:
            # BUGFIX: advance past the WHOLE separator.  The original moved
            # one character at a time, so a multi-character separator leaked
            # its tail into the next word (split("a::b", "::") -> ["a", ":b"]).
            res.append(word)
            word = ""
            j += step
        else:
            word += c
            j += 1
    if not removeEmpty or len(word.strip()) > 0:
        res.append(word)
    return res
def listify(text, sep=",", glue="\""):
    """
    listify - coerce to a list: None -> [], string -> glue-aware split,
    list/tuple -> unchanged, anything else -> single-item list.
    """
    if text is None:
        return []
    elif isstring(text):
        return split(text, sep, glue, removeEmpty=True)
    elif isarray(text):
        return text
    return [text]

def arr2dict(arr, keyname="key", valuename="value"):
    """
    arr2dict - build {item[keyname]: item[valuename]} from a list of
    dict-like rows; rows missing *valuename* map to None.
    """
    res = {}
    for item in arr:
        res[item[keyname]] = item[valuename] if valuename in item else None
    return res

def mapify(text, sep=",", kvsep="=", strip_char=" ", glue="\"", parsing=False):
    """
    Grow up a dictionary from a "k=v<sep>k=v" text string.
    Keys and values are stripped of *strip_char* and *glue*; an item
    without *kvsep* maps to "".
    NOTE: the *parsing* branch is currently a no-op (the parseValue call
    is commented out, so the value is assigned to itself).
    """
    # text = "hello=world,good=bye"
    items = listify(text, sep, glue)
    res = {}
    for item in items:
        item = item.strip(strip_char)
        arr = item.split(kvsep, 1)
        if len(arr) == 1:
            key, value = arr[0], ""
        elif len(arr) == 2:
            key, value = arr
        key, value = key.strip(strip_char).strip(glue), value.strip(strip_char).strip(glue)
        if parsing:
            #value = parseValue(value)
            value = (value)
        res[key] = value
    return res
def replaceAll(text, search, replace):
    """
    replaceAll - replace every regex match of *search* in *text* with
    *replace* (thin wrapper over re.sub).
    """
    # BUGFIX: the arguments were passed as re.sub(text, search, replace),
    # i.e. *text* was used as the pattern and *replace* as the subject
    # string, so the function never edited *text* at all.
    # re.sub signature is re.sub(pattern, repl, string).
    return re.sub(search, replace, text)
| # -----------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2019 <NAME>
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: strings.py
# Purpose:
#
# Author: <NAME>
#
# Created: 27/12/2012
# -----------------------------------------------------------------------------
import re
import six
import random
def isstring(var):
"""
isstring - Returns True if the variable is a string
"""
#return isinstance(var, (str, unicode)) #Python2
return isinstance(var, six.string_types) #Python2 Python3
def isarray(var):
"""
isarray - Returns True if the variable is a list
"""
return isinstance(var, (list, tuple))
def isnumeric(text):
"""
isnumeric - say yes if it'a number
"""
match = re.match("^[-+]?((\d+(\.\d*)?)|(\d*\.\d+))([eE][-+]?\d+)?$", text.strip())
return True if match else False
def isquery(text):
"""
isquery
"""
pattern = r'^\s*((SELECT|PRAGMA|INSERT|DELETE|REPLACE|UPDATE|CREATE).*)'
res = re.match(pattern, text, re.IGNORECASE)
return True if res else False
def sformat(text, args):
"""
sformat
"""
args = args if args else {}
for key in args:
text = text.replace("{%s}" % key, "%s" % (args[key]))
return text
def lower(text):
"""
lower
"""
if isstring(text):
return text.lower()
elif isarray(text):
return [lower(item) for item in text]
return ""
def upper(text):
"""
upper
"""
if isstring(text):
return text.upper()
elif isarray(text):
return [upper(item) for item in text]
return ""
def padr(text, n, c):
"""
padr - right pad of text with character c
"""
text = str(text)
return text + str(c) * (n - len(text))
def padl(text, n, c):
"""
left pad of text with character c
"""
text = str(text)
return str(c) * (n - len(text)) + text
def trim(text, toremove=' '):
"""
trim - trim all array
"""
toremove = toremove[0]
if isstring(text):
return text.strip(toremove)
elif isarray(text):
return [trim(item, toremove) for item in text if len(item) > 0]
return text
def ltrim(text, toremove):
"""
ltrim - left trim
"""
toremove = toremove[0]
if isstring(text):
return text.lstrip(toremove)
elif isarray(text):
return [ltrim(item, toremove) for item in text if len(item) > 0]
return text
def chrtran(text, tosearch, toreplace):
"""
chrtran
"""
for j in range(0, len(tosearch)):
c = toreplace[j] if j in range(0, len(toreplace)) else ""
text = text.replace(tosearch[j], c)
return text
def startswith(text, elenco, casesensitive=True):
"""
startswith - Returns True if the text starts with one of ...
"""
for item in listify(elenco, ","):
if casesensitive:
if text.startswith(item):
return True
else:
if text.lower().startswith(item.lower()):
return True
return False
def endswith(text, elenco, casesensitive=True):
"""
endswith - Returns True if the text ends with one of ...
"""
for item in listify(elenco, ","):
if casesensitive:
if text.endswith(item):
return True
else:
if text.lower().endswith(item.lower()):
return True
return False
def leftpart(text, sep, included = False):
"""
leftpart
"""
if isstring(text):
arr = text.split(sep, 1)
if len(arr) >= 1:
return arr[0] + sep if included else arr[0]
elif isarray(text):
return [leftpart(item, sep, included) for item in text]
def rightpart(text, sep, included = False):
"""
rightpart
"""
if isstring(text):
arr = text.split(sep, 1)
if len(arr) > 1:
return sep + arr[1] if included else arr[1]
elif isarray(text):
return [rightpart(item, sep, included) for item in text]
return ""
def tempname(prefix="", postfix="", ext=""):
"""
tempname -returns a temporary name
"""
uid = random.randint(0,1e6)
ext = "."+ext if ext else ""
return "%s%s%s%s"%(prefix,uid,postfix,ext)
def textin(text, prefix, postfix, casesensitive=True):
"""
textin - return text between prefix and suffix excluded
"""
if casesensitive:
g = re.search(r'(?<=' + prefix + ')(.*?)(?=' + postfix + ')', text)
else:
g = re.search(r'(?<=' + prefix + ')(.*?)(?=' + postfix + ')', text, re.IGNORECASE)
return g.group() if g else ""
def textbetween(text, prefix, postfix, casesensitive=True):
"""
textin - return text between prefix and suffix excluded
"""
if casesensitive:
g = re.search(r'' + prefix + '(.*?)' + postfix, text, re.DOTALL)
else:
g = re.search(r'' + prefix + '(.*?)' + postfix, text, re.IGNORECASE|re.DOTALL)
return g.group() if g else ""
def normalizestring(text):
"""
normalizestring
"""
return re.sub(r'\s+', ' ', text)
def wrap(text, leftc, rightc=None):
"""
wrap
"""
if isstring(text):
rightc = leftc if rightc is None else rightc
return leftc + text + rightc
elif isarray(text):
return [wrap(item, leftc, rightc) for item in text]
def unwrap(text, leftc, rightc=None):
"""
unwrap
"""
if isstring(text):
rightc = leftc if rightc is None else rightc
start = len(leftc)
end = len(rightc)
while text.startswith(leftc) and text.endswith(rightc):
text = text[start:-end]
return text
elif isarray(text):
return [unwrap(item, leftc, rightc) for item in text]
def split(text, sep=" ", glue="'", removeEmpty=False):
"""
split - a variant of split with glue characters
"""
res = []
word = ""
dontsplit = False
lookahead = len(sep)
for j in range(0, len(text)):
c = text[j]
ca = text[j:j+lookahead]
if c in glue:
dontsplit = not dontsplit
if ca == sep and not dontsplit:
res.append(word)
word = ""
else:
word += c
if not removeEmpty or len(word.strip()) > 0:
res.append(word)
return res
def listify(text, sep=",", glue="\""):
"""
listify - make a list from string
"""
if text is None:
return []
elif isstring(text):
return split(text, sep, glue, removeEmpty=True)
elif isarray(text):
return text
return [text]
def arr2dict(arr,keyname="key",valuename="value"):
"""
arr2dict - transform an array to dictionary key:
"""
res = {}
for item in arr:
res[item[keyname]]= item[valuename] if valuename in item else None
return res
def mapify(text, sep=",", kvsep="=", strip_char=" ", glue= "\"", parsing=False):
"""
Growup a dictionary from text string
"""
# text = "hello=world,good=bye"
items = listify(text, sep, glue)
res = {}
for item in items:
item = item.strip(strip_char)
arr = item.split(kvsep, 1)
if len(arr)==1:
key, value = arr[0], ""
elif len(arr)==2:
key, value = arr
key, value = key.strip(strip_char).strip(glue), value.strip(strip_char).strip(glue)
if parsing:
#value = parseValue(value)
value = (value)
res[key] = value
return res
def replaceAll(text, search, replace):
    """
    replaceAll - replace every regex match of *search* in *text* with
    *replace* (thin wrapper over re.sub).
    """
    # BUGFIX: the arguments were passed as re.sub(text, search, replace),
    # i.e. *text* was used as the pattern and *replace* as the subject
    # string, so the function never edited *text* at all.
    # re.sub signature is re.sub(pattern, repl, string).
    return re.sub(search, replace, text)
| en | 0.523065 | # ----------------------------------------------------------------------------- # Licence: # Copyright (c) 2012-2019 <NAME> # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # # Name: strings.py # Purpose: # # Author: <NAME> # # Created: 27/12/2012 # ----------------------------------------------------------------------------- isstring - Returns True if the variable is a string #return isinstance(var, (str, unicode)) #Python2 #Python2 Python3 isarray - Returns True if the variable is a list isnumeric - say yes if it'a number isquery sformat lower upper padr - right pad of text with character c left pad of text with character c trim - trim all array ltrim - left trim chrtran startswith - Returns True if the text starts with one of ... endswith - Returns True if the text ends with one of ... leftpart rightpart tempname -returns a temporary name textin - return text between prefix and suffix excluded textin - return text between prefix and suffix excluded normalizestring wrap unwrap split - a variant of split with glue characters listify - make a list from string arr2dict - transform an array to dictionary key: Growup a dictionary from text string # text = "hello=world,good=bye" #value = parseValue(value) replaceAll | 2.724059 | 3 |
tests/rpreport/test_rpreport.py | niraito/rptools | 0 | 6624875 | from rptools.rpreport.rp_report import (
run_report
)
import os
import fnmatch
import tempfile
import filecmp
__test__ = True
# Fixture locations, all relative to this test module's `data` directory.
data_path = os.path.join(
    os.path.dirname(__file__),
    'data'
)
# Inputs: a directory of rpSBML files and the same set as a tarball.
data_input_dir_path = os.path.join(
    data_path,
    'input'
)
data_input_tar_file = os.path.join(
    data_input_dir_path,
    'input_rpSBML.tar'
)
# Reference outputs the generated reports are compared against.
data_output_dir_path = os.path.join(
    data_path,
    'output'
)
data_output_standalone_file = os.path.join(
    data_path,
    'standalone_output',
    'index.html'
)
data_output_js_file = os.path.join(
    data_path,
    'output',
    'js',
    'data.js'
)
#files = fnmatch.filter(os.listdir(data_input_dir_path), "*.xml")
#files.sort()
def _check_standalone_report(source_is_dir, input_path):
    """Run rpreport in standalone mode on *input_path* and compare the
    produced single-file index.html against the reference fixture."""
    with tempfile.TemporaryDirectory() as tmp_folder:
        run_report(source_is_dir, input_path, tmp_folder, False, False, True)
        tested_output_single_file_html = os.path.join(
            tmp_folder,
            'index.html'
        )
        with open(tested_output_single_file_html, 'r') as test_f:
            test_content = test_f.read()
        with open(data_output_standalone_file, 'r') as ref_f:
            ref_content = ref_f.read()
        assert test_content == ref_content
        # assert filecmp.cmp(tested_output_single_file_html, data_output_standalone_file, shallow=False)

def test_run_report_standalone_from_dir():
    # testing standalone file output from files into a directory
    _check_standalone_report(True, data_input_dir_path)

def test_run_report_standalone():
    # testing standalone file output from tar file
    _check_standalone_report(False, data_input_tar_file)
def test_run_report():
    # testing files output from tar file: run in multi-file mode and compare
    # the generated js/data.js against the reference copy.
    with tempfile.TemporaryDirectory() as tmp_folder:
        run_report(False, data_input_tar_file, tmp_folder, False, False, False)
        tested_output_js_file = os.path.join(
            tmp_folder,
            'js',
            'data.js'
        )
        # NOTE(review): these existence checks point at data_output_dir_path
        # (the committed reference fixtures), not at tmp_folder (the freshly
        # generated output).  That looks like a copy/paste slip -- presumably
        # the intent was to assert the assets were generated -- TODO confirm.
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-grid.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-grid.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-theme-alpine.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'bootstrap.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'bootstrap.min.css.map'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'ag-charts-community.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'ag-grid-community.min.noStyle.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'bootstrap.bundle.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'bootstrap.bundle.min.js.map'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'jquery-3.6.0.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'main.js'))
        assert filecmp.cmp(tested_output_js_file, data_output_js_file, shallow=False) | from rptools.rpreport.rp_report import (
run_report
)
import os
import fnmatch
import tempfile
import filecmp
__test__ = True
data_path = os.path.join(
os.path.dirname(__file__),
'data'
)
data_input_dir_path = os.path.join(
data_path,
'input'
)
data_input_tar_file = os.path.join(
data_input_dir_path,
'input_rpSBML.tar'
)
data_output_dir_path = os.path.join(
data_path,
'output'
)
data_output_standalone_file = os.path.join(
data_path,
'standalone_output',
'index.html'
)
data_output_js_file = os.path.join(
data_path,
'output',
'js',
'data.js'
)
#files = fnmatch.filter(os.listdir(data_input_dir_path), "*.xml")
#files.sort()
def test_run_report_standalone_from_dir():
# testing standalone file output from files into a directory
with tempfile.TemporaryDirectory() as tmp_folder:
run_report(True, data_input_dir_path, tmp_folder, False, False, True)
tested_output_single_file_html = os.path.join(
tmp_folder,
'index.html'
)
with open(tested_output_single_file_html, 'r') as test_f:
test_content = test_f.read()
with open(data_output_standalone_file, 'r') as ref_f:
ref_content = ref_f.read()
assert test_content == ref_content
# assert filecmp.cmp(tested_output_single_file_html, data_output_standalone_file, shallow=False)
def test_run_report_standalone():
# testing standalone file output from tar file
with tempfile.TemporaryDirectory() as tmp_folder:
run_report(False, data_input_tar_file, tmp_folder, False, False, True)
tested_output_single_file_html = os.path.join(
tmp_folder,
'index.html'
)
with open(tested_output_single_file_html, 'r') as test_f:
test_content = test_f.read()
with open(data_output_standalone_file, 'r') as ref_f:
ref_content = ref_f.read()
assert test_content == ref_content
# assert filecmp.cmp(tested_output_single_file_html, data_output_standalone_file, shallow=False)
def test_run_report():
    """Multi-file report from a tar archive: assets exist, data.js matches.

    NOTE(review): the os.path.exists assertions below check the *reference*
    directory (``data_output_dir_path``), not the freshly generated
    ``tmp_folder`` output — they validate the fixture, not run_report's
    result. Only the final filecmp compares generated output against the
    reference. Confirm whether the asserts were meant to target tmp_folder.
    """
    # testing files output from tar file
    with tempfile.TemporaryDirectory() as tmp_folder:
        run_report(False, data_input_tar_file, tmp_folder, False, False, False)
        # Generated data payload to compare against the reference data.js.
        tested_output_js_file = os.path.join(
            tmp_folder,
            'js',
            'data.js'
        )
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-grid.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-grid.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'ag-theme-alpine.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'bootstrap.min.css'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'css', 'bootstrap.min.css.map'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'ag-charts-community.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'ag-grid-community.min.noStyle.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'bootstrap.bundle.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'bootstrap.bundle.min.js.map'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'jquery-3.6.0.min.js'))
        assert os.path.exists(os.path.join(data_output_dir_path, 'js', 'main.js'))
        # Byte-for-byte comparison of the generated data payload.
        assert filecmp.cmp(tested_output_js_file, data_output_js_file, shallow=False)
uplift_modeling/uplift_tools/metrics.py | smn-ailab/ysaito-qiita | 14 | 6624876 | <filename>uplift_modeling/uplift_tools/metrics.py
"""This Module contains tools to evaluate uplift modeling algorithoms."""
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from plotly.graph_objs import Bar, Box, Figure, Layout, Scatter
from plotly.offline import init_notebook_mode, iplot, plot
def uplift_frame(outcome: list, treat: list, score: list) -> pd.DataFrame:
    """Create a DataFrame that is used to evaluate an uplift model.

    Fixes vs. the previous revision: the undefined names ``uplift_score``,
    ``y`` and ``is_treat`` (NameErrors at runtime) now use the ``score``
    parameter and the sorted row values, the column names are assigned as a
    flat list (the nested ``[[...]]`` form created a one-level MultiIndex),
    and the baseline uses the scalar ``.loc`` result directly instead of
    ``.values[0]`` (which fails on a scalar).

    :param outcome: target variable (int or float) for each subject in the
        test data. Must be non-empty.
    :param treat: 1 if the subject is in the treatment group, 0 otherwise.
    :param score: computed uplift score for each subject.
    :return: pd.DataFrame of per-step cumulative statistics (sorted by
        descending score) with columns ``y``, ``is_treat``, ``score``,
        ``treat_uu``, ``control_uu``, ``y_treat``, ``y_control``,
        ``y_treat_avg``, ``y_control_avg``, ``lift`` and ``base_line``.
    """
    # Sort subjects by uplift score, best candidates first.
    result = DataFrame(list(zip(outcome, treat, score)),
                       columns=["outcome", "treat", "uplift_score"]
                       ).sort_values(by="uplift_score", ascending=False)

    # Running statistics, updated one subject at a time.
    treat_uu = 0        # number of treated subjects seen so far
    control_uu = 0      # number of control subjects seen so far
    y_treat = 0         # outcome sum in the treatment group so far
    y_control = 0       # outcome sum in the control group so far
    y_treat_avg = 0     # outcome mean in the treatment group so far
    y_control_avg = 0   # outcome mean in the control group so far
    lift = 0.0          # estimated cumulative uplift at the current step
    stat_data = []

    for _, rows in result.iterrows():
        if rows.treat == 1:
            treat_uu += 1
            y_treat += rows.outcome
            y_treat_avg = y_treat / treat_uu
        else:
            control_uu += 1
            y_control += rows.outcome
            y_control_avg = y_control / control_uu
        # "lift": difference of group means, scaled by the population so far.
        lift = (y_treat_avg - y_control_avg) * (treat_uu + control_uu)
        stat_data.append([rows.outcome, rows.treat, rows.uplift_score,
                          treat_uu, control_uu,
                          y_treat, y_control, y_treat_avg, y_control_avg,
                          lift])

    df = DataFrame(stat_data,
                   columns=["y", "is_treat", "score", "treat_uu", "control_uu",
                            "y_treat", "y_control", "y_treat_avg",
                            "y_control_avg", "lift"])
    # Random-targeting baseline: linear ramp from 0 to the final lift.
    df["base_line"] = df.index * df.loc[len(df.index) - 1, "lift"] / len(df.index)
    return df
def uplift_bar(outcome: list, treat: list, score: list, task="classification",
               output=False, file_name=False) -> Bar:
    """Plot mean outcome of treatment vs. control per uplift-score decile.

    Subjects are ranked by uplift score, split into ten equal deciles, and
    the mean outcome of the treatment and control groups is drawn as a
    grouped bar chart per decile (each decile is assumed to contain at least
    one subject of each group).

    Fixes vs. the previous revision: the undefined name ``uplift_score``
    (NameError) now uses ``score``; the decile slices were taken from a
    DataFrame, so iterating them yielded column *labels* instead of rows —
    a plain sorted list of tuples is used instead; ``end - 1`` silently
    dropped one subject per decile; ``mean`` was undefined (NameError).

    :param outcome: target value for each subject (class label for
        ``task="classification"``, numeric for ``task="regression"``).
    :param treat: truthy if the subject is in the treatment group.
    :param score: computed uplift score for each subject.
    :param task: "classification" or "regression".
    :param output: whether to additionally write the figure to a file.
    :param file_name: name of the output file when ``output`` is truthy.
    """
    # Rank (outcome, treat, score) triples by uplift score, best first.
    ranked = sorted(zip(outcome, treat, score), key=lambda row: row[2],
                    reverse=True)
    qdf = DataFrame(columns=("y_treat", "y_control"))
    for n in range(10):
        # Decile boundaries (slice end is exclusive; no "- 1" needed).
        start = int(n * len(ranked) / 10)
        end = int((n + 1) * len(ranked) / 10)
        decile = ranked[start:end]
        # Group sizes within the decile.
        treat_uu = sum(1 for _, is_treat, _ in decile if is_treat)
        control_uu = len(decile) - treat_uu
        # Mean outcome of each group within the decile.
        if task == "classification":
            treat_cv = sum(1 for y, is_treat, _ in decile if is_treat and y)
            control_cv = sum(1 for y, is_treat, _ in decile
                             if not is_treat and y)
            y_treat = treat_cv / treat_uu
            y_control = control_cv / control_uu
        elif task == "regression":
            y_treat_list = [y for y, is_treat, _ in decile if is_treat]
            y_control_list = [y for y, is_treat, _ in decile if not is_treat]
            y_treat = sum(y_treat_list) / len(y_treat_list)
            y_control = sum(y_control_list) / len(y_control_list)
        label = "{}%~{}%".format(n * 10, (n + 1) * 10)
        qdf.loc[label] = [y_treat, y_control]

    trace1 = Bar(x=qdf.index.tolist(),
                 y=qdf.y_treat.values.tolist(), name="treat")
    trace2 = Bar(x=qdf.index.tolist(),
                 y=qdf.y_control.values.tolist(), name="control")
    layout = Layout(barmode="group", yaxis={"title": "Mean Outcome"},
                    xaxis={"title": "Uplift Score Percentile"})
    fig = Figure(data=[trace1, trace2], layout=layout)
    iplot(fig)
    if output:
        plot(fig, filename=file_name)
def uplift_curve(df: pd.DataFrame, score_name: str,
                 output=(False, False, False, False),
                 file_names=None) -> Scatter:
    """Plot four uplift curves and report AUUC for an uplift modeling approach.

    Fixes vs. the previous revision: a stray ``b`` after a list literal was
    a SyntaxError; the annotation ``string`` (undefined) is now ``str``; the
    mutable default list for ``output`` is now a tuple; column access uses
    the flat columns produced by :func:`uplift_frame` (``.T.values[0]``
    returned a scalar, not an array, for Series columns).

    :param df: experimental result of an uplift modeling approach, as
        produced by :func:`uplift_frame`.
    :param score_name: name of the uplift score (used in axis labels only).
    :param output: four flags; when ``output[i]`` is truthy, figure ``i`` is
        additionally written to ``file_names[i]``.
    :param file_names: names of the output files (required when any flag in
        ``output`` is set).
    """
    # Area Under the Uplift Curve: mean gap between lift curve and baseline.
    auuc = round(((np.array(df["lift"]) - np.array(df["base_line"])).sum())
                 / len(df["lift"]), 3)
    ranks = np.arange(df.shape[0])

    # 1) Cumulative outcome of each group, ordered by uplift rank.
    trace1 = Scatter(x=ranks, y=df["y_treat"].values, name="treat")
    trace2 = Scatter(x=ranks, y=df["y_control"].values, name="control")
    layout = Layout(title="AUUC = {}".format(auuc),
                    yaxis={"title": "Total Outcome"},
                    xaxis={"title": "{} Rank".format(score_name)})
    fig = Figure(data=[trace1, trace2], layout=layout)
    iplot(fig)
    if output[0]:
        plot(fig, filename=file_names[0])

    # 2) Mean outcome of each group, ordered by uplift rank.
    trace1 = Scatter(x=ranks, y=df["y_treat_avg"].values, name="treat")
    trace2 = Scatter(x=ranks, y=df["y_control_avg"].values, name="control")
    layout = Layout(title="AUUC = {}".format(auuc),
                    yaxis={"title": "Mean Outcome"},
                    xaxis={"title": "{} Rank".format(score_name)})
    fig = Figure(data=[trace1, trace2], layout=layout)
    iplot(fig)
    if output[1]:
        plot(fig, filename=file_names[1])

    # 3) Lift curve vs. the random-targeting baseline, by uplift rank.
    trace1 = Scatter(x=ranks, y=df["lift"].values, name="treat")
    trace2 = Scatter(x=ranks, y=df["base_line"].values, name="baseline")
    layout = Layout(title="AUUC = {}".format(auuc),
                    yaxis={"title": "Uplift"},
                    xaxis={"title": "{} Rank".format(score_name)})
    fig = Figure(data=[trace1, trace2], layout=layout)
    iplot(fig)
    if output[2]:
        plot(fig, filename=file_names[2])

    # 4) Lift curve vs. baseline as a function of the uplift score itself
    #    (x axis reversed so the best-scored subjects come first).
    trace1 = Scatter(x=df["score"].values, y=df["lift"].values, name="treat")
    trace2 = Scatter(x=df["score"].values, y=df["base_line"].values,
                     name="baseline")
    layout = Layout(title="AUUC = {}".format(auuc),
                    yaxis={"title": "Lift"},
                    xaxis={"title": "{}".format(score_name),
                           "autorange": "reversed"})
    fig = Figure(data=[trace1, trace2], layout=layout)
    iplot(fig)
    if output[3]:
        plot(fig, filename=file_names[3])
| <filename>uplift_modeling/uplift_tools/metrics.py
"""This Module contains tools to evaluate uplift modeling algorithoms."""
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from plotly.graph_objs import Bar, Box, Figure, Layout, Scatter
from plotly.offline import init_notebook_mode, iplot, plot
def uplift_frame(outcome: list, treat: list, score: list) -> pd.DataFrame:
"""create a DataFrame that is used to evaluate uplift-model.
:param outcome: list of integers or floats which represent the target variable for each suject in the test data.
:param treat: list of integers which represent whether a sunject is in the treatment group or not.
:param score: list of floats which represent computed uplift-scores for each subject.
:return: pd.DataFrame which contains experimental result of a given uplift modleing approach.
"""
# sorting by uplift socre
result = DataFrame(list(zip(outcome, treat, uplift_score)), columns=["outcome", "treat", "uplift_score"]).sort_values(by="uplift_score", ascending=False)
# initializing
treat_uu = 0 # the number of subjects in the treatment group at each step
control_uu = 0 # the number of subjects in the control group at each step
y_treat = 0 # the sum of the outcome variable in the treatment group at each step
y_control = 0 # the sum of the outcome variable in the control group at each step
y_treat_avg = 0 # the average of the outcome variable in the treatment group at each step
y_control_avg = 0 # the average of the outcome variable in the control group at each step
lift = 0.0 # the calicurated "uplift" at each step
stat_data = []
# calc num of subjects, total outcome, avg outcome
for index, rows in result.iterrows():
if rows.treat == 1:
treat_uu += 1
y_treat += rows.outcome
y_treat_avg = y_treat / treat_uu
else:
control_uu += 1
y_control += y
y_control_avg = y_control / control_uu
# calc "lift" at each step
lift = (y_treat_avg - y_control_avg) * (treat_uu + control_uu)
stat_data.append([y, is_treat, score, treat_uu, control_uu,
y_treat, y_control, y_treat_avg, y_control_avg, lift])
# convert stat_data to DataFrame
df = DataFrame(stat_data)
df.columns = [["y", "is_treat", "score", "treat_uu", "control_uu",
"y_treat", "y_control", "y_treat_avg", "y_control_avg", "lift"]]
# calc base_line at each step
df["base_line"] = df.index * df.loc[len(df.index) - 1, "lift"].values[0] / len(df.index)
return df
def uplift_bar(outcome: list, treat: list, score: list, task="classification", output=False, file_name=False) -> Bar:
"""create a DataFrame that is used to evaluate uplift-model for classification settings.
:param outcome: list of integer which represent the target class for each suject in the test data.
:param treat: list of bools which represent whether a sunject is in the treatment group or not.
:param score: list of floats which represent computed uplift-scores for each subject.
:param output: whether output the scatter file or not.
:param file_name: the name of the output file.
:return: pd.DataFrame which contains experimental result of a given uplift modleing approach.
"""
# sorting by uplift socre
test_list = DataFrame(list(zip(outcome, treat, uplift_score)), columns=["outcome", "treat", "uplift_score"]).sort_values(by="uplift_score", ascending=False)
qdf = DataFrame(columns=("y_treat", "y_control"))
# sort by uplift-score and divid into 10 groups
for n in range(10):
start = int(n * len(test_list) / 10)
end = int((n + 1) * len(test_list) / 10) - 1
quantiled_result = test_list[start:end]
# count the num of subjects in treatment and control group at each decile
treat_uu = list(map(lambda item: item[1], quantiled_result)).count(True)
control_uu = list(map(lambda item: item[1], quantiled_result)).count(False)
# calc the avg outcome for treatment and control group at each decile
if task == "classification":
treat_cv_list = []
control_cv_list = []
for item in quantiled_result:
if item[1]:
treat_cv_list.append(item[0])
else:
control_cv_list.append(item[0])
treat_cv = treat_cv_list.count(True)
control_cv = control_cv_list.count(True)
y_treat = treat_cv / treat_uu
y_control = control_cv / control_uu
elif task == "regression":
y_treat_list = []
y_control_list = []
for item in quantiled_result:
if item[1]:
y_treat_list.append(item[0])
else:
y_control_list.append(item[0])
y_treat = mean(y_treat_list)
y_control = mean(y_control_list)
label = "{}%~{}%".format(n * 10, (n + 1) * 10)
qdf.loc[label] = [y_treat, y_control]
trace1 = Bar(x=qdf.index.tolist(),
y=qdf.y_treat.values.tolist(), name="treat")
trace2 = Bar(x=qdf.index.tolist(),
y=qdf.y_control.values.tolist(), name="control")
layout = Layout(barmode="group", yaxis={"title": "Mean Outcome"}, xaxis={"title": "Uplift Score Percentile"})
fig = Figure(data=[trace1, trace2], layout=layout)
iplot(fig)
if output:
plot(fig, filename=file_name)
def uplift_curve(df: pd.DataFrame, score_name: string, output=[False, False, False, False], file_names=None) -> Scatter:
"""plot uplift_curves for the given uplift modeling approach.
:param df: pd.DataFrame which contains experimental result of a given uplift modleing approach.
this is the output of the functin "uplift_frame".
:param score_name: the name of the used uplift-socre.
:param output: whether output each scatter file or not.
:param file_names: list of the names of the output file.
:return: computed AUUC and uplift_curve.
"""
# calc Area Under Uplift Curve
auuc = round(((np.array(df.lift) - np.array(df.base_line)).sum()) / len(df.lift), 3)
# Total Outcome sorted by Uplift Rank
trace1 = Scatter(x=np.arange(df.shape[0]), y=df.y_treat.T.values[0], name="treat")
trace2 = Scatter(x=np.arange(df.shape[0]), y=df.y_control.T.values[0], name="control")
data = [trace1, trace2]b
layout = Layout(title="AUUC = {}".format(auuc), yaxis={"title": "Total Outcome"}, xaxis={"title": "{} Rank".format(score_name)})
fig = Figure(data=data, layout=layout)
iplot(fig)
if output[0]:
plot(fig, filename=file_names[0])
# Avg Outcome sorted by Uplift Rank
trace1 = Scatter(x=np.arange(df.shape[0]), y=df.y_treat_avg.T.values[0], name="treat")
trace2 = Scatter(x=np.arange(df.shape[0]), y=df.y_control_avg.T.values[0], name="control")
data = [trace1, trace2]
layout = Layout(title="AUUC = {}".format(auuc), yaxis={"title": "Mean Outcome"}, xaxis={"title": "{} Rank".format(score_name)})
fig = Figure(data=data, layout=layout)
iplot(fig)
if output[1]:
plot(fig, filename=file_names[1])
# Lift Curve sorted by Uplift Rank
trace1 = Scatter(x=np.arange(df.shape[0]),
y=df.lift.T.values[0],
name="treat")
trace2 = Scatter(x=np.arange(df.shape[0]), y=df.base_line.T.values[0], name="baseline")
data = [trace1, trace2]
layout = Layout(title="AUUC = {}".format(auuc), yaxis={"title": "Uplift"}, xaxis={"title": "{} Rank".format(score_name)})
fig = Figure(data=data, layout=layout)
iplot(fig)
if output[2]:
plot(fig, filename=file_names[2])
# Lift Curve sorted by Uplift Score
trace1 = Scatter(x=df.score.T.values[0], y=df.lift.T.values[0], name="treat")
trace2 = Scatter(x=df.score.T.values[0], y=df.base_line.T.values[0], name="baseline")
data = [trace1, trace2]
layout = Layout(title="AUUC = {}".format(auuc), yaxis={"title": "Lift".format(score_name)},
xaxis={"title": "{}".format(score_name), "autorange": "reversed"})
fig = Figure(data=data, layout=layout)
iplot(fig)
if output[3]:
plot(fig, filename=file_names[3])
| en | 0.863134 | This Module contains tools to evaluate uplift modeling algorithoms. create a DataFrame that is used to evaluate uplift-model. :param outcome: list of integers or floats which represent the target variable for each suject in the test data. :param treat: list of integers which represent whether a sunject is in the treatment group or not. :param score: list of floats which represent computed uplift-scores for each subject. :return: pd.DataFrame which contains experimental result of a given uplift modleing approach. # sorting by uplift socre # initializing # the number of subjects in the treatment group at each step # the number of subjects in the control group at each step # the sum of the outcome variable in the treatment group at each step # the sum of the outcome variable in the control group at each step # the average of the outcome variable in the treatment group at each step # the average of the outcome variable in the control group at each step # the calicurated "uplift" at each step # calc num of subjects, total outcome, avg outcome # calc "lift" at each step # convert stat_data to DataFrame # calc base_line at each step create a DataFrame that is used to evaluate uplift-model for classification settings. :param outcome: list of integer which represent the target class for each suject in the test data. :param treat: list of bools which represent whether a sunject is in the treatment group or not. :param score: list of floats which represent computed uplift-scores for each subject. :param output: whether output the scatter file or not. :param file_name: the name of the output file. :return: pd.DataFrame which contains experimental result of a given uplift modleing approach. 
# sorting by uplift socre # sort by uplift-score and divid into 10 groups # count the num of subjects in treatment and control group at each decile # calc the avg outcome for treatment and control group at each decile plot uplift_curves for the given uplift modeling approach. :param df: pd.DataFrame which contains experimental result of a given uplift modleing approach. this is the output of the functin "uplift_frame". :param score_name: the name of the used uplift-socre. :param output: whether output each scatter file or not. :param file_names: list of the names of the output file. :return: computed AUUC and uplift_curve. # calc Area Under Uplift Curve # Total Outcome sorted by Uplift Rank # Avg Outcome sorted by Uplift Rank # Lift Curve sorted by Uplift Rank # Lift Curve sorted by Uplift Score | 2.712638 | 3 |
app/apiv2/organizations/locations/roles/schedules/schedule.py | partnerhero/staffjoy | 0 | 6624877 | import json
from flask import g, current_app
from flask_restful import marshal, abort, reqparse, Resource
from app import db
from app.constants import API_ENVELOPE
from app.models import Schedule2, Organization
from app.caches import Schedules2Cache
from app.apiv2.decorators import verify_org_location_role_schedule, \
permission_location_member, permission_location_manager
from app.apiv2.helpers import verify_days_of_week_struct
from app.apiv2.marshal import schedule_fields
class ScheduleApi(Resource):
    """REST resource for one schedule of a role (org -> location -> role -> schedule)."""

    @verify_org_location_role_schedule
    @permission_location_member
    def get(self, org_id, location_id, role_id, schedule_id):
        """Return the marshalled schedule plus the names of its sub-resources."""
        response = {
            API_ENVELOPE: {},
            "resources":
            ["preferences", "shifts", "timeclocks", "timeoffrequests"],
        }
        schedule = Schedule2.query.get_or_404(schedule_id)
        schedule = marshal(schedule, schedule_fields)
        response[API_ENVELOPE] = schedule
        return response

    @verify_org_location_role_schedule
    @permission_location_manager
    def patch(self, org_id, location_id, role_id, schedule_id):
        """Update a schedule's demand, shift-length bounds and/or state.

        Accepts "demand" (a JSON string), "state", "min_shift_length_hour"
        and "max_shift_length_hour". Shift lengths are stored internally in
        half-hour units. Valid state targets are "unpublished",
        "chomp-queue", "mobius-queue" and "published"; each target is only
        reachable from specific origin states (see the branches below).
        Returns the applied ``changes`` dict on success, otherwise a
        ``(message, 400)`` pair.
        """
        schedule = Schedule2.query.get_or_404(schedule_id)
        org = Organization.query.get_or_404(org_id)
        parser = reqparse.RequestParser()
        parser.add_argument("demand", type=str)
        parser.add_argument("state", type=str)
        parser.add_argument("min_shift_length_hour", type=int)
        parser.add_argument("max_shift_length_hour", type=int)
        changes = parser.parse_args(strict=True)
        # Filter out null values
        changes = dict((k, v) for k, v in changes.iteritems() if v is not None)
        original_state = schedule.state
        if len(changes) == 0:
            return {"message": "No valid changes detected"}, 400
        # schedule can only be modified from initial or unpublished state if not sudo
        if not g.current_user.is_sudo():
            if original_state not in ["initial", "unpublished"]:
                return {
                    "message":
                    "You are not able to modify a schedule from its current state."
                }, 400
        # Convert hours to the stored half-hour unit, falling back to the
        # schedule's current values when a bound was not supplied.
        if "min_shift_length_hour" in changes:
            min_shift_length_half_hour = changes["min_shift_length_hour"] * 2
        else:
            min_shift_length_half_hour = schedule.min_shift_length_half_hour
        if "max_shift_length_hour" in changes:
            max_shift_length_half_hour = changes["max_shift_length_hour"] * 2
        else:
            max_shift_length_half_hour = schedule.max_shift_length_half_hour
        # now verification
        # NOTE that if we choose to support lengths of 0, these 1st two checks will break
        # because None and 0 get evalulated as the same
        if bool(min_shift_length_half_hour) != bool(
                max_shift_length_half_hour):
            return {
                "message":
                "min_shift_length_hour and max_shift_length_hour most both be defined"
            }, 400
        if min_shift_length_half_hour and max_shift_length_half_hour:
            if min_shift_length_half_hour > max_shift_length_half_hour:
                return {
                    "message":
                    "min_shift_length_hour cannot be greater than max_shift_length_hour"
                }, 400
        # 1..46 half-hours, i.e. 0.5 to 23 hours (messages say "1 and 24").
        if min_shift_length_half_hour:
            if not (1 <= min_shift_length_half_hour <= 46):
                return {
                    "message": "min_shift_length_hour must be between 1 and 24"
                }, 400
        if max_shift_length_half_hour:
            if not (1 <= max_shift_length_half_hour <= 46):
                return {
                    "message": "max_shift_length_hour must be between 1 and 24"
                }, 400
        # Rewrite the hour keys to their stored half-hour equivalents.
        if "min_shift_length_hour" in changes:
            del changes["min_shift_length_hour"]
            changes["min_shift_length_half_hour"] = min_shift_length_half_hour
        if "max_shift_length_hour" in changes:
            del changes["max_shift_length_hour"]
            changes["max_shift_length_half_hour"] = max_shift_length_half_hour
        if "demand" in changes:
            # admins can only modify demand in the unpublished state
            # NOTE(review): the check also allows "chomp-queue", which the
            # error message below does not mention — confirm intent.
            if not g.current_user.is_sudo():
                if changes.get("state", schedule.state) not in [
                        "unpublished", "chomp-queue"
                ]:
                    return {
                        "message":
                        "Admins can only modify demand when the schedule is in the unpublished state."
                    }, 400
            # demand can be set to None when it is sent down without a value in the request
            # (not "") will resolve to True, which we consider None - assume json for all other cases
            if not changes["demand"]:
                changes["demand"] = None
            else:
                # NOTE(review): bare except — narrowing to ValueError would
                # avoid masking unrelated failures.
                try:
                    demand = json.loads(changes.get("demand"))
                except:
                    return {"message": "Unable to parse demand json body"}, 400
                if demand is None or not isinstance(demand, dict):
                    return {"message": "Unable to parse demand json body"}, 400
                # Check that days of week are right
                if not verify_days_of_week_struct(demand):
                    return {"message": "demand is improperly formatted"}, 400
                # Re-serialize so a normalized JSON string is stored.
                try:
                    changes["demand"] = json.dumps(demand)
                except Exception as exception:
                    return {"message": "Unable to parse demand json body"}, 400
                g.current_user.track_event("updated_demand")
        if "state" in changes:
            state = changes.get("state")
            if state == original_state:
                return {
                    "message": "Schedule is already in state %s." % state
                }, 400
            # NOTE(review): message says 'done' but the accepted value is
            # 'published' — the message looks stale.
            if state not in [
                    "unpublished", "chomp-queue", "mobius-queue", "published"
            ]:
                return {
                    "message":
                    "State can only be updated to 'unpublished', 'chomp-queue', 'mobius-queue' or 'done'."
                }, 400
            if not org.active:
                return {
                    "message":
                    "This organization must be active for a state change"
                }, 400
            if state == "chomp-queue":
                # Chomp (shift generation) requires both shift-length bounds.
                if not changes.get("min_shift_length_half_hour",
                                   schedule.min_shift_length_half_hour):
                    return {
                        "message":
                        "min_shift_length_hour must be set for chomp queue"
                    }, 400
                if not changes.get("max_shift_length_half_hour",
                                   schedule.max_shift_length_half_hour):
                    return {
                        "message":
                        "max_shift_length_hour must be set for chomp queue"
                    }, 400
                if original_state not in ["unpublished", "chomp-processing"]:
                    return {"message": "This state change is not allowed"}, 400
                # reset timing measurements - although they will soon be reset, the monitoring timing
                # may be inaccurate for the duration of calculation (e.g. a requeue)
                changes["chomp_start"] = None
                changes["chomp_end"] = None
                if not g.current_user.is_sudo():
                    g.current_user.track_event("chomp_schedule_calculation")
                schedule.transition_to_chomp_queue()
            elif state == "published":
                if original_state not in ["unpublished", "mobius-processing"]:
                    return {"message": "This state change is not allowed"}, 400
                schedule.transition_to_published()
                if not g.current_user.is_sudo():
                    g.current_user.track_event("published_schedule")
            elif state == "mobius-queue":
                if original_state not in ["unpublished", "mobius-processing"]:
                    return {"message": "This state change is not allowed"}, 400
                # reset timing measurements - although they will soon be reset, the monitoring timing
                # may be inaccurate for the duration of calculation (e.g. a requeue)
                changes["mobius_start"] = None
                changes["mobius_end"] = None
                schedule.transition_to_mobius_queue()
                # NOTE(review): "state" is removed from ``changes`` only on
                # this branch; on the other branches the setattr loop below
                # writes ``state`` again *after* the transition_to_* call —
                # confirm whether that overwrite is intended.
                del changes["state"]
            elif state == "unpublished":
                # NOTE(review): unlike every other validation error in this
                # method, this return carries no 400 status code.
                if original_state not in ["initial", "chomp-processing"]:
                    return {
                        "message":
                        "Schedule cannot be set to unpublished from its current state"
                    }
                schedule.transition_to_unpublished()
        # Apply the remaining simple field updates; note this commits once
        # per field and aborts the request on the first failure.
        for change, value in changes.iteritems():
            try:
                setattr(schedule, change, value)
                db.session.commit()
            except Exception as exception:
                db.session.rollback()
                current_app.logger.exception(str(exception))
                abort(400)
        # Invalidate the cached schedule list for this role.
        Schedules2Cache.delete(role_id)
        return changes
| import json
from flask import g, current_app
from flask_restful import marshal, abort, reqparse, Resource
from app import db
from app.constants import API_ENVELOPE
from app.models import Schedule2, Organization
from app.caches import Schedules2Cache
from app.apiv2.decorators import verify_org_location_role_schedule, \
permission_location_member, permission_location_manager
from app.apiv2.helpers import verify_days_of_week_struct
from app.apiv2.marshal import schedule_fields
class ScheduleApi(Resource):
@verify_org_location_role_schedule
@permission_location_member
def get(self, org_id, location_id, role_id, schedule_id):
response = {
API_ENVELOPE: {},
"resources":
["preferences", "shifts", "timeclocks", "timeoffrequests"],
}
schedule = Schedule2.query.get_or_404(schedule_id)
schedule = marshal(schedule, schedule_fields)
response[API_ENVELOPE] = schedule
return response
@verify_org_location_role_schedule
@permission_location_manager
def patch(self, org_id, location_id, role_id, schedule_id):
schedule = Schedule2.query.get_or_404(schedule_id)
org = Organization.query.get_or_404(org_id)
parser = reqparse.RequestParser()
parser.add_argument("demand", type=str)
parser.add_argument("state", type=str)
parser.add_argument("min_shift_length_hour", type=int)
parser.add_argument("max_shift_length_hour", type=int)
changes = parser.parse_args(strict=True)
# Filter out null values
changes = dict((k, v) for k, v in changes.iteritems() if v is not None)
original_state = schedule.state
if len(changes) == 0:
return {"message": "No valid changes detected"}, 400
# schedule can only be modified from initial or unpublished state if not sudo
if not g.current_user.is_sudo():
if original_state not in ["initial", "unpublished"]:
return {
"message":
"You are not able to modify a schedule from its current state."
}, 400
if "min_shift_length_hour" in changes:
min_shift_length_half_hour = changes["min_shift_length_hour"] * 2
else:
min_shift_length_half_hour = schedule.min_shift_length_half_hour
if "max_shift_length_hour" in changes:
max_shift_length_half_hour = changes["max_shift_length_hour"] * 2
else:
max_shift_length_half_hour = schedule.max_shift_length_half_hour
# now verification
# NOTE that if we choose to support lengths of 0, these 1st two checks will break
# because None and 0 get evalulated as the same
if bool(min_shift_length_half_hour) != bool(
max_shift_length_half_hour):
return {
"message":
"min_shift_length_hour and max_shift_length_hour most both be defined"
}, 400
if min_shift_length_half_hour and max_shift_length_half_hour:
if min_shift_length_half_hour > max_shift_length_half_hour:
return {
"message":
"min_shift_length_hour cannot be greater than max_shift_length_hour"
}, 400
if min_shift_length_half_hour:
if not (1 <= min_shift_length_half_hour <= 46):
return {
"message": "min_shift_length_hour must be between 1 and 24"
}, 400
if max_shift_length_half_hour:
if not (1 <= max_shift_length_half_hour <= 46):
return {
"message": "max_shift_length_hour must be between 1 and 24"
}, 400
if "min_shift_length_hour" in changes:
del changes["min_shift_length_hour"]
changes["min_shift_length_half_hour"] = min_shift_length_half_hour
if "max_shift_length_hour" in changes:
del changes["max_shift_length_hour"]
changes["max_shift_length_half_hour"] = max_shift_length_half_hour
if "demand" in changes:
# admins can only modify demand in the unpublished state
if not g.current_user.is_sudo():
if changes.get("state", schedule.state) not in [
"unpublished", "chomp-queue"
]:
return {
"message":
"Admins can only modify demand when the schedule is in the unpublished state."
}, 400
# demand can be set to None when it is sent down without a value in the request
# (not "") will resolve to True, which we consider None - assume json for all other cases
if not changes["demand"]:
changes["demand"] = None
else:
try:
demand = json.loads(changes.get("demand"))
except:
return {"message": "Unable to parse demand json body"}, 400
if demand is None or not isinstance(demand, dict):
return {"message": "Unable to parse demand json body"}, 400
# Check that days of week are right
if not verify_days_of_week_struct(demand):
return {"message": "demand is improperly formatted"}, 400
try:
changes["demand"] = json.dumps(demand)
except Exception as exception:
return {"message": "Unable to parse demand json body"}, 400
g.current_user.track_event("updated_demand")
if "state" in changes:
state = changes.get("state")
if state == original_state:
return {
"message": "Schedule is already in state %s." % state
}, 400
if state not in [
"unpublished", "chomp-queue", "mobius-queue", "published"
]:
return {
"message":
"State can only be updated to 'unpublished', 'chomp-queue', 'mobius-queue' or 'done'."
}, 400
if not org.active:
return {
"message":
"This organization must be active for a state change"
}, 400
if state == "chomp-queue":
if not changes.get("min_shift_length_half_hour",
schedule.min_shift_length_half_hour):
return {
"message":
"min_shift_length_hour must be set for chomp queue"
}, 400
if not changes.get("max_shift_length_half_hour",
schedule.max_shift_length_half_hour):
return {
"message":
"max_shift_length_hour must be set for chomp queue"
}, 400
if original_state not in ["unpublished", "chomp-processing"]:
return {"message": "This state change is not allowed"}, 400
# reset timing measurements - although they will soon be reset, the monitoring timing
# may be inaccurate for the duration of calculation (e.g. a requeue)
changes["chomp_start"] = None
changes["chomp_end"] = None
if not g.current_user.is_sudo():
g.current_user.track_event("chomp_schedule_calculation")
schedule.transition_to_chomp_queue()
elif state == "published":
if original_state not in ["unpublished", "mobius-processing"]:
return {"message": "This state change is not allowed"}, 400
schedule.transition_to_published()
if not g.current_user.is_sudo():
g.current_user.track_event("published_schedule")
elif state == "mobius-queue":
if original_state not in ["unpublished", "mobius-processing"]:
return {"message": "This state change is not allowed"}, 400
# reset timing measurements - although they will soon be reset, the monitoring timing
# may be inaccurate for the duration of calculation (e.g. a requeue)
changes["mobius_start"] = None
changes["mobius_end"] = None
schedule.transition_to_mobius_queue()
del changes["state"]
elif state == "unpublished":
if original_state not in ["initial", "chomp-processing"]:
return {
"message":
"Schedule cannot be set to unpublished from its current state"
}
schedule.transition_to_unpublished()
for change, value in changes.iteritems():
try:
setattr(schedule, change, value)
db.session.commit()
except Exception as exception:
db.session.rollback()
current_app.logger.exception(str(exception))
abort(400)
Schedules2Cache.delete(role_id)
return changes
| en | 0.88637 | # Filter out null values # schedule can only be modified from initial or unpublished state if not sudo # now verification # NOTE that if we choose to support lengths of 0, these 1st two checks will break # because None and 0 get evalulated as the same # admins can only modify demand in the unpublished state # demand can be set to None when it is sent down without a value in the request # (not "") will resolve to True, which we consider None - assume json for all other cases # Check that days of week are right # reset timing measurements - although they will soon be reset, the monitoring timing # may be inaccurate for the duration of calculation (e.g. a requeue) # reset timing measurements - although they will soon be reset, the monitoring timing # may be inaccurate for the duration of calculation (e.g. a requeue) | 2.114784 | 2 |
tensorflow_datasets/core/features/text_feature_test.py | rodrigob/datasets | 0 | 6624878 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for tensorflow_datasets.core.features.text_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
from tensorflow_datasets.core.features.text import text_encoder
tf.compat.v1.enable_eager_execution()
class TextFeatureTest(test_utils.FeatureExpectationsTestCase):
def test_text(self):
nonunicode_text = 'hello world'
unicode_text = u'你好'
self.assertFeature(
feature=features.Text(),
shape=(),
dtype=tf.string,
tests=[
# Non-unicode
test_utils.FeatureExpectationItem(
value=nonunicode_text,
expected=tf.compat.as_bytes(nonunicode_text),
),
# Unicode
test_utils.FeatureExpectationItem(
value=unicode_text,
expected=tf.compat.as_bytes(unicode_text),
),
# Empty string
test_utils.FeatureExpectationItem(
value='',
expected=tf.compat.as_bytes(''),
),
],
)
def test_text_encoded(self):
unicode_text = u'你好'
# Unicode integer-encoded by byte
self.assertFeature(
feature=features.Text(encoder=text_encoder.ByteTextEncoder()),
shape=(None,),
dtype=tf.int64,
tests=[
test_utils.FeatureExpectationItem(
value=unicode_text,
expected=[i + 1 for i in [228, 189, 160, 229, 165, 189]],
),
# Empty string
test_utils.FeatureExpectationItem(
value='',
expected=[],
),
],
)
def test_text_conversion(self):
text_f = features.Text(encoder=text_encoder.ByteTextEncoder())
text = u'你好'
self.assertEqual(text, text_f.ints2str(text_f.str2ints(text)))
def test_save_load_metadata(self):
text_f = features.Text(
encoder=text_encoder.ByteTextEncoder(additional_tokens=['HI']))
text = u'HI 你好'
ids = text_f.str2ints(text)
self.assertEqual(1, ids[0])
with test_utils.tmp_dir(self.get_temp_dir()) as data_dir:
feature_name = 'dummy'
text_f.save_metadata(data_dir, feature_name)
new_f = features.Text()
new_f.load_metadata(data_dir, feature_name)
self.assertEqual(ids, text_f.str2ints(text))
if __name__ == '__main__':
test_utils.main()
| # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for tensorflow_datasets.core.features.text_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
from tensorflow_datasets.core.features.text import text_encoder
tf.compat.v1.enable_eager_execution()
class TextFeatureTest(test_utils.FeatureExpectationsTestCase):
def test_text(self):
nonunicode_text = 'hello world'
unicode_text = u'你好'
self.assertFeature(
feature=features.Text(),
shape=(),
dtype=tf.string,
tests=[
# Non-unicode
test_utils.FeatureExpectationItem(
value=nonunicode_text,
expected=tf.compat.as_bytes(nonunicode_text),
),
# Unicode
test_utils.FeatureExpectationItem(
value=unicode_text,
expected=tf.compat.as_bytes(unicode_text),
),
# Empty string
test_utils.FeatureExpectationItem(
value='',
expected=tf.compat.as_bytes(''),
),
],
)
def test_text_encoded(self):
unicode_text = u'你好'
# Unicode integer-encoded by byte
self.assertFeature(
feature=features.Text(encoder=text_encoder.ByteTextEncoder()),
shape=(None,),
dtype=tf.int64,
tests=[
test_utils.FeatureExpectationItem(
value=unicode_text,
expected=[i + 1 for i in [228, 189, 160, 229, 165, 189]],
),
# Empty string
test_utils.FeatureExpectationItem(
value='',
expected=[],
),
],
)
def test_text_conversion(self):
text_f = features.Text(encoder=text_encoder.ByteTextEncoder())
text = u'你好'
self.assertEqual(text, text_f.ints2str(text_f.str2ints(text)))
def test_save_load_metadata(self):
text_f = features.Text(
encoder=text_encoder.ByteTextEncoder(additional_tokens=['HI']))
text = u'HI 你好'
ids = text_f.str2ints(text)
self.assertEqual(1, ids[0])
with test_utils.tmp_dir(self.get_temp_dir()) as data_dir:
feature_name = 'dummy'
text_f.save_metadata(data_dir, feature_name)
new_f = features.Text()
new_f.load_metadata(data_dir, feature_name)
self.assertEqual(ids, text_f.str2ints(text))
if __name__ == '__main__':
test_utils.main()
| en | 0.799254 | # coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding=utf-8 Tests for tensorflow_datasets.core.features.text_feature. # Non-unicode # Unicode # Empty string # Unicode integer-encoded by byte # Empty string | 2.378927 | 2 |
hardware/opentrons_hardware/drivers/can_bus/abstract_driver.py | anuwrag/opentrons | 2 | 6624879 | """The can bus transport."""
from __future__ import annotations
from abc import ABC, abstractmethod
from opentrons_hardware.firmware_bindings import CanMessage
class AbstractCanDriver(ABC):
"""Can driver interface."""
@abstractmethod
async def send(self, message: CanMessage) -> None:
"""Send a can message.
Args:
message: The message to send.
Returns:
None
"""
...
@abstractmethod
async def read(self) -> CanMessage:
"""Read a message.
Returns:
A can message
Raises:
ErrorFrameCanError
"""
...
def __aiter__(self) -> AbstractCanDriver:
"""Enter iterator.
Returns:
CanDriver
"""
return self
async def __anext__(self) -> CanMessage:
"""Async next.
Returns:
CanMessage
"""
return await self.read()
@abstractmethod
def shutdown(self) -> None:
"""Stop the driver."""
...
| """The can bus transport."""
from __future__ import annotations
from abc import ABC, abstractmethod
from opentrons_hardware.firmware_bindings import CanMessage
class AbstractCanDriver(ABC):
"""Can driver interface."""
@abstractmethod
async def send(self, message: CanMessage) -> None:
"""Send a can message.
Args:
message: The message to send.
Returns:
None
"""
...
@abstractmethod
async def read(self) -> CanMessage:
"""Read a message.
Returns:
A can message
Raises:
ErrorFrameCanError
"""
...
def __aiter__(self) -> AbstractCanDriver:
"""Enter iterator.
Returns:
CanDriver
"""
return self
async def __anext__(self) -> CanMessage:
"""Async next.
Returns:
CanMessage
"""
return await self.read()
@abstractmethod
def shutdown(self) -> None:
"""Stop the driver."""
...
| en | 0.49132 | The can bus transport. Can driver interface. Send a can message. Args: message: The message to send. Returns: None Read a message. Returns: A can message Raises: ErrorFrameCanError Enter iterator. Returns: CanDriver Async next. Returns: CanMessage Stop the driver. | 2.985161 | 3 |
website/registration/migrations/0009_mark_result.py | CodeJosh723/thesis-review-system | 1 | 6624880 | # Generated by Django 3.0.7 on 2020-07-31 08:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('registration', '0008_auto_20200728_2047'),
]
operations = [
migrations.AddField(
model_name='mark',
name='result',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='marks', to='registration.Result'),
),
]
| # Generated by Django 3.0.7 on 2020-07-31 08:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('registration', '0008_auto_20200728_2047'),
]
operations = [
migrations.AddField(
model_name='mark',
name='result',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='marks', to='registration.Result'),
),
]
| en | 0.756911 | # Generated by Django 3.0.7 on 2020-07-31 08:21 | 1.414277 | 1 |
eng_to_ipa/transcribe.py | binu-alexander/English-to-IPA | 195 | 6624881 | # -*- coding: utf-8 -*-
import re
from os.path import join, abspath, dirname
import eng_to_ipa.stress as stress
from collections import defaultdict
class ModeType(object):
def __init__(self, mode):
self.name = mode
if mode.lower() == "sql":
import sqlite3
conn = sqlite3.connect(join(abspath(dirname(__file__)),
"./resources/CMU_dict.db"))
self.mode = conn.cursor()
elif mode.lower() == "json":
import json
json_file = open(join(abspath(dirname(__file__)),
"../eng_to_ipa/resources/CMU_dict.json"),
encoding="UTF-8")
self.mode = json.load(json_file)
def __str__(self):
return self.name
def preprocess(words):
"""Returns a string of words stripped of punctuation"""
punct_str = '!"#$%&\'()*+,-./:;<=>/?@[\\]^_`{|}~«» '
return ' '.join([w.strip(punct_str).lower() for w in words.split()])
def preserve_punc(words):
"""converts words to IPA and finds punctuation before and after the word."""
words_preserved = []
for w in words.split():
punct_list = ["", preprocess(w), ""]
before = re.search(r"^([^A-Za-z0-9]+)[A-Za-z]", w)
after = re.search(r"[A-Za-z]([^A-Za-z0-9]+)$", w)
if before:
punct_list[0] = str(before.group(1))
if after:
punct_list[2] = str(after.group(1))
words_preserved.append(punct_list)
return words_preserved
def apply_punct(triple, as_str=False):
"""places surrounding punctuation back on center on a list of preserve_punc triples"""
if type(triple[0]) == list:
for i, t in enumerate(triple):
triple[i] = str(''.join(triple[i]))
if as_str:
return ' '.join(triple)
return triple
if as_str:
return str(''.join(t for t in triple))
return [''.join(t for t in triple)]
def _punct_replace_word(original, transcription):
"""Get the IPA transcription of word with the original punctuation marks"""
for i, trans_list in enumerate(transcription):
for j, item in enumerate(trans_list):
triple = [original[i][0]] + [item] + [original[i][2]]
transcription[i][j] = apply_punct(triple, as_str=True)
return transcription
def fetch_words(words_in, db_type="sql"):
"""fetches a list of words from the database"""
asset = ModeType(mode=db_type).mode
if db_type.lower() == "sql":
quest = "?, " * len(words_in)
asset.execute("SELECT word, phonemes FROM dictionary "
"WHERE word IN ({0})".format(quest[:-2]), words_in)
result = asset.fetchall()
d = defaultdict(list)
for k, v in result:
d[k].append(v)
return list(d.items())
if db_type.lower() == "json":
words = []
for k, v in asset.items():
if k in words_in:
words.append((k, v))
return words
def get_cmu(tokens_in, db_type="sql"):
"""query the SQL database for the words and return the phonemes in the order of user_in"""
result = fetch_words(tokens_in, db_type)
ordered = []
for word in tokens_in:
this_word = [[i[1] for i in result if i[0] == word]][0]
if this_word:
ordered.append(this_word[0])
else:
ordered.append(["__IGNORE__" + word])
return ordered
def cmu_to_ipa(cmu_list, mark=True, stress_marking='all'):
"""converts the CMU word lists into IPA transcriptions"""
symbols = {"a": "ə", "ey": "eɪ", "aa": "ɑ", "ae": "æ", "ah": "ə", "ao": "ɔ",
"aw": "aʊ", "ay": "aɪ", "ch": "ʧ", "dh": "ð", "eh": "ɛ", "er": "ər",
"hh": "h", "ih": "ɪ", "jh": "ʤ", "ng": "ŋ", "ow": "oʊ", "oy": "ɔɪ",
"sh": "ʃ", "th": "θ", "uh": "ʊ", "uw": "u", "zh": "ʒ", "iy": "i", "y": "j"}
final_list = [] # the final list of IPA tokens to be returned
for word_list in cmu_list:
ipa_word_list = [] # the word list for each word
for word in word_list:
if stress_marking:
word = stress.find_stress(word, type=stress_marking)
else:
if re.sub(r"\d*", "", word.replace("__IGNORE__", "")) == "":
pass # do not delete token if it's all numbers
else:
word = re.sub("[0-9]", "", word)
ipa_form = ''
if word.startswith("__IGNORE__"):
ipa_form = word.replace("__IGNORE__", "")
# mark words we couldn't transliterate with an asterisk:
if mark:
if not re.sub(r"\d*", "", ipa_form) == "":
ipa_form += "*"
else:
for piece in word.split(" "):
marked = False
unmarked = piece
if piece[0] in ["ˈ", "ˌ"]:
marked = True
mark = piece[0]
unmarked = piece[1:]
if unmarked in symbols:
if marked:
ipa_form += mark + symbols[unmarked]
else:
ipa_form += symbols[unmarked]
else:
ipa_form += piece
swap_list = [["ˈər", "əˈr"], ["ˈie", "iˈe"]]
for sym in swap_list:
if not ipa_form.startswith(sym[0]):
ipa_form = ipa_form.replace(sym[0], sym[1])
ipa_word_list.append(ipa_form)
final_list.append(sorted(list(set(ipa_word_list))))
return final_list
def get_top(ipa_list):
"""Returns only the one result for a query. If multiple entries for words are found, only the first is used."""
return ' '.join([word_list[-1] for word_list in ipa_list])
def get_all(ipa_list):
"""utilizes an algorithm to discover and return all possible combinations of IPA transcriptions"""
final_size = 1
for word_list in ipa_list:
final_size *= len(word_list)
list_all = ["" for s in range(final_size)]
for i in range(len(ipa_list)):
if i == 0:
swtich_rate = final_size / len(ipa_list[i])
else:
swtich_rate /= len(ipa_list[i])
k = 0
for j in range(final_size):
if (j+1) % int(swtich_rate) == 0:
k += 1
if k == len(ipa_list[i]):
k = 0
list_all[j] = list_all[j] + ipa_list[i][k] + " "
return sorted([sent[:-1] for sent in list_all])
def ipa_list(words_in, keep_punct=True, stress_marks='both', db_type="sql"):
"""Returns a list of all the discovered IPA transcriptions for each word."""
words = [preserve_punc(w.lower())[0] for w in words_in.split()] \
if type(words_in) == str else [preserve_punc(w.lower())[0] for w in words_in]
cmu = get_cmu([w[1] for w in words], db_type=db_type)
ipa = cmu_to_ipa(cmu, stress_marking=stress_marks)
if keep_punct:
ipa = _punct_replace_word(words, ipa)
return ipa
def isin_cmu(word, db_type="sql"):
"""checks if a word is in the CMU dictionary. Doesn't strip punctuation.
If given more than one word, returns True only if all words are present."""
if type(word) == str:
word = [preprocess(w) for w in word.split()]
results = fetch_words(word, db_type)
as_set = list(set(t[0] for t in results))
return len(as_set) == len(set(word))
def contains(ipa, db_type="sql"):
"""Get any words that contain the IPA string. Returns the word and the IPA as a list."""
asset = ModeType(mode=db_type).mode
if db_type.lower() == "sql":
asset.execute("SELECT word, ipa FROM eng_ipa WHERE "
"REPLACE(REPLACE(ipa, 'ˌ', ''), 'ˈ', '') "
"LIKE \"%{}%\"".format(str(ipa)))
return [list(res) for res in asset.fetchall()]
def convert(text, retrieve_all=False, keep_punct=True, stress_marks='both', mode="sql"):
"""takes either a string or list of English words and converts them to IPA"""
ipa = ipa_list(words_in=text, keep_punct=keep_punct,
stress_marks=stress_marks, db_type=mode)
return get_all(ipa) if retrieve_all else get_top(ipa)
def jonvert(text, retrieve_all=False, keep_punct=True, stress_marks='both'):
"""Forces use of JSON database for fetching phoneme data."""
return convert(text, retrieve_all, keep_punct, stress_marks, mode="json")
| # -*- coding: utf-8 -*-
import re
from os.path import join, abspath, dirname
import eng_to_ipa.stress as stress
from collections import defaultdict
class ModeType(object):
def __init__(self, mode):
self.name = mode
if mode.lower() == "sql":
import sqlite3
conn = sqlite3.connect(join(abspath(dirname(__file__)),
"./resources/CMU_dict.db"))
self.mode = conn.cursor()
elif mode.lower() == "json":
import json
json_file = open(join(abspath(dirname(__file__)),
"../eng_to_ipa/resources/CMU_dict.json"),
encoding="UTF-8")
self.mode = json.load(json_file)
def __str__(self):
return self.name
def preprocess(words):
"""Returns a string of words stripped of punctuation"""
punct_str = '!"#$%&\'()*+,-./:;<=>/?@[\\]^_`{|}~«» '
return ' '.join([w.strip(punct_str).lower() for w in words.split()])
def preserve_punc(words):
"""converts words to IPA and finds punctuation before and after the word."""
words_preserved = []
for w in words.split():
punct_list = ["", preprocess(w), ""]
before = re.search(r"^([^A-Za-z0-9]+)[A-Za-z]", w)
after = re.search(r"[A-Za-z]([^A-Za-z0-9]+)$", w)
if before:
punct_list[0] = str(before.group(1))
if after:
punct_list[2] = str(after.group(1))
words_preserved.append(punct_list)
return words_preserved
def apply_punct(triple, as_str=False):
"""places surrounding punctuation back on center on a list of preserve_punc triples"""
if type(triple[0]) == list:
for i, t in enumerate(triple):
triple[i] = str(''.join(triple[i]))
if as_str:
return ' '.join(triple)
return triple
if as_str:
return str(''.join(t for t in triple))
return [''.join(t for t in triple)]
def _punct_replace_word(original, transcription):
"""Get the IPA transcription of word with the original punctuation marks"""
for i, trans_list in enumerate(transcription):
for j, item in enumerate(trans_list):
triple = [original[i][0]] + [item] + [original[i][2]]
transcription[i][j] = apply_punct(triple, as_str=True)
return transcription
def fetch_words(words_in, db_type="sql"):
"""fetches a list of words from the database"""
asset = ModeType(mode=db_type).mode
if db_type.lower() == "sql":
quest = "?, " * len(words_in)
asset.execute("SELECT word, phonemes FROM dictionary "
"WHERE word IN ({0})".format(quest[:-2]), words_in)
result = asset.fetchall()
d = defaultdict(list)
for k, v in result:
d[k].append(v)
return list(d.items())
if db_type.lower() == "json":
words = []
for k, v in asset.items():
if k in words_in:
words.append((k, v))
return words
def get_cmu(tokens_in, db_type="sql"):
"""query the SQL database for the words and return the phonemes in the order of user_in"""
result = fetch_words(tokens_in, db_type)
ordered = []
for word in tokens_in:
this_word = [[i[1] for i in result if i[0] == word]][0]
if this_word:
ordered.append(this_word[0])
else:
ordered.append(["__IGNORE__" + word])
return ordered
def cmu_to_ipa(cmu_list, mark=True, stress_marking='all'):
"""converts the CMU word lists into IPA transcriptions"""
symbols = {"a": "ə", "ey": "eɪ", "aa": "ɑ", "ae": "æ", "ah": "ə", "ao": "ɔ",
"aw": "aʊ", "ay": "aɪ", "ch": "ʧ", "dh": "ð", "eh": "ɛ", "er": "ər",
"hh": "h", "ih": "ɪ", "jh": "ʤ", "ng": "ŋ", "ow": "oʊ", "oy": "ɔɪ",
"sh": "ʃ", "th": "θ", "uh": "ʊ", "uw": "u", "zh": "ʒ", "iy": "i", "y": "j"}
final_list = [] # the final list of IPA tokens to be returned
for word_list in cmu_list:
ipa_word_list = [] # the word list for each word
for word in word_list:
if stress_marking:
word = stress.find_stress(word, type=stress_marking)
else:
if re.sub(r"\d*", "", word.replace("__IGNORE__", "")) == "":
pass # do not delete token if it's all numbers
else:
word = re.sub("[0-9]", "", word)
ipa_form = ''
if word.startswith("__IGNORE__"):
ipa_form = word.replace("__IGNORE__", "")
# mark words we couldn't transliterate with an asterisk:
if mark:
if not re.sub(r"\d*", "", ipa_form) == "":
ipa_form += "*"
else:
for piece in word.split(" "):
marked = False
unmarked = piece
if piece[0] in ["ˈ", "ˌ"]:
marked = True
mark = piece[0]
unmarked = piece[1:]
if unmarked in symbols:
if marked:
ipa_form += mark + symbols[unmarked]
else:
ipa_form += symbols[unmarked]
else:
ipa_form += piece
swap_list = [["ˈər", "əˈr"], ["ˈie", "iˈe"]]
for sym in swap_list:
if not ipa_form.startswith(sym[0]):
ipa_form = ipa_form.replace(sym[0], sym[1])
ipa_word_list.append(ipa_form)
final_list.append(sorted(list(set(ipa_word_list))))
return final_list
def get_top(ipa_list):
"""Returns only the one result for a query. If multiple entries for words are found, only the first is used."""
return ' '.join([word_list[-1] for word_list in ipa_list])
def get_all(ipa_list):
"""utilizes an algorithm to discover and return all possible combinations of IPA transcriptions"""
final_size = 1
for word_list in ipa_list:
final_size *= len(word_list)
list_all = ["" for s in range(final_size)]
for i in range(len(ipa_list)):
if i == 0:
swtich_rate = final_size / len(ipa_list[i])
else:
swtich_rate /= len(ipa_list[i])
k = 0
for j in range(final_size):
if (j+1) % int(swtich_rate) == 0:
k += 1
if k == len(ipa_list[i]):
k = 0
list_all[j] = list_all[j] + ipa_list[i][k] + " "
return sorted([sent[:-1] for sent in list_all])
def ipa_list(words_in, keep_punct=True, stress_marks='both', db_type="sql"):
"""Returns a list of all the discovered IPA transcriptions for each word."""
words = [preserve_punc(w.lower())[0] for w in words_in.split()] \
if type(words_in) == str else [preserve_punc(w.lower())[0] for w in words_in]
cmu = get_cmu([w[1] for w in words], db_type=db_type)
ipa = cmu_to_ipa(cmu, stress_marking=stress_marks)
if keep_punct:
ipa = _punct_replace_word(words, ipa)
return ipa
def isin_cmu(word, db_type="sql"):
"""checks if a word is in the CMU dictionary. Doesn't strip punctuation.
If given more than one word, returns True only if all words are present."""
if type(word) == str:
word = [preprocess(w) for w in word.split()]
results = fetch_words(word, db_type)
as_set = list(set(t[0] for t in results))
return len(as_set) == len(set(word))
def contains(ipa, db_type="sql"):
"""Get any words that contain the IPA string. Returns the word and the IPA as a list."""
asset = ModeType(mode=db_type).mode
if db_type.lower() == "sql":
asset.execute("SELECT word, ipa FROM eng_ipa WHERE "
"REPLACE(REPLACE(ipa, 'ˌ', ''), 'ˈ', '') "
"LIKE \"%{}%\"".format(str(ipa)))
return [list(res) for res in asset.fetchall()]
def convert(text, retrieve_all=False, keep_punct=True, stress_marks='both', mode="sql"):
"""takes either a string or list of English words and converts them to IPA"""
ipa = ipa_list(words_in=text, keep_punct=keep_punct,
stress_marks=stress_marks, db_type=mode)
return get_all(ipa) if retrieve_all else get_top(ipa)
def jonvert(text, retrieve_all=False, keep_punct=True, stress_marks='both'):
"""Forces use of JSON database for fetching phoneme data."""
return convert(text, retrieve_all, keep_punct, stress_marks, mode="json")
| en | 0.823404 | # -*- coding: utf-8 -*- Returns a string of words stripped of punctuation converts words to IPA and finds punctuation before and after the word. places surrounding punctuation back on center on a list of preserve_punc triples Get the IPA transcription of word with the original punctuation marks fetches a list of words from the database query the SQL database for the words and return the phonemes in the order of user_in converts the CMU word lists into IPA transcriptions # the final list of IPA tokens to be returned # the word list for each word # do not delete token if it's all numbers # mark words we couldn't transliterate with an asterisk: Returns only the one result for a query. If multiple entries for words are found, only the first is used. utilizes an algorithm to discover and return all possible combinations of IPA transcriptions Returns a list of all the discovered IPA transcriptions for each word. checks if a word is in the CMU dictionary. Doesn't strip punctuation. If given more than one word, returns True only if all words are present. Get any words that contain the IPA string. Returns the word and the IPA as a list. takes either a string or list of English words and converts them to IPA Forces use of JSON database for fetching phoneme data. | 2.965063 | 3 |
src/colors_redis.py | stajc06/hue_sms | 1 | 6624882 | <reponame>stajc06/hue_sms<gh_stars>1-10
from redis import Redis
from name_converter import clean_name
class colorsRedis:
def __init__(self):
self.connect()
def connect(self):
self.db = Redis('localhost', 6379)
def numColors(self):
return len(self.db.hgetall("colors"))
def register_color(self, colorName, r, g, b):
key = clean_name(colorName)
value = str(r + "," + g + "," + b)
self.db.hset("colors", key, value)
def is_color(self, colorName):
key = clean_name(colorName)
return self.db.hexists("colors", key)
| from redis import Redis
from name_converter import clean_name
class colorsRedis:
def __init__(self):
self.connect()
def connect(self):
self.db = Redis('localhost', 6379)
def numColors(self):
return len(self.db.hgetall("colors"))
def register_color(self, colorName, r, g, b):
key = clean_name(colorName)
value = str(r + "," + g + "," + b)
self.db.hset("colors", key, value)
def is_color(self, colorName):
key = clean_name(colorName)
return self.db.hexists("colors", key) | none | 1 | 3.101751 | 3 | |
tests/pytests/test_vecsim.py | Mu-L/RediSearch | 0 | 6624883 | <filename>tests/pytests/test_vecsim.py
# -*- coding: utf-8 -*-
import base64
import random
import string
import unittest
from time import sleep
import numpy as np
from RLTest import Env
from common import *
from includes import *
def test_sanity(env):
conn = getConnectionByEnv(env)
vecsim_type = ['FLAT', 'HNSW']
for vs_type in vecsim_type:
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
conn.execute_command('HSET', 'a', 'v', 'aaaaaaaa')
conn.execute_command('HSET', 'b', 'v', 'aaaabaaa')
conn.execute_command('HSET', 'c', 'v', 'aaaaabaa')
conn.execute_command('HSET', 'd', 'v', 'aaaaaaba')
res = [4L, 'a', ['v_score', '0', 'v', 'aaaaaaaa'],
'b', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
'c', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'],
'd', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
res1 = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 4]', 'SORTBY', 'v_score', 'ASC')
env.assertEqual(res, res1)
# todo: make test work on coordinator
res = [4L, 'c', ['v_score', '0', 'v', 'aaaaabaa'],
'b', ['v_score', '2.01242627636e+31', 'v', 'aaaabaaa'],
'a', ['v_score', '2.02824096037e+31', 'v', 'aaaaaaaa'],
'd', ['v_score', '1.31886368448e+36', 'v', 'aaaaaaba']]
res1 = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaabaa TOPK 4]', 'SORTBY', 'v_score', 'ASC')
env.assertEqual(res, res1)
expected_res = ['v_score', '0', 'v', 'aaaaaaaa']
res = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
env.assertEqual(res[2], expected_res)
message = 'aaaaaaaa'
message_bytes = message.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
# print message_bytes
# print base64_bytes
# print base64_message
# RANGE uses topk but translate to base64 before
res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
env.assertEqual(res[2], expected_res)
res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
env.assertEqual(res[2], expected_res)
res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => { $base64:true; $efRuntime:100}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
env.assertEqual(res[2], expected_res)
#####################
## another example ##
#####################
message = 'aaaaabaa'
res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + message +' TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
env.assertEqual(res[2], ['v_score', '0', 'v', 'aaaaabaa'])
conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testEscape(env):
    """TOPK queries whose vector blobs contain query-syntax characters.

    '+' and '/' must be backslash-escaped in the raw query form; the base64
    form encodes the escaped query string as-is and is passed with the
    ``$base64`` attribute. Runs over both FLAT and HNSW indexes.
    """
    conn = getConnectionByEnv(env)
    for index_algo in ['FLAT', 'HNSW']:
        conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', index_algo, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
        for doc_key, blob in [('a', '////////'), ('b', '++++++++'), ('c', 'abcdefgh'), ('d', 'aacdefgh'), ('e', 'aaadefgh')]:
            conn.execute_command('HSET', doc_key, 'v', blob)
        escaped_queries = ['\+\+\+\+\+\+\+\+', '\/\/\/\/\/\/\/\/', 'abcdefgh', 'aacdefgh', 'aaadefgh']
        for escaped in escaped_queries:
            # The stored value has no backslashes, so compare after stripping them.
            unescaped = escaped.replace('\\', '')
            top = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + escaped + ' TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
            env.assertEqual(top[2][3], unescaped)
            # Base64 form: the escaped query string itself is what gets encoded.
            encoded = base64.b64encode(escaped.encode('ascii')).decode('ascii')
            # RANGE uses topk but translate to base64 before
            top = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + encoded + ' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
            env.assertEqual(top[2][3], unescaped)
        conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testDel(env):
    """TOPK queries before and after deleting a document.

    Bug fix: the post-DEL expected-results list used to be assigned to ``res``
    and then immediately overwritten by the query result, so the three
    assertions after DEL compared ``res`` against itself and always passed.
    The expected list's key/vector pairing also contradicted the HSETs above
    (e.g. 'd' was listed with 'aaaabaaa' although 'd' stores 'aaaaabaa').
    Both are fixed: the expected ranking after removing 'a' is the pre-DEL
    ranking minus 'a', with each remaining doc keeping its score.
    """
    conn = getConnectionByEnv(env)
    vecsim_type = ['FLAT', 'HNSW']
    for vs_type in vecsim_type:
        conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
        conn.execute_command('HSET', 'a', 'v', 'aaaaaaaa')
        conn.execute_command('HSET', 'b', 'v', 'aaaaaaba')
        conn.execute_command('HSET', 'c', 'v', 'aaaabaaa')
        conn.execute_command('HSET', 'd', 'v', 'aaaaabaa')
        # Expected ranking for a query with doc 'a''s exact blob.
        expected_res = ['a', ['v_score', '0', 'v', 'aaaaaaaa'], 'c', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
                        'd', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'], 'b', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[1:3], expected_res[0:2])
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 2]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 2)
        env.assertEqual(res[1:5], expected_res[0:4])
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 3]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 3)
        env.assertEqual(res[1:7], expected_res[0:6])
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 4]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 4)
        env.assertEqual(res[1:9], expected_res[0:8])
        conn.execute_command('DEL', 'a')
        # After removing 'a', the remaining docs keep their scores; 'c' is now closest.
        expected_res = ['c', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
                        'd', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'],
                        'b', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[1:3], expected_res[0:2])
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 2]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 2)
        env.assertEqual(res[1:5], expected_res[0:4])
        res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 3]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 3)
        env.assertEqual(res[1:7], expected_res[0:6])
        '''
        This test returns 4 results instead of the expected 3. The HNSW library return the additional results.
        env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal([3L, 'b', ['v', 'abcdefgg'], 'c', ['v', 'aacdefgh'], 'd', ['v', 'azcdefgh']])
        '''
        conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testDelReuse(env):
    """Repeatedly delete and re-insert docs into an HNSW index and verify
    that a TOPK query returns the freshly inserted random vectors each time.
    """
    # NOTE(review): this inner function is defined but never called, so its
    # empty-index assertions never run - confirm whether it should be invoked
    # or removed.
    def test_query_empty(env):
        """Query an empty index, then a one-doc index, then the index after
        the doc is deleted; expects 0 -> 1 -> 0 results respectively.
        """
        conn = getConnectionByEnv(env)
        vecsim_type = ['FLAT', 'HNSW']
        for vs_type in vecsim_type:
            conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([0L])
            conn.execute_command('HSET', 'a', 'v', 'redislab')
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([1L, 'a', ['v', 'redislab']])
            conn.execute_command('DEL', 'a')
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([0L])
            conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
    def del_insert(env):
        """Delete docs a-d, verify the index is empty, then re-insert them
        with fresh random 8-char lowercase blobs. Returns the four blobs in
        insertion order (a, b, c, d).
        """
        conn = getConnectionByEnv(env)
        conn.execute_command('DEL', 'a')
        conn.execute_command('DEL', 'b')
        conn.execute_command('DEL', 'c')
        conn.execute_command('DEL', 'd')
        # Index must be empty after all four deletions.
        env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal([0L])
        # string.lowercase is Python 2 only (string.ascii_lowercase in Python 3).
        res = [''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8))]
        conn.execute_command('HSET', 'a', 'v', res[0])
        conn.execute_command('HSET', 'b', 'v', res[1])
        conn.execute_command('HSET', 'c', 'v', res[2])
        conn.execute_command('HSET', 'd', 'v', res[3])
        return res
    # test start
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
    # Three delete/insert cycles; each time TOPK 4 must return the new vectors.
    # NOTE(review): the expected list assumes results come back in the order
    # a, b, c, d regardless of the random vectors' distances - confirm this
    # ordering assumption holds for the query blob 'abcdefgh'.
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
def load_vectors_to_redis(env, n_vec, query_vec_index, vec_size):
    """Store n_vec random FLOAT32 vectors of length vec_size under keys 0..n_vec-1.

    Returns the vector stored at *query_vec_index* so the caller can reuse it
    as a query vector.

    Raises ValueError when query_vec_index is not in [0, n_vec) — the original
    code would instead crash with an UnboundLocalError at the return statement.
    """
    conn = getConnectionByEnv(env)
    if not 0 <= query_vec_index < n_vec:
        raise ValueError('query_vec_index %r out of range [0, %r)' % (query_vec_index, n_vec))
    query_vec = None
    for i in range(n_vec):
        vector = np.random.rand(1, vec_size).astype(np.float32)
        if i == query_vec_index:
            query_vec = vector
        # base64_vector = base64.b64encode(vector).decode('ascii')
        conn.execute_command('HSET', i, 'vector', vector.tobytes())
    return query_vec
def query_vector(env, idx, query_vec):
    """Run a 5-nearest-neighbour RANGE query against *idx* using the
    base64-encoded *query_vec*, sorted by ascending score."""
    conn = getConnectionByEnv(env)
    encoded = base64.b64encode(query_vec).decode('ascii')
    # base64 output may contain '=', '/', '+', which are special to the
    # query parser — backslash-escape each of them
    for special in ('=', '/', '+'):
        encoded = encoded.replace(special, '\\' + special)
    query = '@vector:[' + encoded + ' RANGE 5]'
    return conn.execute_command('FT.SEARCH', idx, query,
                                'SORTBY', 'vector_score', 'ASC',
                                'RETURN', 1, 'vector_score', 'LIMIT', 0, 5)
def testDelReuseLarge(env):
    """Load 5 large (1280-dim) vectors three times over (each round overwrites
    keys 0..4) and verify a RANGE query returns results sorted by ascending
    score after every round."""
    conn = getConnectionByEnv(env)
    INDEX_NAME = 'items'
    prefix = 'item'  # NOTE(review): unused — presumably intended for a PREFIX clause; confirm
    n_vec = 5
    query_vec_index = 3
    vec_size = 1280
    conn.execute_command('FT.CREATE', INDEX_NAME, 'ON', 'HASH',
        'SCHEMA', 'vector', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1280', 'DISTANCE_METRIC', 'L2')
    for _ in range(3):
        query_vec = load_vectors_to_redis(env, n_vec, query_vec_index, vec_size)
        res = query_vector(env, INDEX_NAME, query_vec)
        print res
        # scores of the 5 returned results must be non-decreasing
        for i in range(4):
            env.assertLessEqual(float(res[2 + i * 2][1]), float(res[2 + (i + 1) * 2][1]))
def testCreate(env):
    """Create an HNSW index with explicit build/runtime parameters and verify
    both FT.INFO attributes and FT.DEBUG VECSIM_INFO reflect them."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx1', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'M', '16', 'EF_CONSTRUCTION', '200', 'EF_RUNTIME', '10')
    info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR']]
    assertInfoField(env, 'idx1', 'attributes', info)
    # NOTE(review): the MEMORY figure (43228) is allocator/version specific
    # and likely to break across builds — confirm it is stable enough to pin.
    env.assertEqual(env.cmd("FT.DEBUG", "VECSIM_INFO", "idx1", "v"), ['ALGORITHM', 'HNSW', 'TYPE', 'FLOAT32', 'DIMENSION', 1024L, 'METRIC', 'IP', 'INDEX_SIZE', 0L, 'M', 16L, 'EF_CONSTRUCTION', 200L, 'EF_RUNTIME', 10L, 'MAX_LEVEL', -1L, 'ENTRYPOINT', -1L, 'MEMORY', 43228L])
    # Uncomment these tests when support for FLOAT64, INT32, INT64, is added.
    # Trying to run these tests right now will cause 'Bad arguments for vector similarity HNSW index type' error
    # conn.execute_command('FT.CREATE', 'idx2', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'INITIAL_CAP', '10', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20']]
    # assertInfoField(env, 'idx2', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx3', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'INITIAL_CAP', '10', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50']]
    # assertInfoField(env, 'idx3', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx4', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '16', 'EF_CONSTRUCTION', '200', 'EF_RUNTIME', '10']]
    # assertInfoField(env, 'idx4', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx5', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'FLAT', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'BLOCK_SIZE', str(1024 * 1024)]]
    # assertInfoField(env, 'idx5', 'attributes', info)
def testErrors(env):
    """Exercise FT.CREATE / FT.SEARCH argument validation for VECTOR fields:
    missing mandatory parameters, malformed values, unknown algorithms and
    bad query syntax must each produce a specific error message."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    # missing init args
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR').error().contains('Bad arguments for vector similarity algorithm')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT').error().contains('Bad arguments for vector similarity number of parameters')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6').error().contains('Expected 6 parameters but got 0')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '1').error().contains('Bad number of arguments for vector similarity index: got 1 but expected even number')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '2', 'SIZE').error().contains('Bad arguments for algorithm FLAT: SIZE')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '2', 'TYPE').error().contains('Bad arguments for vector similarity FLAT index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DIM').error().contains('Bad arguments for vector similarity FLAT index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying TYPE argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying DIM argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DIM', '1024').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying DISTANCE_METRIC argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC').error().contains('Bad arguments for vector similarity FLAT index metric')
    # same checks for the HNSW algorithm
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW').error().contains('Bad arguments for vector similarity number of parameters')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6').error().contains('Expected 6 parameters but got 0')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '1').error().contains('Bad number of arguments for vector similarity index: got 1 but expected even number')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '2', 'SIZE').error().contains('Bad arguments for algorithm HNSW: SIZE')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '2', 'TYPE').error().contains('Bad arguments for vector similarity HNSW index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DIM').error().contains('Bad arguments for vector similarity HNSW index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying TYPE argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying DIM argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DIM', '1024').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying DISTANCE_METRIC argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC').error().contains('Bad arguments for vector similarity HNSW index metric')
    # invalid init args
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'DOUBLE', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity HNSW index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', 'str', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity HNSW index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'REDIS').error().contains('Bad arguments for vector similarity HNSW index metric')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'REDIS', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity algorithm')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '10', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', 'str', 'BLOCK_SIZE', '16') \
        .error().contains('Bad arguments for vector similarity FLAT index initial cap')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '10', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'BLOCK_SIZE', 'str') \
        .error().contains('Bad arguments for vector similarity FLAT index blocksize')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', 'str', 'M', '16', 'EF_CONSTRUCTION', '200') \
        .error().contains('Bad arguments for vector similarity HNSW index initial cap')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', 'str', 'EF_CONSTRUCTION', '200') \
        .error().contains('Bad arguments for vector similarity HNSW index m')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', '16', 'EF_CONSTRUCTION', 'str') \
        .error().contains('Bad arguments for vector similarity HNSW index efConstruction')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', '16', 'EF_RUNTIME', 'str') \
        .error().contains('Bad arguments for vector similarity HNSW index efRuntime')
    # test wrong query word
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'M', '16', 'EF_CONSTRUCTION', '200')
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh REDIS 4]').error().contains('Invalid Vector similarity type')
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK str]').error().contains('Syntax error')
def load_vectors_into_redis(con, vector_field, dim, num_vectors):
    """Pipeline *num_vectors* random FLOAT32 vectors of length *dim* into Redis.

    Each key i (0..num_vectors-1) gets the raw vector bytes in *vector_field*
    and a text field 't' set to i % 10. Returns a list of (id, vector) pairs.

    Fix: the original created a pipeline but issued every HSET directly on the
    connection, so p.execute() flushed an empty pipeline — the commands now
    actually go through the pipeline.
    """
    data = np.float32(np.random.random((num_vectors, dim)))
    id_vec_list = []
    p = con.pipeline(transaction=False)
    for i, vector in enumerate(data):
        p.execute_command('HSET', i, vector_field, vector.tobytes(), 't', i % 10)
        id_vec_list.append((i, vector))
    p.execute()
    return id_vec_list
def test_with_fields(env):
    """TOPK query with RETURN fields vs NOCONTENT: both replies must contain
    the same document ids, and the RETURN reply must carry the text field 't'
    alongside the score field."""
    conn = getConnectionByEnv(env)
    dimension = 128
    qty = 100
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', dimension, 'DISTANCE_METRIC', 'L2', 't', 'TEXT')
    load_vectors_into_redis(conn, 'v', dimension, qty)
    query_data = np.float32(np.random.random((1, dimension)))
    res = env.cmd('FT.SEARCH', 'idx', '5 @v:[$vec_param TOPK 100]',
                  'SORTBY', 'v_score', 'PARAMS', 2, 'vec_param', query_data.tobytes(),
                  'RETURN', 2, 'v_score', 't')
    res_nocontent = env.cmd('FT.SEARCH', 'idx', '5 @v:[$vec_param TOPK 100]',
                            'SORTBY', 'v_score', 'PARAMS', 2, 'vec_param', query_data.tobytes(),
                            'NOCONTENT')
    # ids are every other element after the count; must match NOCONTENT's ids
    env.assertEqual(res[1::2], res_nocontent[1:])
    # first result's field list contains 't' at position 2
    env.assertEqual('t', res[2][2])
def get_vecsim_memory(env, index_key, field_name):
    """Return the vecsim index's reported MEMORY (bytes) converted to MB."""
    debug_info = to_dict(env.cmd("FT.DEBUG", "VECSIM_INFO", index_key, field_name))
    bytes_used = float(debug_info["MEMORY"])
    return bytes_used / 0x100000
def test_memory_info(env):
    """Memory accounting sanity flow: add two vectors and delete them again,
    checking after every step that Redis memory, the RediSearch vector-index
    memory and the vecsim-reported memory all move in the expected direction,
    that redisearch memory never exceeds total Redis memory, and that the
    vecsim and redisearch figures agree.

    Cleanups vs the original: drops a duplicated getConnectionByEnv call and
    folds four copy-pasted 15-line verification blocks into helpers.
    """
    # Skip on cluster as FT.DEBUG not supported.
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    dimension = 128
    index_key = 'idx'
    vector_field = 'v'

    def read_memory():
        # Read all three gauges in one place: (redis, redisearch, vecsim), in MB.
        return (get_redis_memory_in_mb(env),
                get_redisearch_vector_index_memory(env, index_key=index_key),
                get_vecsim_memory(env, index_key=index_key, field_name=vector_field))

    def check_step(prev, increased):
        # Assert each gauge moved in the expected direction relative to *prev*,
        # plus the two invariants: redis >= redisearch and vecsim == redisearch.
        cur = read_memory()
        for before, after in zip(prev, cur):
            if increased:
                env.assertLessEqual(before, after)
            else:
                env.assertLessEqual(after, before)
        env.assertLessEqual(cur[1], cur[0])
        env.assertEqual(cur[2], cur[1])
        return cur

    # Create index. Flat index implementation will free memory when deleting
    # vectors, so it is a good candidate for this test with respect to memory
    # consumption. (Note: 'BLOCK_SiZE' relies on case-insensitive arg parsing.)
    conn.execute_command('FT.CREATE', index_key, 'SCHEMA', vector_field, 'VECTOR', 'FLAT', '8', 'TYPE', 'FLOAT32', 'DIM', dimension, 'DISTANCE_METRIC', 'L2', 'BLOCK_SiZE', '1')
    # Baseline: redis memory >= redisearch index memory, vecsim == redisearch.
    state = read_memory()
    env.assertLessEqual(state[1], state[0])
    env.assertEqual(state[1], state[2])

    vector = np.float32(np.random.random((1, dimension)))
    # Add two vectors — memory must grow after each insert.
    conn.execute_command('HSET', 1, vector_field, vector.tobytes())
    state = check_step(state, increased=True)
    conn.execute_command('HSET', 2, vector_field, vector.tobytes())
    state = check_step(state, increased=True)
    # Delete both vectors — memory must shrink after each delete.
    conn.execute_command('DEL', 2)
    state = check_step(state, increased=False)
    conn.execute_command('DEL', 1)
    state = check_step(state, increased=False)
# tests/pytests/test_vecsim.py
# -*- coding: utf-8 -*-
import base64
import random
import string
import unittest
from time import sleep
import numpy as np
from RLTest import Env
from common import *
from includes import *
def test_sanity(env):
    """Basic TOPK flow for both FLAT and HNSW: insert four 8-byte blobs
    (2 x FLOAT32 each), query with SORTBY on the score field and check the
    exact scores; also exercises the $base64 and $efRuntime query attributes."""
    conn = getConnectionByEnv(env)
    vecsim_type = ['FLAT', 'HNSW']
    for vs_type in vecsim_type:
        conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
        conn.execute_command('HSET', 'a', 'v', 'aaaaaaaa')
        conn.execute_command('HSET', 'b', 'v', 'aaaabaaa')
        conn.execute_command('HSET', 'c', 'v', 'aaaaabaa')
        conn.execute_command('HSET', 'd', 'v', 'aaaaaaba')
        # expected L2 scores for query blob 'aaaaaaaa', ascending
        res = [4L, 'a', ['v_score', '0', 'v', 'aaaaaaaa'],
               'b', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
               'c', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'],
               'd', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
        res1 = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 4]', 'SORTBY', 'v_score', 'ASC')
        env.assertEqual(res, res1)
        # todo: make test work on coordinator
        # same index queried with a different blob — new score ordering
        res = [4L, 'c', ['v_score', '0', 'v', 'aaaaabaa'],
               'b', ['v_score', '2.01242627636e+31', 'v', 'aaaabaaa'],
               'a', ['v_score', '2.02824096037e+31', 'v', 'aaaaaaaa'],
               'd', ['v_score', '1.31886368448e+36', 'v', 'aaaaaaba']]
        res1 = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaabaa TOPK 4]', 'SORTBY', 'v_score', 'ASC')
        env.assertEqual(res, res1)
        expected_res = ['v_score', '0', 'v', 'aaaaaaaa']
        res = conn.execute_command('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[2], expected_res)
        message = 'aaaaaaaa'
        message_bytes = message.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        base64_message = base64_bytes.decode('ascii')
        # print message_bytes
        # print base64_bytes
        # print base64_message
        # RANGE uses topk but translate to base64 before
        res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[2], expected_res)
        # identical query repeated — result must be stable
        res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[2], expected_res)
        # $efRuntime combined with $base64 must not change the top result
        res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message +' TOPK 1] => { $base64:true; $efRuntime:100}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[2], expected_res)
        #####################
        ## another example ##
        #####################
        message = 'aaaaabaa'
        res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + message +' TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
        env.assertEqual(res[2], ['v_score', '0', 'v', 'aaaaabaa'])
        conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testEscape(env):
    """Vectors whose raw bytes contain query-syntax characters ('+', '/') must
    be queryable both with inline backslash escaping and via $base64."""
    conn = getConnectionByEnv(env)
    vecsim_type = ['FLAT', 'HNSW']
    for vs_type in vecsim_type:
        conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
        conn.execute_command('HSET', 'a', 'v', '////////')
        conn.execute_command('HSET', 'b', 'v', '++++++++')
        conn.execute_command('HSET', 'c', 'v', 'abcdefgh')
        conn.execute_command('HSET', 'd', 'v', 'aacdefgh')
        conn.execute_command('HSET', 'e', 'v', 'aaadefgh')
        # first two entries carry backslash escapes for the query parser
        messages = ['\+\+\+\+\+\+\+\+', '\/\/\/\/\/\/\/\/', 'abcdefgh', 'aacdefgh', 'aaadefgh']
        for message in messages:
            res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + message + ' TOPK 1]', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
            # the top result's stored value must equal the query blob with
            # the escape backslashes stripped
            env.assertEqual(res[2][3], message.replace('\\', ''))
            message_bytes = message.encode('ascii')
            base64_bytes = base64.b64encode(message_bytes)
            base64_message = base64_bytes.decode('ascii')
            # print message_bytes
            # print base64_bytes
            # print base64_message
            # RANGE uses topk but translate to base64 before
            res = conn.execute_command('FT.SEARCH', 'idx', '@v:[' + base64_message + ' TOPK 1] => {$base64:true}', 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, 1)
            env.assertEqual(res[2][3], message.replace('\\', ''))
        conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testDel(env):
    """Verify TOPK results before and after deleting a document, for both
    FLAT and HNSW indexes.

    Fixes a bug where the post-deletion expected list was assigned to ``res``
    and then immediately clobbered by the query reply, turning every
    subsequent assertion into a tautology (``res[1:3] == res[1:3]``). The
    expected entries are restored from the distances established by the
    pre-deletion expectations in this same function.
    """
    conn = getConnectionByEnv(env)
    vecsim_type = ['FLAT', 'HNSW']
    for vs_type in vecsim_type:
        conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
        conn.execute_command('HSET', 'a', 'v', 'aaaaaaaa')
        conn.execute_command('HSET', 'b', 'v', 'aaaaaaba')
        conn.execute_command('HSET', 'c', 'v', 'aaaabaaa')
        conn.execute_command('HSET', 'd', 'v', 'aaaaabaa')
        # docs ordered by ascending L2 distance from the query blob 'aaaaaaaa'
        expected_res = ['a', ['v_score', '0', 'v', 'aaaaaaaa'], 'c', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
                        'd', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'], 'b', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
        for k in range(1, 5):
            res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK %d]' % k, 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, k)
            env.assertEqual(res[1:2 * k + 1], expected_res[0:2 * k])
        conn.execute_command('DEL', 'a')
        # after deleting 'a' the remaining docs keep their relative order
        expected_res = ['c', ['v_score', '3.09485009821e+26', 'v', 'aaaabaaa'],
                        'd', ['v_score', '2.02824096037e+31', 'v', 'aaaaabaa'],
                        'b', ['v_score', '1.32922799578e+36', 'v', 'aaaaaaba']]
        for k in range(1, 4):
            res = env.cmd('FT.SEARCH', 'idx', '@v:[aaaaaaaa TOPK %d]' % k, 'SORTBY', 'v_score', 'ASC', 'LIMIT', 0, k)
            env.assertEqual(res[1:2 * k + 1], expected_res[0:2 * k])
        # This test returns 4 results instead of the expected 3. The HNSW
        # library return the additional results.
        # env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal([3L, 'b', ['v', 'abcdefgg'], 'c', ['v', 'aacdefgh'], 'd', ['v', 'azcdefgh']])
        conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
def testDelReuse(env):
    """Delete and re-insert the same four document keys three times over and
    verify an HNSW index returns exactly the freshly inserted vectors each
    round (i.e. deleted slots are reused correctly)."""
    def test_query_empty(env):
        # NOTE(review): this nested function is never invoked inside
        # testDelReuse — it looks like it was meant to be a top-level test;
        # confirm and either call it or move it out.
        conn = getConnectionByEnv(env)
        vecsim_type = ['FLAT', 'HNSW']
        for vs_type in vecsim_type:
            conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', vs_type, '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
            # empty index -> zero results
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([0L])
            conn.execute_command('HSET', 'a', 'v', 'redislab')
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([1L, 'a', ['v', 'redislab']])
            conn.execute_command('DEL', 'a')
            # back to empty after the delete
            env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 1]').equal([0L])
            conn.execute_command('FT.DROPINDEX', 'idx', 'DD')
    def del_insert(env):
        # Delete docs a-d, verify the index is empty, then re-insert four
        # fresh random 8-byte blobs (2 x FLOAT32). Returns the new blobs.
        conn = getConnectionByEnv(env)
        conn.execute_command('DEL', 'a')
        conn.execute_command('DEL', 'b')
        conn.execute_command('DEL', 'c')
        conn.execute_command('DEL', 'd')
        env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal([0L])
        res = [''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8)),
               ''.join(random.choice(string.lowercase) for x in range(8))]
        conn.execute_command('HSET', 'a', 'v', res[0])
        conn.execute_command('HSET', 'b', 'v', res[1])
        conn.execute_command('HSET', 'c', 'v', res[2])
        conn.execute_command('HSET', 'd', 'v', res[3])
        return res
    # test start
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '2','DISTANCE_METRIC', 'L2')
    # three rounds of delete + re-insert; each must return the new blobs
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
    vecs = del_insert(env)
    res = [4L, 'a', ['v', vecs[0]], 'b', ['v', vecs[1]], 'c', ['v', vecs[2]], 'd', ['v', vecs[3]]]
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal(res)
def load_vectors_to_redis(env, n_vec, query_vec_index, vec_size):
    """Store n_vec random FLOAT32 vectors of length vec_size under keys 0..n_vec-1.

    Returns the vector stored at *query_vec_index* so the caller can reuse it
    as a query vector.

    Raises ValueError when query_vec_index is not in [0, n_vec) — the original
    code would instead crash with an UnboundLocalError at the return statement.
    """
    conn = getConnectionByEnv(env)
    if not 0 <= query_vec_index < n_vec:
        raise ValueError('query_vec_index %r out of range [0, %r)' % (query_vec_index, n_vec))
    query_vec = None
    for i in range(n_vec):
        vector = np.random.rand(1, vec_size).astype(np.float32)
        if i == query_vec_index:
            query_vec = vector
        # base64_vector = base64.b64encode(vector).decode('ascii')
        conn.execute_command('HSET', i, 'vector', vector.tobytes())
    return query_vec
def query_vector(env, idx, query_vec):
    """Run a 5-nearest-neighbour RANGE query against *idx* using the
    base64-encoded *query_vec*, sorted by ascending score."""
    conn = getConnectionByEnv(env)
    encoded = base64.b64encode(query_vec).decode('ascii')
    # base64 output may contain '=', '/', '+', which are special to the
    # query parser — backslash-escape each of them
    for special in ('=', '/', '+'):
        encoded = encoded.replace(special, '\\' + special)
    query = '@vector:[' + encoded + ' RANGE 5]'
    return conn.execute_command('FT.SEARCH', idx, query,
                                'SORTBY', 'vector_score', 'ASC',
                                'RETURN', 1, 'vector_score', 'LIMIT', 0, 5)
def testDelReuseLarge(env):
    """Load 5 large (1280-dim) vectors three times over (each round overwrites
    keys 0..4) and verify a RANGE query returns results sorted by ascending
    score after every round."""
    conn = getConnectionByEnv(env)
    INDEX_NAME = 'items'
    prefix = 'item'  # NOTE(review): unused — presumably intended for a PREFIX clause; confirm
    n_vec = 5
    query_vec_index = 3
    vec_size = 1280
    conn.execute_command('FT.CREATE', INDEX_NAME, 'ON', 'HASH',
        'SCHEMA', 'vector', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1280', 'DISTANCE_METRIC', 'L2')
    for _ in range(3):
        query_vec = load_vectors_to_redis(env, n_vec, query_vec_index, vec_size)
        res = query_vector(env, INDEX_NAME, query_vec)
        print res
        # scores of the 5 returned results must be non-decreasing
        for i in range(4):
            env.assertLessEqual(float(res[2 + i * 2][1]), float(res[2 + (i + 1) * 2][1]))
def testCreate(env):
    """Create an HNSW index with explicit build/runtime parameters and verify
    both FT.INFO attributes and FT.DEBUG VECSIM_INFO reflect them."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx1', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'M', '16', 'EF_CONSTRUCTION', '200', 'EF_RUNTIME', '10')
    info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR']]
    assertInfoField(env, 'idx1', 'attributes', info)
    # NOTE(review): the MEMORY figure (43228) is allocator/version specific
    # and likely to break across builds — confirm it is stable enough to pin.
    env.assertEqual(env.cmd("FT.DEBUG", "VECSIM_INFO", "idx1", "v"), ['ALGORITHM', 'HNSW', 'TYPE', 'FLOAT32', 'DIMENSION', 1024L, 'METRIC', 'IP', 'INDEX_SIZE', 0L, 'M', 16L, 'EF_CONSTRUCTION', 200L, 'EF_RUNTIME', 10L, 'MAX_LEVEL', -1L, 'ENTRYPOINT', -1L, 'MEMORY', 43228L])
    # Uncomment these tests when support for FLOAT64, INT32, INT64, is added.
    # Trying to run these tests right now will cause 'Bad arguments for vector similarity HNSW index type' error
    # conn.execute_command('FT.CREATE', 'idx2', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'INITIAL_CAP', '10', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20']]
    # assertInfoField(env, 'idx2', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx3', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'INITIAL_CAP', '10', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50']]
    # assertInfoField(env, 'idx3', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx4', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '16', 'EF_CONSTRUCTION', '200', 'EF_RUNTIME', '10']]
    # assertInfoField(env, 'idx4', 'attributes', info)
    # conn.execute_command('FT.CREATE', 'idx5', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE')
    # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'FLAT', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'BLOCK_SIZE', str(1024 * 1024)]]
    # assertInfoField(env, 'idx5', 'attributes', info)
def testErrors(env):
    """Exercise argument validation of FT.CREATE / FT.SEARCH for vector fields.

    Covers: missing mandatory creation arguments, malformed argument counts,
    unknown parameter names, bad parameter values, and invalid runtime query
    syntax, for both the FLAT and the HNSW algorithms.
    """
    # FT.DEBUG-style error text assertions are not cluster-safe.
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    # missing init args
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR').error().contains('Bad arguments for vector similarity algorithm')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT').error().contains('Bad arguments for vector similarity number of parameters')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6').error().contains('Expected 6 parameters but got 0')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '1').error().contains('Bad number of arguments for vector similarity index: got 1 but expected even number')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '2', 'SIZE').error().contains('Bad arguments for algorithm FLAT: SIZE')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '2', 'TYPE').error().contains('Bad arguments for vector similarity FLAT index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DIM').error().contains('Bad arguments for vector similarity FLAT index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying TYPE argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying DIM argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '4', 'TYPE', 'FLOAT32', 'DIM', '1024').error().contains('Missing mandatory parameter: cannot create FLAT index without specifying DISTANCE_METRIC argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC').error().contains('Bad arguments for vector similarity FLAT index metric')
    # same missing-argument matrix for HNSW
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW').error().contains('Bad arguments for vector similarity number of parameters')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6').error().contains('Expected 6 parameters but got 0')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '1').error().contains('Bad number of arguments for vector similarity index: got 1 but expected even number')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '2', 'SIZE').error().contains('Bad arguments for algorithm HNSW: SIZE')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '2', 'TYPE').error().contains('Bad arguments for vector similarity HNSW index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DIM').error().contains('Bad arguments for vector similarity HNSW index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying TYPE argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DISTANCE_METRIC', 'IP').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying DIM argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '4', 'TYPE', 'FLOAT32', 'DIM', '1024').error().contains('Missing mandatory parameter: cannot create HNSW index without specifying DISTANCE_METRIC argument')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC').error().contains('Bad arguments for vector similarity HNSW index metric')
    # invalid init args
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'DOUBLE', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity HNSW index type')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', 'str', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity HNSW index dim')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'REDIS').error().contains('Bad arguments for vector similarity HNSW index metric')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'REDIS', '6', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP').error().contains('Bad arguments for vector similarity algorithm')
    # non-numeric values for the optional tuning parameters
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '10', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', 'str', 'BLOCK_SIZE', '16') \
        .error().contains('Bad arguments for vector similarity FLAT index initial cap')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '10', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'BLOCK_SIZE', 'str') \
        .error().contains('Bad arguments for vector similarity FLAT index blocksize')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', 'str', 'M', '16', 'EF_CONSTRUCTION', '200') \
        .error().contains('Bad arguments for vector similarity HNSW index initial cap')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', 'str', 'EF_CONSTRUCTION', '200') \
        .error().contains('Bad arguments for vector similarity HNSW index m')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', '16', 'EF_CONSTRUCTION', 'str') \
        .error().contains('Bad arguments for vector similarity HNSW index efConstruction')
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '100', 'M', '16', 'EF_RUNTIME', 'str') \
        .error().contains('Bad arguments for vector similarity HNSW index efRuntime')
    # test wrong query word
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '12', 'TYPE', 'FLOAT32', 'DIM', '1024', 'DISTANCE_METRIC', 'IP', 'INITIAL_CAP', '10', 'M', '16', 'EF_CONSTRUCTION', '200')
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh REDIS 4]').error().contains('Invalid Vector similarity type')
    env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK str]').error().contains('Syntax error')
def load_vectors_into_redis(con, vector_field, dim, num_vectors):
    """Load ``num_vectors`` random float32 vectors into Redis hashes.

    Each hash key is the vector's index ``i`` and stores the raw vector bytes
    under ``vector_field`` plus a text field ``t`` holding ``i % 10``.

    Returns a list of ``(id, vector)`` tuples for later verification.
    """
    data = np.float32(np.random.random((num_vectors, dim)))
    id_vec_list = []
    # Queue all HSETs on a pipeline so they go out in one round trip.
    # The original code created the pipeline but issued every command on the
    # plain connection, leaving the pipeline empty and defeating its purpose.
    p = con.pipeline(transaction=False)
    for i, vector in enumerate(data):
        p.execute_command('HSET', i, vector_field, vector.tobytes(), 't', i % 10)
        id_vec_list.append((i, vector))
    p.execute()
    return id_vec_list
def test_with_fields(env):
    """Compare a RETURN-fields search against the same search with NOCONTENT."""
    conn = getConnectionByEnv(env)
    dim = 128
    num_vectors = 100
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'FLOAT32', 'DIM', dim, 'DISTANCE_METRIC', 'L2', 't', 'TEXT')
    load_vectors_into_redis(conn, 'v', dim, num_vectors)
    query = np.float32(np.random.random((1, dim)))
    # Both searches share everything except the projection arguments.
    base_args = ['FT.SEARCH', 'idx', '5 @v:[$vec_param TOPK 100]',
                 'SORTBY', 'v_score', 'PARAMS', 2, 'vec_param', query.tobytes()]
    res = env.cmd(*(base_args + ['RETURN', 2, 'v_score', 't']))
    res_nocontent = env.cmd(*(base_args + ['NOCONTENT']))
    # The document ids (every other element of the full result) must match
    # the NOCONTENT result exactly, and 't' must appear in the field list.
    env.assertEqual(res[1::2], res_nocontent[1:])
    env.assertEqual('t', res[2][2])
def get_vecsim_memory(env, index_key, field_name):
    """Return the vector index's memory usage in megabytes via FT.DEBUG."""
    debug_info = to_dict(env.cmd("FT.DEBUG", "VECSIM_INFO", index_key, field_name))
    # MEMORY is reported in bytes; 0x100000 bytes == 1 MiB.
    return float(debug_info["MEMORY"]) / 0x100000
def test_memory_info(env):
    """Memory accounting sanity checks for a FLAT vector index.

    Adds two vectors and deletes them again, verifying after every step that
    Redis' total memory, RediSearch's reported vector-index memory and the
    VecSim debug memory all move in the expected direction and remain
    consistent with each other.
    """
    # Skip on cluster as FT.DEBUG not supported.
    env.skipOnCluster()
    conn = getConnectionByEnv(env)  # original fetched the connection twice
    dimension = 128
    index_key = 'idx'
    vector_field = 'v'
    # Create index. Flat index implementation will free memory when deleting
    # vectors, so it is a good candidate for this test with respect to memory
    # consumption.  ('BLOCK_SiZE' relies on case-insensitive argument parsing
    # on the server side — TODO confirm and normalize the casing.)
    conn.execute_command('FT.CREATE', index_key, 'SCHEMA', vector_field, 'VECTOR', 'FLAT', '8',
                         'TYPE', 'FLOAT32', 'DIM', dimension, 'DISTANCE_METRIC', 'L2', 'BLOCK_SiZE', '1')

    def read_memory():
        # Snapshot all three figures as (redis, redisearch, vecsim), in MB.
        return (get_redis_memory_in_mb(env),
                get_redisearch_vector_index_memory(env, index_key=index_key),
                get_vecsim_memory(env, index_key=index_key, field_name=vector_field))

    def check_consistency(readings):
        redis_mem, redisearch_mem, vecsim_mem = readings
        # Verify redis memory >= redisearch index memory.
        env.assertLessEqual(redisearch_mem, redis_mem)
        # Verify vecsim memory == redisearch memory.
        env.assertEqual(vecsim_mem, redisearch_mem)

    def check_step(prev_readings, increased):
        # After an insert every figure must grow (or stay equal); after a
        # delete every figure must shrink (or stay equal).
        cur = read_memory()
        for before, after in zip(prev_readings, cur):
            if increased:
                env.assertLessEqual(before, after)
            else:
                env.assertLessEqual(after, before)
        check_consistency(cur)
        return cur

    prev = read_memory()
    check_consistency(prev)

    vector = np.float32(np.random.random((1, dimension)))

    # Add two vectors: memory must go up after each insert.
    conn.execute_command('HSET', 1, vector_field, vector.tobytes())
    prev = check_step(prev, increased=True)
    conn.execute_command('HSET', 2, vector_field, vector.tobytes())
    prev = check_step(prev, increased=True)

    # Delete them again: memory must come back down after each delete.
    conn.execute_command('DEL', 2)
    prev = check_step(prev, increased=False)
    conn.execute_command('DEL', 1)
    check_step(prev, increased=False)
| en | 0.413546 | # -*- coding: utf-8 -*- # todo: make test work on coordinator # print message_bytes # print base64_bytes # print base64_message # RANGE uses topk but translate to base64 before ##################### ## another example ## ##################### # print message_bytes # print base64_bytes # print base64_message # RANGE uses topk but translate to base64 before This test returns 4 results instead of the expected 3. The HNSW library return the additional results. env.expect('FT.SEARCH', 'idx', '@v:[abcdefgh TOPK 4]').equal([3L, 'b', ['v', 'abcdefgg'], 'c', ['v', 'aacdefgh'], 'd', ['v', 'azcdefgh']]) # test start # base64_vector = base64.b64encode(vector).decode('ascii') # Uncomment these tests when support for FLOAT64, INT32, INT64, is added. # Trying to run these tests right now will cause 'Bad arguments for vector similarity HNSW index type' error # conn.execute_command('FT.CREATE', 'idx2', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'INITIAL_CAP', '10', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20') # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'FLOAT64', 'DIM', '4096', 'DISTANCE_METRIC', 'L2', 'M', '32', 'EF_CONSTRUCTION', '100', 'EF_RUNTIME', '20']] # assertInfoField(env, 'idx2', 'attributes', info) # conn.execute_command('FT.CREATE', 'idx3', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '14', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'INITIAL_CAP', '10', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50') # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '64', 'EF_CONSTRUCTION', '400', 'EF_RUNTIME', '50']] # assertInfoField(env, 'idx3', 'attributes', info) # conn.execute_command('FT.CREATE', 'idx4', 'SCHEMA', 'v', 'VECTOR', 'HNSW', '6', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE') # info = [['identifier', 'v', 
'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'HNSW', 'TYPE', 'INT64', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'M', '16', 'EF_CONSTRUCTION', '200', 'EF_RUNTIME', '10']] # assertInfoField(env, 'idx4', 'attributes', info) # conn.execute_command('FT.CREATE', 'idx5', 'SCHEMA', 'v', 'VECTOR', 'FLAT', '6', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE') # info = [['identifier', 'v', 'attribute', 'v', 'type', 'VECTOR', 'ALGORITHM', 'FLAT', 'TYPE', 'INT32', 'DIM', '64', 'DISTANCE_METRIC', 'COSINE', 'BLOCK_SIZE', str(1024 * 1024)]] # assertInfoField(env, 'idx5', 'attributes', info) # missing init args # invalid init args # test wrong query word # Skip on cluster as FT.DEBUG not supported. # This test flow adds two vectors and deletes them. The test checks for memory increase in Redis and RediSearch upon insertion and decrease upon delete. # Create index. Flat index implementation will free memory when deleting vectors, so it is a good candidate for this test with respect to memory consumption. # Verify redis memory >= redisearch index memory # Add vector. # Verify current memory readings > previous memory readings. # Verify redis memory >= redisearch index memory #verify vecsim memory == redisearch memory # Add vector. # Verify current memory readings > previous memory readings. # Verify redis memory >= redisearch index memory #verify vecsim memory == redisearch memory # Delete vector # Verify current memory readings < previous memory readings. # Verify redis memory >= redisearch index memory #verify vecsim memory == redisearch memory # Delete vector # Verify current memory readings < previous memory readings. # Verify redis memory >= redisearch index memory #verify vecsim memory == redisearch memory | 2.054932 | 2 |
aesara/tensor/sharedvar.py | anirudhacharya/Theano-PyMC | 0 | 6624884 | import traceback
import warnings
import numpy as np
from aesara.compile import SharedVariable, shared_constructor
from aesara.misc.safe_asarray import _asarray
from aesara.tensor import _get_vector_length
from aesara.tensor.type import TensorType
from aesara.tensor.var import _tensor_py_operators
def load_shared_variable(val):
    """
    This function is only here to keep some pickles loading
    after a failed fix done in August 2011.
    It can be removed after sufficient time has passed.

    Parameters
    ----------
    val : numpy.ndarray
        The pickled value of the shared variable.

    Returns
    -------
    The shared variable produced by `tensor_constructor`.
    """
    # Only the *name* of this function matters: old pickles reference it.
    # The actual work is delegated to the regular constructor.
    return tensor_constructor(val)
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
class TensorSharedVariable(_tensor_py_operators, SharedVariable):
    """A `SharedVariable` with the tensor operator interface mixed in."""

    pass
@_get_vector_length.register(TensorSharedVariable)
def _get_vector_length_TensorSharedVariable(var_inst, var):
    """Return the length of a shared variable's current (borrowed) value."""
    current_value = var.get_value(borrow=True)
    return len(current_value)
@shared_constructor
def tensor_constructor(
    value,
    name=None,
    strict=False,
    allow_downcast=None,
    borrow=False,
    shape=None,
    target="cpu",
    broadcastable=None,
):
    """
    SharedVariable Constructor for TensorType.

    Notes
    -----
    The default is to assume that the `shape` value might be resized in any
    dimension, so the default shape is ``(None,) * len(value.shape)``. The
    optional `shape` argument will override this default.

    Raises
    ------
    TypeError
        If `target` is not ``"cpu"`` or `value` is not a NumPy ndarray.
    """
    if broadcastable is not None:
        warnings.warn(
            "The `broadcastable` keyword is deprecated; use `shape`.",
            DeprecationWarning,
        )
        shape = broadcastable
    if target != "cpu":
        raise TypeError("not for cpu")
    if not isinstance(value, np.ndarray):
        # Give the caller a useful message instead of a bare TypeError().
        raise TypeError(
            f"Expected a NumPy ndarray, got {type(value).__name__}"
        )
    # if no shape is given, then the default is to assume that
    # the value might be resized in any dimension in the future.
    if shape is None:
        shape = (False,) * len(value.shape)
    # Local renamed from `type` to avoid shadowing the builtin.
    tensor_type = TensorType(value.dtype, shape=shape)
    return TensorSharedVariable(
        type=tensor_type,
        value=np.array(value, copy=(not borrow)),
        name=name,
        strict=strict,
        allow_downcast=allow_downcast,
    )
# TensorSharedVariable brings in the tensor operators, is not ideal, but works
# as long as we don't do purely scalar-scalar operations
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
#
# N.B. THERE IS ANOTHER CLASS CALLED ScalarSharedVariable in the
# aesara.scalar.sharedvar file. It is not registered as a shared_constructor,
# this one is.
class ScalarSharedVariable(_tensor_py_operators, SharedVariable):
    """A shared variable holding a 0-d tensor (scalar) value."""

    pass
@shared_constructor
def scalar_constructor(
    value, name=None, strict=False, allow_downcast=None, borrow=False, target="cpu"
):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter as we convert ``value`` to an
    ndarray (this is a new object). This respects the semantic of
    borrow, as it is a hint to Aesara that we can reuse it.

    Raises
    ------
    TypeError
        If `target` is not ``"cpu"`` or `value` is not a numeric scalar.
    """
    if target != "cpu":
        raise TypeError("not for cpu")
    if not isinstance(value, (np.number, float, int, complex)):
        # Give the caller a useful message instead of a bare TypeError().
        raise TypeError(
            f"Expected a scalar numeric value, got {type(value).__name__}"
        )
    try:
        dtype = value.dtype
    except AttributeError:
        # Plain Python scalars have no `.dtype`; let NumPy infer one.
        # (Narrowed from `except Exception`, which would have masked
        # unrelated failures.)
        dtype = np.asarray(value).dtype
    dtype = str(dtype)
    value = _asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), shape=[])
    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(
            type=tensor_type,
            value=np.array(value, copy=True),
            name=name,
            strict=strict,
            allow_downcast=allow_downcast,
        )
        return rval
    except Exception:
        # Surface the original failure to the user before re-raising.
        traceback.print_exc()
        raise
| import traceback
import warnings
import numpy as np
from aesara.compile import SharedVariable, shared_constructor
from aesara.misc.safe_asarray import _asarray
from aesara.tensor import _get_vector_length
from aesara.tensor.type import TensorType
from aesara.tensor.var import _tensor_py_operators
def load_shared_variable(val):
    """
    This function is only here to keep some pickles loading
    after a failed fix done in August 2011.
    It can be removed after sufficient time has passed.

    Parameters
    ----------
    val : numpy.ndarray
        The pickled value of the shared variable.

    Returns
    -------
    The shared variable produced by `tensor_constructor`.
    """
    # Only the *name* of this function matters: old pickles reference it.
    # The actual work is delegated to the regular constructor.
    return tensor_constructor(val)
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
class TensorSharedVariable(_tensor_py_operators, SharedVariable):
    """A `SharedVariable` with the tensor operator interface mixed in."""

    pass
@_get_vector_length.register(TensorSharedVariable)
def _get_vector_length_TensorSharedVariable(var_inst, var):
    """Return the length of a shared variable's current (borrowed) value."""
    current_value = var.get_value(borrow=True)
    return len(current_value)
@shared_constructor
def tensor_constructor(
    value,
    name=None,
    strict=False,
    allow_downcast=None,
    borrow=False,
    shape=None,
    target="cpu",
    broadcastable=None,
):
    """
    SharedVariable Constructor for TensorType.

    Notes
    -----
    The default is to assume that the `shape` value might be resized in any
    dimension, so the default shape is ``(None,) * len(value.shape)``. The
    optional `shape` argument will override this default.

    Raises
    ------
    TypeError
        If `target` is not ``"cpu"`` or `value` is not a NumPy ndarray.
    """
    if broadcastable is not None:
        warnings.warn(
            "The `broadcastable` keyword is deprecated; use `shape`.",
            DeprecationWarning,
        )
        shape = broadcastable
    if target != "cpu":
        raise TypeError("not for cpu")
    if not isinstance(value, np.ndarray):
        # Give the caller a useful message instead of a bare TypeError().
        raise TypeError(
            f"Expected a NumPy ndarray, got {type(value).__name__}"
        )
    # if no shape is given, then the default is to assume that
    # the value might be resized in any dimension in the future.
    if shape is None:
        shape = (False,) * len(value.shape)
    # Local renamed from `type` to avoid shadowing the builtin.
    tensor_type = TensorType(value.dtype, shape=shape)
    return TensorSharedVariable(
        type=tensor_type,
        value=np.array(value, copy=(not borrow)),
        name=name,
        strict=strict,
        allow_downcast=allow_downcast,
    )
# TensorSharedVariable brings in the tensor operators, is not ideal, but works
# as long as we don't do purely scalar-scalar operations
# _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__
#
# N.B. THERE IS ANOTHER CLASS CALLED ScalarSharedVariable in the
# aesara.scalar.sharedvar file. It is not registered as a shared_constructor,
# this one is.
class ScalarSharedVariable(_tensor_py_operators, SharedVariable):
    """A shared variable holding a 0-d tensor (scalar) value."""

    pass
@shared_constructor
def scalar_constructor(
    value, name=None, strict=False, allow_downcast=None, borrow=False, target="cpu"
):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter as we convert ``value`` to an
    ndarray (this is a new object). This respects the semantic of
    borrow, as it is a hint to Aesara that we can reuse it.

    Raises
    ------
    TypeError
        If `target` is not ``"cpu"`` or `value` is not a numeric scalar.
    """
    if target != "cpu":
        raise TypeError("not for cpu")
    if not isinstance(value, (np.number, float, int, complex)):
        # Give the caller a useful message instead of a bare TypeError().
        raise TypeError(
            f"Expected a scalar numeric value, got {type(value).__name__}"
        )
    try:
        dtype = value.dtype
    except AttributeError:
        # Plain Python scalars have no `.dtype`; let NumPy infer one.
        # (Narrowed from `except Exception`, which would have masked
        # unrelated failures.)
        dtype = np.asarray(value).dtype
    dtype = str(dtype)
    value = _asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), shape=[])
    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(
            type=tensor_type,
            value=np.array(value, copy=True),
            name=name,
            strict=strict,
            allow_downcast=allow_downcast,
        )
        return rval
    except Exception:
        # Surface the original failure to the user before re-raising.
        traceback.print_exc()
        raise
| en | 0.879861 | This function is only here to keep some pickles loading after a failed fix done in August 2011. It can be removed after sufficient time has passed. # _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__ SharedVariable Constructor for TensorType. Notes ----- The default is to assume that the `shape` value might be resized in any dimension, so the default shape is ``(None,) * len(value.shape)``. The optional `shape` argument will override this default. # if no shape is given, then the default is to assume that # the value might be resized in any dimension in the future. # # TensorSharedVariable brings in the tensor operators, is not ideal, but works # as long as we don't do purely scalar-scalar operations # _tensor_py_operators is first to have its version of __{gt,ge,lt,le}__ # # N.B. THERE IS ANOTHER CLASS CALLED ScalarSharedVariable in the # aesara.scalar.sharedvar file. It is not registered as a shared_constructor, # this one is. SharedVariable constructor for scalar values. Default: int64 or float64. Notes ----- We implement this using 0-d tensors for now. We ignore the borrow parameter as we convert ``value`` to an ndarray (this is a new object). This respects the semantic of borrow, as it is a hint to Aesara that we can reuse it. # Do not pass the dtype to asarray because we want this to fail if # strict is True and the types do not match. | 2.236089 | 2 |
homeassistant/components/magichome/scene.py | LIULi-VVET/home-assistant | 0 | 6624885 | <filename>homeassistant/components/magichome/scene.py
"""Support for the MagicHome scenes."""
from homeassistant.components.scene import DOMAIN, Scene
from . import DATA_MAGICHOME, MagicHomeDevice
ENTITY_ID_FORMAT = DOMAIN + ".{}"
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up MagicHome scenes."""
    if discovery_info is None:
        return
    gateway = hass.data[DATA_MAGICHOME]
    # Resolve each discovered id to a device; unknown ids are skipped.
    resolved = (gateway.get_device_by_id(dev_id)
                for dev_id in discovery_info.get("dev_ids"))
    scenes = [MagicHomeScene(device) for device in resolved if device is not None]
    add_entities(scenes)
class MagicHomeScene(MagicHomeDevice, Scene):
    """Representation of a MagicHome scene."""

    def __init__(self, magichome):
        """Initialize the MagicHome scene entity."""
        super().__init__(magichome)
        object_id = magichome.object_id()
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)

    def activate(self):
        """Activate the scene."""
        self.magichome.activate()
| <filename>homeassistant/components/magichome/scene.py
"""Support for the MagicHome scenes."""
from homeassistant.components.scene import DOMAIN, Scene
from . import DATA_MAGICHOME, MagicHomeDevice
ENTITY_ID_FORMAT = DOMAIN + ".{}"
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up MagicHome scenes."""
    if discovery_info is None:
        return
    gateway = hass.data[DATA_MAGICHOME]
    # Resolve each discovered id to a device; unknown ids are skipped.
    resolved = (gateway.get_device_by_id(dev_id)
                for dev_id in discovery_info.get("dev_ids"))
    scenes = [MagicHomeScene(device) for device in resolved if device is not None]
    add_entities(scenes)
class MagicHomeScene(MagicHomeDevice, Scene):
    """Representation of a MagicHome scene."""

    def __init__(self, magichome):
        """Initialize the MagicHome scene entity."""
        super().__init__(magichome)
        object_id = magichome.object_id()
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)

    def activate(self):
        """Activate the scene."""
        self.magichome.activate()
| en | 0.310039 | Support for the MagicHome scenes. Set up MagicHome scenes. MagicHome Scene. Init MagicHome scene. Activate the scene. | 2.276738 | 2 |
rcsb/app/file/serverStatus.py | rcsb/py-rcsb_app_file | 0 | 6624886 | ##
# File: serverStatus.py
# Date: 11-Aug-2020
#
##
# pylint: skip-file
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import logging
from fastapi import APIRouter
from . import ConfigProvider
from rcsb.utils.io.ProcessStatusUtil import ProcessStatusUtil
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/status", tags=["status"])
def serverStatus():
    """Detailed status endpoint: message, application version and process info."""
    config = ConfigProvider.ConfigProvider()
    process_info = ProcessStatusUtil().getInfo()
    return {"msg": "Status is nominal!", "version": config.getVersion(), "status": process_info}
@router.get("/", tags=["status"])
def rootServerStatus():
    """Basic liveness endpoint at the service root."""
    return {"msg": "Service is up!"}
@router.get("/healthcheck", tags=["status"])
def rootHealthCheck():
    """Health-check endpoint; returns the literal string "UP"."""
    return "UP"
| ##
# File: serverStatus.py
# Date: 11-Aug-2020
#
##
# pylint: skip-file
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import logging
from fastapi import APIRouter
from . import ConfigProvider
from rcsb.utils.io.ProcessStatusUtil import ProcessStatusUtil
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/status", tags=["status"])
def serverStatus():
    """Detailed status endpoint: message, application version and process info."""
    config = ConfigProvider.ConfigProvider()
    process_info = ProcessStatusUtil().getInfo()
    return {"msg": "Status is nominal!", "version": config.getVersion(), "status": process_info}
@router.get("/", tags=["status"])
def rootServerStatus():
    """Basic liveness endpoint at the service root."""
    return {"msg": "Service is up!"}
@router.get("/healthcheck", tags=["status"])
def rootHealthCheck():
    """Health-check endpoint; returns the literal string "UP"."""
    return "UP"
| en | 0.41752 | ## # File: serverStatus.py # Date: 11-Aug-2020 # ## # pylint: skip-file | 2.154239 | 2 |
myapp/utils.py | codehugger/Flask-Starter | 2 | 6624887 | # -*- coding: utf-8 -*-
import json
import datetime
from flask import make_response
from myapp.extensions import db
class JSONAppEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles datetimes, DB models and sets."""

    def default(self, obj):
        """Serialize values the stock encoder cannot handle, else defer."""
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        if isinstance(obj, db.Model):
            return obj.serialize()
        if isinstance(obj, set):
            return list(obj)
        return json.JSONEncoder.default(self, obj)
def json_response(data, status=200, headers=None):
    """Serialize ``data`` with the app encoder and wrap it in a JSON response."""
    if headers is None:
        headers = {}
    # Mutate the caller-supplied dict (when given) so it reflects the final headers.
    headers['Content-Type'] = 'application/json'
    payload = json.dumps(data, cls=JSONAppEncoder)
    return make_response(payload, status, headers)
| # -*- coding: utf-8 -*-
import json
import datetime
from flask import make_response
from myapp.extensions import db
class JSONAppEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles datetimes, DB models and sets."""

    def default(self, obj):
        """Serialize values the stock encoder cannot handle, else defer."""
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        if isinstance(obj, db.Model):
            return obj.serialize()
        if isinstance(obj, set):
            return list(obj)
        return json.JSONEncoder.default(self, obj)
def json_response(data, status=200, headers=None):
    """Serialize ``data`` with the app encoder and wrap it in a JSON response."""
    if headers is None:
        headers = {}
    # Mutate the caller-supplied dict (when given) so it reflects the final headers.
    headers['Content-Type'] = 'application/json'
    payload = json.dumps(data, cls=JSONAppEncoder)
    return make_response(payload, status, headers)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.730525 | 3 |
universities/urls.py | MadanNeupane/College-Finder | 0 | 6624888 | <gh_stars>0
from django.urls import path
from . import views
from django.contrib.auth.decorators import login_required

# URL routes for the universities app; both views require a logged-in user.
urlpatterns = [
    # Listing page for all universities.
    path('', login_required(views.universities_page), name='universities'),
    # Detail page addressed by the university's slug.
    path('<slug:slug>/', login_required(views.university_detail), name='university_detail'),
]
from django.urls import path
from . import views
from django.contrib.auth.decorators import login_required

# URL routes for the universities app; both views require a logged-in user.
urlpatterns = [
    # Listing page for all universities.
    path('', login_required(views.universities_page), name='universities'),
    # Detail page addressed by the university's slug.
    path('<slug:slug>/', login_required(views.university_detail), name='university_detail'),
]
src/log.py | winsbe01/nq | 0 | 6624889 | import logging
class NqLog:
def setup(self):
nqlog = logging.getLogger("nq")
nqlog.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
console.setFormatter(formatter)
nqlog.addHandler(console)
| import logging
class NqLog:
    """Configures the application-wide "nq" logger."""

    def setup(self):
        """Attach a DEBUG-level console handler to the "nq" logger.

        Safe to call more than once: the handler is only added the first
        time, so repeated setup does not duplicate every log line.
        """
        nqlog = logging.getLogger("nq")
        nqlog.setLevel(logging.DEBUG)
        # Guard against stacking a second console handler when setup() is
        # called repeatedly (the original code duplicated output).
        if nqlog.handlers:
            return
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        console.setFormatter(formatter)
        nqlog.addHandler(console)
| none | 1 | 2.71735 | 3 | |
scripts/delete/jobbernetes.py | realmar/Jobbernetes | 1 | 6624890 | #!/usr/bin/env python3
import __init__
from lib.jobbernetes import task
def delete():
task("delete")
if __name__ == "__main__":
delete()
#!/usr/bin/env python3
import __init__

from lib.jobbernetes import task


def delete():
    """Invoke the shared Jobbernetes `task` helper with the "delete" action."""
    task("delete")


if __name__ == "__main__":
    delete()
| fr | 0.221828 | #!/usr/bin/env python3 | 1.38961 | 1 |
src/pretix/base/metrics.py | prereg/prereg | 0 | 6624891 | import json
import math
import time
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.db import connection
from pretix.base.models import Event, Invoice, Order, OrderPosition, Organizer
from pretix.celery_app import app
if settings.HAS_REDIS:
import django_redis
redis = django_redis.get_redis_connection("redis")
REDIS_KEY = "pretix_metrics"
_INF = float("inf")
_MINUS_INF = float("-inf")
def _float_to_go_string(d):
# inspired by https://github.com/prometheus/client_python/blob/master/prometheus_client/core.py
if d == _INF:
return '+Inf'
elif d == _MINUS_INF:
return '-Inf'
elif math.isnan(d):
return 'NaN'
else:
return repr(float(d))
class Metric(object):
    """
    Base Metrics Object
    """

    def __init__(self, name, helpstring, labelnames=None):
        """Store the metric name, its help text and the required label names."""
        self.name = name
        self.helpstring = helpstring
        self.labelnames = labelnames or []

    def __repr__(self):
        return "{}{{{}}}".format(self.name, ",".join(self.labelnames))

    def _check_label_consistency(self, labels):
        """Raise ValueError unless *labels* matches the declared labels exactly."""
        # Every declared label must be present ...
        missing = [ln for ln in self.labelnames if ln not in labels]
        if missing:
            raise ValueError("Label {0} not specified.".format(missing[0]))
        # ... and no labels beyond the declared ones may be given.
        if len(labels) != len(self.labelnames):
            raise ValueError("Unknown labels used: {}".format(", ".join(set(labels) - set(self.labelnames))))

    def _construct_metric_identifier(self, metricname, labels=None, labelnames=None):
        """Build the exposition-format identifier, e.g. ``name{a="1",b="2"}``."""
        if not labels:
            return metricname
        rendered = [
            '{}="{}"'.format(ln, labels[ln])
            for ln in (labelnames or self.labelnames)
        ]
        return metricname + "{" + ",".join(rendered) + "}"

    def _inc_in_redis(self, key, amount, pipeline=None):
        """
        Increments given key in Redis.
        """
        if not settings.HAS_REDIS:
            return
        (pipeline or redis).hincrbyfloat(REDIS_KEY, key, amount)

    def _set_in_redis(self, key, value, pipeline=None):
        """
        Sets given key in Redis.
        """
        if not settings.HAS_REDIS:
            return
        (pipeline or redis).hset(REDIS_KEY, key, value)

    def _get_redis_pipeline(self):
        """Return a fresh Redis pipeline, or None when Redis is unavailable."""
        if settings.HAS_REDIS:
            return redis.pipeline()

    def _execute_redis_pipeline(self, pipeline):
        """Execute the given pipeline; no-op when Redis is unavailable."""
        if settings.HAS_REDIS:
            return pipeline.execute()
class Counter(Metric):
"""
Counter Metric Object
Counters can only be increased, they can neither be set to a specific value
nor decreased.
"""
def inc(self, amount=1, **kwargs):
"""
Increments Counter by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Counter cannot be increased by negative values.")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
class Gauge(Metric):
"""
Gauge Metric Object
Gauges can be set to a specific value, increased and decreased.
"""
def set(self, value=1, **kwargs):
"""
Sets Gauge to a specific value for the labels specified in kwargs.
"""
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._set_in_redis(fullmetric, value)
def inc(self, amount=1, **kwargs):
"""
Increments Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use dec().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
def dec(self, amount=1, **kwargs):
"""
Decrements Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount * -1)
class Histogram(Metric):
"""
Histogram Metric Object
"""
def __init__(self, name, helpstring, labelnames=None,
buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 30.0, _INF)):
if list(buckets) != sorted(buckets):
# This is probably an error on the part of the user,
# so raise rather than sorting for them.
raise ValueError('Buckets not in sorted order')
if buckets and buckets[-1] != _INF:
buckets.append(_INF)
if len(buckets) < 2:
raise ValueError('Must have at least two buckets')
self.buckets = buckets
super().__init__(name, helpstring, labelnames)
def observe(self, amount, **kwargs):
"""
Stores a value in the histogram for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
pipe = self._get_redis_pipeline()
countmetric = self._construct_metric_identifier(self.name + '_count', kwargs)
self._inc_in_redis(countmetric, 1, pipeline=pipe)
summetric = self._construct_metric_identifier(self.name + '_sum', kwargs)
self._inc_in_redis(summetric, amount, pipeline=pipe)
kwargs_le = dict(kwargs.items())
for i, bound in enumerate(self.buckets):
if amount <= bound:
kwargs_le['le'] = _float_to_go_string(bound)
bmetric = self._construct_metric_identifier(self.name + '_bucket', kwargs_le,
labelnames=self.labelnames + ["le"])
self._inc_in_redis(bmetric, 1, pipeline=pipe)
self._execute_redis_pipeline(pipe)
def estimate_count_fast(type):
"""
See https://wiki.postgresql.org/wiki/Count_estimate
"""
if 'postgres' in settings.DATABASES['default']['ENGINE']:
cursor = connection.cursor()
cursor.execute("select reltuples from pg_class where relname='%s';" % type._meta.db_table)
row = cursor.fetchone()
return int(row[0])
else:
return type.objects.count()
def metric_values():
"""
Produces the the values to be presented to the monitoring system
"""
metrics = defaultdict(dict)
# Metrics from redis
if settings.HAS_REDIS:
for key, value in redis.hscan_iter(REDIS_KEY):
dkey = key.decode("utf-8")
splitted = dkey.split("{", 2)
value = float(value.decode("utf-8"))
metrics[splitted[0]]["{" + splitted[1]] = value
# Aliases
aliases = {
'pretix_view_requests_total': 'pretix_view_duration_seconds_count'
}
for a, atarget in aliases.items():
metrics[a] = metrics[atarget]
# Throwaway metrics
exact_tables = [
Order, OrderPosition, Invoice, Event, Organizer
]
for m in apps.get_models(): # Count all models
if any(issubclass(m, p) for p in exact_tables):
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = m.objects.count()
else:
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = estimate_count_fast(m)
if settings.HAS_CELERY:
client = app.broker_connection().channel().client
for q in settings.CELERY_TASK_QUEUES:
llen = client.llen(q.name)
lfirst = client.lindex(q.name, -1)
metrics['pretix_celery_tasks_queued_count']['{queue="%s"}' % q.name] = llen
if lfirst:
ldata = json.loads(lfirst)
dt = time.time() - ldata.get('created', 0)
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = dt
else:
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = 0
return metrics
"""
Provided metrics
"""
pretix_view_duration_seconds = Histogram("pretix_view_duration_seconds", "Return time of views.",
["status_code", "method", "url_name"])
pretix_task_runs_total = Counter("pretix_task_runs_total", "Total calls to a celery task",
["task_name", "status"])
pretix_task_duration_seconds = Histogram("pretix_task_duration_seconds", "Call time of a celery task",
["task_name"])
| import json
import math
import time
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.db import connection
from pretix.base.models import Event, Invoice, Order, OrderPosition, Organizer
from pretix.celery_app import app
if settings.HAS_REDIS:
import django_redis
redis = django_redis.get_redis_connection("redis")
REDIS_KEY = "pretix_metrics"
_INF = float("inf")
_MINUS_INF = float("-inf")
def _float_to_go_string(d):
# inspired by https://github.com/prometheus/client_python/blob/master/prometheus_client/core.py
if d == _INF:
return '+Inf'
elif d == _MINUS_INF:
return '-Inf'
elif math.isnan(d):
return 'NaN'
else:
return repr(float(d))
class Metric(object):
"""
Base Metrics Object
"""
def __init__(self, name, helpstring, labelnames=None):
self.name = name
self.helpstring = helpstring
self.labelnames = labelnames or []
def __repr__(self):
return self.name + "{" + ",".join(self.labelnames) + "}"
def _check_label_consistency(self, labels):
"""
Checks if the given labels provides exactly the labels that are required.
"""
# test if every required label is provided
for labelname in self.labelnames:
if labelname not in labels:
raise ValueError("Label {0} not specified.".format(labelname))
# now test if no further labels are required
if len(labels) != len(self.labelnames):
raise ValueError("Unknown labels used: {}".format(", ".join(set(labels) - set(self.labelnames))))
def _construct_metric_identifier(self, metricname, labels=None, labelnames=None):
"""
Constructs the scrapable metricname usable in the output format.
"""
if not labels:
return metricname
else:
named_labels = []
for labelname in (labelnames or self.labelnames):
named_labels.append('{}="{}"'.format(labelname, labels[labelname]))
return metricname + "{" + ",".join(named_labels) + "}"
def _inc_in_redis(self, key, amount, pipeline=None):
"""
Increments given key in Redis.
"""
if settings.HAS_REDIS:
if not pipeline:
pipeline = redis
pipeline.hincrbyfloat(REDIS_KEY, key, amount)
def _set_in_redis(self, key, value, pipeline=None):
"""
Sets given key in Redis.
"""
if settings.HAS_REDIS:
if not pipeline:
pipeline = redis
pipeline.hset(REDIS_KEY, key, value)
def _get_redis_pipeline(self):
if settings.HAS_REDIS:
return redis.pipeline()
def _execute_redis_pipeline(self, pipeline):
if settings.HAS_REDIS:
return pipeline.execute()
class Counter(Metric):
"""
Counter Metric Object
Counters can only be increased, they can neither be set to a specific value
nor decreased.
"""
def inc(self, amount=1, **kwargs):
"""
Increments Counter by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Counter cannot be increased by negative values.")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
class Gauge(Metric):
"""
Gauge Metric Object
Gauges can be set to a specific value, increased and decreased.
"""
def set(self, value=1, **kwargs):
"""
Sets Gauge to a specific value for the labels specified in kwargs.
"""
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._set_in_redis(fullmetric, value)
def inc(self, amount=1, **kwargs):
"""
Increments Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use dec().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount)
def dec(self, amount=1, **kwargs):
"""
Decrements Gauge by given amount for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
fullmetric = self._construct_metric_identifier(self.name, kwargs)
self._inc_in_redis(fullmetric, amount * -1)
class Histogram(Metric):
"""
Histogram Metric Object
"""
def __init__(self, name, helpstring, labelnames=None,
buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 30.0, _INF)):
if list(buckets) != sorted(buckets):
# This is probably an error on the part of the user,
# so raise rather than sorting for them.
raise ValueError('Buckets not in sorted order')
if buckets and buckets[-1] != _INF:
buckets.append(_INF)
if len(buckets) < 2:
raise ValueError('Must have at least two buckets')
self.buckets = buckets
super().__init__(name, helpstring, labelnames)
def observe(self, amount, **kwargs):
"""
Stores a value in the histogram for the labels specified in kwargs.
"""
if amount < 0:
raise ValueError("Amount must be greater than zero. Otherwise use inc().")
self._check_label_consistency(kwargs)
pipe = self._get_redis_pipeline()
countmetric = self._construct_metric_identifier(self.name + '_count', kwargs)
self._inc_in_redis(countmetric, 1, pipeline=pipe)
summetric = self._construct_metric_identifier(self.name + '_sum', kwargs)
self._inc_in_redis(summetric, amount, pipeline=pipe)
kwargs_le = dict(kwargs.items())
for i, bound in enumerate(self.buckets):
if amount <= bound:
kwargs_le['le'] = _float_to_go_string(bound)
bmetric = self._construct_metric_identifier(self.name + '_bucket', kwargs_le,
labelnames=self.labelnames + ["le"])
self._inc_in_redis(bmetric, 1, pipeline=pipe)
self._execute_redis_pipeline(pipe)
def estimate_count_fast(type):
"""
See https://wiki.postgresql.org/wiki/Count_estimate
"""
if 'postgres' in settings.DATABASES['default']['ENGINE']:
cursor = connection.cursor()
cursor.execute("select reltuples from pg_class where relname='%s';" % type._meta.db_table)
row = cursor.fetchone()
return int(row[0])
else:
return type.objects.count()
def metric_values():
"""
Produces the the values to be presented to the monitoring system
"""
metrics = defaultdict(dict)
# Metrics from redis
if settings.HAS_REDIS:
for key, value in redis.hscan_iter(REDIS_KEY):
dkey = key.decode("utf-8")
splitted = dkey.split("{", 2)
value = float(value.decode("utf-8"))
metrics[splitted[0]]["{" + splitted[1]] = value
# Aliases
aliases = {
'pretix_view_requests_total': 'pretix_view_duration_seconds_count'
}
for a, atarget in aliases.items():
metrics[a] = metrics[atarget]
# Throwaway metrics
exact_tables = [
Order, OrderPosition, Invoice, Event, Organizer
]
for m in apps.get_models(): # Count all models
if any(issubclass(m, p) for p in exact_tables):
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = m.objects.count()
else:
metrics['pretix_model_instances']['{model="%s"}' % m._meta] = estimate_count_fast(m)
if settings.HAS_CELERY:
client = app.broker_connection().channel().client
for q in settings.CELERY_TASK_QUEUES:
llen = client.llen(q.name)
lfirst = client.lindex(q.name, -1)
metrics['pretix_celery_tasks_queued_count']['{queue="%s"}' % q.name] = llen
if lfirst:
ldata = json.loads(lfirst)
dt = time.time() - ldata.get('created', 0)
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = dt
else:
metrics['pretix_celery_tasks_queued_age_seconds']['{queue="%s"}' % q.name] = 0
return metrics
"""
Provided metrics
"""
pretix_view_duration_seconds = Histogram("pretix_view_duration_seconds", "Return time of views.",
["status_code", "method", "url_name"])
pretix_task_runs_total = Counter("pretix_task_runs_total", "Total calls to a celery task",
["task_name", "status"])
pretix_task_duration_seconds = Histogram("pretix_task_duration_seconds", "Call time of a celery task",
["task_name"])
| en | 0.756257 | # inspired by https://github.com/prometheus/client_python/blob/master/prometheus_client/core.py Base Metrics Object Checks if the given labels provides exactly the labels that are required. # test if every required label is provided # now test if no further labels are required Constructs the scrapable metricname usable in the output format. Increments given key in Redis. Sets given key in Redis. Counter Metric Object Counters can only be increased, they can neither be set to a specific value nor decreased. Increments Counter by given amount for the labels specified in kwargs. Gauge Metric Object Gauges can be set to a specific value, increased and decreased. Sets Gauge to a specific value for the labels specified in kwargs. Increments Gauge by given amount for the labels specified in kwargs. Decrements Gauge by given amount for the labels specified in kwargs. Histogram Metric Object # This is probably an error on the part of the user, # so raise rather than sorting for them. Stores a value in the histogram for the labels specified in kwargs. See https://wiki.postgresql.org/wiki/Count_estimate Produces the the values to be presented to the monitoring system # Metrics from redis # Aliases # Throwaway metrics # Count all models Provided metrics | 2.120438 | 2 |
robo_gym/wrappers/env_wrappers/ur_ee_positioning_training.py | matteolucchi/robo-gym | 0 | 6624892 | <reponame>matteolucchi/robo-gym<gh_stars>0
import gym
import numpy as np
from typing import Tuple
class EndEffectorPositioningURTrainingCurriculum(gym.Wrapper):
def __init__(self, env, print_reward=False):
super().__init__(env)
self.env = env
# use counter as metric for level up
self.episode_counter = 0
self.reward_composition = {}
self.print_reward = print_reward
def reset(self, **kwargs):
if self.episode_counter % 5 == 0:
state = self.env.reset(randomize_start=True)
else:
state = self.env.reset(continue_on_success=True)
self.reward_composition = { 'goal_reached_weight': 0,
'collision_weight': 0,
'distance_weight': 0,
'smoothness_weight': 0,
'action_magnitude_weight': 0,
'velocity_magnitude_weight': 0}
return state
def step(self, action):
self.previous_action = self.env.previous_action
next_state, _, _, _ = self.env.step(action)
action = self.env.add_fixed_joints(action)
reward, done, info = self.reward(rs_state=self.env.rs_state, action=action)
if done:
self.episode_counter += 1
if done and self.print_reward:
print(f'Episode counter: {self.episode_counter} Current level: {self.get_level()}')
print(self.reward_composition)
return next_state, reward, done, info
def get_level(self):
level_thresholds = [75, 250, 500, 1000, 1500, 2500]
for i in range(len(level_thresholds)):
if self.episode_counter < level_thresholds[i]:
return i+1
return len(level_thresholds) + 1
def get_weights(self, level):
# weights
# reward for reaching the goal position
g_w = 2
# reward for collision (ground, table or self)
c_w = -1
# punishment according to the distance to the goal
d_w = -0.005
# punishment delta in two consecutive actions
s_w = -0.0002
# punishment for acting in general
a_w = -0.0001
# punishment for deltas in velocity
v_w = -0.0002
if level == 1:
s_w = s_w * 0
a_w = a_w * 0
v_w = v_w * 0
self.min_distance = 0.15
if level == 2:
d_w = d_w * 0
self.min_distance = 0.15
if level == 3:
d_w = d_w * 0
s_w = s_w * 5
a_w = a_w * 5
v_w = v_w * 5
self.min_distance = 0.15
if level == 4:
d_w = d_w * 0
s_w = s_w * 10
a_w = a_w * 10
v_w = v_w * 10
self.min_distance = 0.1
if level == 5:
d_w = d_w * 0
s_w = s_w * 15
a_w = a_w * 15
v_w = v_w * 15
self.min_distance = 0.05
if level == 6:
d_w = d_w * 0
s_w = s_w * 20
a_w = a_w * 20
v_w = v_w * 20
self.min_distance = 0.05
if level == 7:
d_w = d_w * 0
s_w = s_w * 25
a_w = a_w * 25
v_w = v_w * 25
self.min_distance = 0.01
return g_w, c_w, d_w, s_w, a_w, v_w
def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
env_state = self.env._robot_server_state_to_env_state(rs_state)
reward = 0
done = False
info = {}
level = self.get_level()
g_w, c_w, d_w, s_w, a_w, v_w = self.get_weights(level)
# Calculate distance to the target
target_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
euclidean_dist_3d = np.linalg.norm(target_coord - ee_coord)
joint_velocities = np.array([rs_state['base_joint_velocity'], rs_state['shoulder_joint_velocity'], rs_state['elbow_joint_velocity'], rs_state['wrist_1_joint_velocity'], rs_state['wrist_2_joint_velocity'], rs_state['wrist_3_joint_velocity']])
previous_action = self.previous_action
# distance weight
x = d_w * euclidean_dist_3d
reward += x
self.reward_composition['distance_weight'] += x
# smoothness
x = s_w * np.linalg.norm(action - previous_action)**2
reward += x
self.reward_composition['smoothness_weight'] += x
# action magnitude
x = a_w * np.linalg.norm(action)**2
reward += x
self.reward_composition['action_magnitude_weight'] += x
# velocity magnitude
x = v_w * np.linalg.norm(joint_velocities)**2
reward += x
self.reward_composition['velocity_magnitude_weight'] += x
if euclidean_dist_3d <= self.min_distance:
# goal reached
x = g_w * 1
reward = x
self.reward_composition['goal_reached_weight'] += x
done = True
info['final_status'] = 'success'
info['target_coord'] = target_coord
if rs_state['in_collision']:
# punishment for collision
x = c_w * 1
reward = x
self.reward_composition['collision_weight'] += x
done = True
info['final_status'] = 'collision'
info['target_coord'] = target_coord
if self.elapsed_steps >= self.max_episode_steps:
done = True
info['final_status'] = 'max_steps_exceeded'
info['target_coord'] = target_coord
return reward, done, info
| import gym
import numpy as np
from typing import Tuple
class EndEffectorPositioningURTrainingCurriculum(gym.Wrapper):
def __init__(self, env, print_reward=False):
super().__init__(env)
self.env = env
# use counter as metric for level up
self.episode_counter = 0
self.reward_composition = {}
self.print_reward = print_reward
def reset(self, **kwargs):
if self.episode_counter % 5 == 0:
state = self.env.reset(randomize_start=True)
else:
state = self.env.reset(continue_on_success=True)
self.reward_composition = { 'goal_reached_weight': 0,
'collision_weight': 0,
'distance_weight': 0,
'smoothness_weight': 0,
'action_magnitude_weight': 0,
'velocity_magnitude_weight': 0}
return state
def step(self, action):
self.previous_action = self.env.previous_action
next_state, _, _, _ = self.env.step(action)
action = self.env.add_fixed_joints(action)
reward, done, info = self.reward(rs_state=self.env.rs_state, action=action)
if done:
self.episode_counter += 1
if done and self.print_reward:
print(f'Episode counter: {self.episode_counter} Current level: {self.get_level()}')
print(self.reward_composition)
return next_state, reward, done, info
def get_level(self):
level_thresholds = [75, 250, 500, 1000, 1500, 2500]
for i in range(len(level_thresholds)):
if self.episode_counter < level_thresholds[i]:
return i+1
return len(level_thresholds) + 1
def get_weights(self, level):
# weights
# reward for reaching the goal position
g_w = 2
# reward for collision (ground, table or self)
c_w = -1
# punishment according to the distance to the goal
d_w = -0.005
# punishment delta in two consecutive actions
s_w = -0.0002
# punishment for acting in general
a_w = -0.0001
# punishment for deltas in velocity
v_w = -0.0002
if level == 1:
s_w = s_w * 0
a_w = a_w * 0
v_w = v_w * 0
self.min_distance = 0.15
if level == 2:
d_w = d_w * 0
self.min_distance = 0.15
if level == 3:
d_w = d_w * 0
s_w = s_w * 5
a_w = a_w * 5
v_w = v_w * 5
self.min_distance = 0.15
if level == 4:
d_w = d_w * 0
s_w = s_w * 10
a_w = a_w * 10
v_w = v_w * 10
self.min_distance = 0.1
if level == 5:
d_w = d_w * 0
s_w = s_w * 15
a_w = a_w * 15
v_w = v_w * 15
self.min_distance = 0.05
if level == 6:
d_w = d_w * 0
s_w = s_w * 20
a_w = a_w * 20
v_w = v_w * 20
self.min_distance = 0.05
if level == 7:
d_w = d_w * 0
s_w = s_w * 25
a_w = a_w * 25
v_w = v_w * 25
self.min_distance = 0.01
return g_w, c_w, d_w, s_w, a_w, v_w
def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
env_state = self.env._robot_server_state_to_env_state(rs_state)
reward = 0
done = False
info = {}
level = self.get_level()
g_w, c_w, d_w, s_w, a_w, v_w = self.get_weights(level)
# Calculate distance to the target
target_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
euclidean_dist_3d = np.linalg.norm(target_coord - ee_coord)
joint_velocities = np.array([rs_state['base_joint_velocity'], rs_state['shoulder_joint_velocity'], rs_state['elbow_joint_velocity'], rs_state['wrist_1_joint_velocity'], rs_state['wrist_2_joint_velocity'], rs_state['wrist_3_joint_velocity']])
previous_action = self.previous_action
# distance weight
x = d_w * euclidean_dist_3d
reward += x
self.reward_composition['distance_weight'] += x
# smoothness
x = s_w * np.linalg.norm(action - previous_action)**2
reward += x
self.reward_composition['smoothness_weight'] += x
# action magnitude
x = a_w * np.linalg.norm(action)**2
reward += x
self.reward_composition['action_magnitude_weight'] += x
# velocity magnitude
x = v_w * np.linalg.norm(joint_velocities)**2
reward += x
self.reward_composition['velocity_magnitude_weight'] += x
if euclidean_dist_3d <= self.min_distance:
# goal reached
x = g_w * 1
reward = x
self.reward_composition['goal_reached_weight'] += x
done = True
info['final_status'] = 'success'
info['target_coord'] = target_coord
if rs_state['in_collision']:
# punishment for collision
x = c_w * 1
reward = x
self.reward_composition['collision_weight'] += x
done = True
info['final_status'] = 'collision'
info['target_coord'] = target_coord
if self.elapsed_steps >= self.max_episode_steps:
done = True
info['final_status'] = 'max_steps_exceeded'
info['target_coord'] = target_coord
return reward, done, info | en | 0.90488 | # use counter as metric for level up # weights # reward for reaching the goal position # reward for collision (ground, table or self) # punishment according to the distance to the goal # punishment delta in two consecutive actions # punishment for acting in general # punishment for deltas in velocity # Calculate distance to the target # distance weight # smoothness # action magnitude # velocity magnitude # goal reached # punishment for collision | 2.880787 | 3 |
pyvcloud/schema/vcd/v1_5/schemas/vcloud/vdcTemplateListType.py | h-medjahed/pyvcloud | 0 | 6624893 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Apr 14 22:18:33 2015 by generateDS.py version 2.15a.
#
# Command line options:
# ('-o', 'schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py')
#
# Command line arguments:
# /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd
#
# Command line:
# /home/eli/qa/.venv/src/pyvcloud/.venv/bin/generateDS.py -o "schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py" /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd
#
# Current working directory (os.getcwd()):
# pyvcloud
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
from lxml import etree as etree_
# Global switch controlling validation of simple-type values in the
# generated code.  NOTE(review): the consumers of this flag live later
# in the generated module — confirm there before changing the default.
Validate_simpletypes_ = True
def parsexml_(*args, **kwargs):
    """Parse an XML document with lxml and return the parsed tree.

    Unless the caller supplies an explicit ``parser`` keyword, the lxml
    ElementTree-compatible parser is used so that, e.g., XML comments
    are ignored during parsing.
    """
    if 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
        def gds_format_datetime(self, input_data, input_name=''):
            """Render a datetime as an XSD ``xs:dateTime`` lexical string.

            The fractional-second part is emitted only when *input_data*
            has non-zero microseconds.  If the value is timezone-aware, a
            UTC-offset suffix is appended: ``Z`` for UTC, otherwise
            ``+hh:mm`` / ``-hh:mm``.
            """
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    # '%f' renders the fraction as '0.dddddd'; [2:] keeps
                    # only the six fractional digits.
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    # timedelta stores days separately from seconds; fold
                    # them into a single signed second count.
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
        def gds_format_date(self, input_data, input_name=''):
            """Render a date as an XSD ``xs:date`` lexical string.

            If *input_data* is timezone-aware, a UTC-offset suffix is
            appended (``Z`` or ``+hh:mm`` / ``-hh:mm``).  Plain
            ``datetime.date`` objects have no ``tzinfo`` attribute; the
            AttributeError is swallowed so they format without a suffix.
            """
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            try:
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        # Fold timedelta days into one signed second count.
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            except AttributeError:
                # date objects (unlike datetimes) carry no tzinfo.
                pass
            return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding applied when generated export code calls .encode() on attribute
# values before writing them to the output stream.
ExternalEncoding = 'ascii'
# Splits a Clark-notation tag into optional '{namespace}' and local-name groups.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace/newlines into a single token boundary.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a Clark-notation tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# Matches complete CDATA sections so quote_xml can pass them through unescaped.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces per indentation *level* to *outfile*; a no-op when
    pretty-printing is disabled."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    "Escape markup chars, but do not modify CDATA sections."
    if not inStr:
        return ''
    text = (isinstance(inStr, basestring) and inStr or
        '%s' % inStr)
    pieces = []
    cursor = 0
    # Escape the spans between CDATA sections; copy CDATA through verbatim.
    for match in CDATA_pattern_.finditer(text):
        pieces.append(quote_xml_aux(text[cursor:match.start()]))
        pieces.append(text[match.start():match.end()])
        cursor = match.end()
    pieces.append(quote_xml_aux(text[cursor:]))
    return ''.join(pieces)
def quote_xml_aux(inStr):
    """Escape the XML special characters '&', '<' and '>' in *inStr*.

    The original generated code replaced each character with its XML entity;
    that text had been corrupted into no-op replacements (e.g.
    ``replace('&', '&')``). '&' is escaped first so the entities introduced
    for '<' and '>' are not double-escaped.
    """
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* for use as an XML attribute and wrap it in quotes.

    '&', '<' and '>' become entities (the originals had been corrupted into
    no-op replacements, and the '&quot;' entity had collapsed into a broken
    triple-quote literal). The surrounding quote style is chosen to avoid
    conflicting with quotes inside the value; when both styles appear,
    double quotes are used and '"' is escaped as '&quot;'.
    Non-string input is first converted with '%s' formatting (equivalent to
    the old ``basestring`` test on Python 2, and Python-3 compatible).
    """
    s1 = inStr if isinstance(inStr, str) else '%s' % inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', '&quot;')
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a quoted Python string literal, choosing a
    quote style that avoids escaping where possible (triple quotes for
    multi-line values, double quotes when the value contains apostrophes)."""
    text = inStr
    if "'" not in text:
        return "'''%s'''" % text if '\n' in text else "'%s'" % text
    if '"' in text:
        text = text.replace('"', '\\"')
    return '"""%s"""' % text if '\n' in text else '"%s"' % text
def get_all_text_(node):
    """Concatenate *node*'s own text with the tail text of all its children
    (the full character data of a mixed-content element)."""
    pieces = [node.text if node.text is not None else '']
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*.

    Plain names are fetched directly; 'prefix:name' forms are resolved
    through the node's namespace map (lxml ``nsmap``) to the Clark-notation
    key. Returns None when the attribute (or prefix) is absent.
    """
    attributes = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attributes.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attributes.get('{%s}%s' % (uri, local, ))
    return None
class GDSParseError(Exception):
    """Raised (via raise_parse_error) when XML input cannot be parsed into
    the generated bindings, e.g. a malformed boolean or integer attribute."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, including the element tag and — when
    the module was imported with lxml — the source line number."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml elements expose the line they came from via .sourceline.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Wrapper for one piece of mixed XML content — character data, a simple
    typed value, or a nested complex element — tagged with a category and a
    content-type code so the document can be re-serialized in order."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        # category: one of the Category* constants above.
        # content_type: one of the Type* constants (meaningful for
        #   CategorySimple content).
        # name: element tag used when serializing the value.
        # value: the text string, simple value, or nested generated object.
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type argument is ignored; the stored
        # content type is returned unconditionally.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        """Serialize this content item to *outfile* as XML text."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:  # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        """Write a simple typed value as <name>value</name>, formatting the
        text according to self.content_type."""
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        """Attach this content to an lxml/ElementTree *element*: text goes
        into .text or the last child's .tail, simple values become a new
        subelement, complex values serialize themselves."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    # Text after an existing child belongs in that child's tail.
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:  # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        """Render the stored simple value as element text, formatted per
        self.content_type."""
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        """Write this item as 'model_.MixedContainer(...)' constructor source."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:  # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its XML schema
    data type (a single type or a chain of types), and whether it is a
    container (list-valued) member."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name): self.name = name
    def get_name(self): return self.name
    def set_data_type(self, data_type): self.data_type = data_type
    def get_data_type_chain(self): return self.data_type
    def get_data_type(self):
        """Return the effective type: the last entry of a type chain, the
        scalar type itself, or 'xs:string' for an empty chain."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'
    def set_container(self, container): self.container = container
    def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class VCloudExtensionType(GeneratedsSuper):
    """0.9 Cloud API extension type carrying arbitrary elements and
    attributes.

    The ``required`` attribute determines whether the server should fail
    when the extension is not understood.
    """
    subclass = None
    superclass = None
    def __init__(self, required=True, anytypeobjs_=None):
        self.original_tagname_ = None
        self.required = _cast(bool, required)
        # Keep the caller's list when one is supplied (no defensive copy,
        # matching the rest of the generated bindings).
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if VCloudExtensionType.subclass:
            return VCloudExtensionType.subclass(*args_, **kwargs_)
        else:
            return VCloudExtensionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fix: this method referenced the non-existent attribute
    # 'self._anytypeobjs_' (the attribute set in __init__ is
    # 'anytypeobjs_'), so every call raised AttributeError.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_required(self): return self.required
    def set_required(self, required): self.required = required
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def hasContent_(self):
        # True when there is at least one wildcard child to serialize.
        if (
            self.anytypeobjs_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VCloudExtensionType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Preserve the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VCloudExtensionType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VCloudExtensionType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VCloudExtensionType'):
        """Write wildcard (xs:anyAttribute) attributes plus 'required'."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Attribute in the xsi namespace: emit with the 'xsi:' prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.add(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.add(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (
                                name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a synthetic prefix.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (
                                unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (
                                unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.add(name)
                        outfile.write(' %s=%s' % (
                            name, quote_attrib(value), ))
        if self.required is not None and 'required' not in already_processed:
            already_processed.add('required')
            outfile.write(' required="%s"' % self.gds_format_boolean(self.required, input_name='required'))
    def exportChildren(self, outfile, level, namespace_='', name_='VCloudExtensionType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VCloudExtensionType'):
        """Write this object as Python literal (constructor) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.required is not None and 'required' not in already_processed:
            already_processed.add('required')
            showIndent(outfile, level)
            outfile.write('required=%s,\n' % (self.required,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s="%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an lxml element *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Parse 'required' strictly as an XML boolean, then collect every
        # remaining attribute into the wildcard map.
        value = find_attr_value_('required', node)
        if value is not None and 'required' not in already_processed:
            already_processed.add('required')
            if value in ('true', '1'):
                self.required = True
            elif value in ('false', '0'):
                self.required = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        obj_ = self.gds_build_any(child_, 'VCloudExtensionType')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class VCloudExtensionType
class VCloudExtensibleType(GeneratedsSuper):
    """0.9 A base abstract type for all complex types that support
    extensions: nested VCloudExtension child elements plus arbitrary XML
    attributes, with xsi:type round-tripping for subclasses."""
    subclass = None
    superclass = None
    def __init__(self, VCloudExtension=None, extensiontype_=None):
        self.original_tagname_ = None
        if VCloudExtension is None:
            self.VCloudExtension = []
        else:
            self.VCloudExtension = VCloudExtension
        self.anyAttributes_ = {}
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if VCloudExtensibleType.subclass:
            return VCloudExtensibleType.subclass(*args_, **kwargs_)
        else:
            return VCloudExtensibleType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_VCloudExtension(self): return self.VCloudExtension
    def set_VCloudExtension(self, VCloudExtension): self.VCloudExtension = VCloudExtension
    def add_VCloudExtension(self, value): self.VCloudExtension.append(value)
    def insert_VCloudExtension_at(self, index, value): self.VCloudExtension.insert(index, value)
    def replace_VCloudExtension_at(self, index, value): self.VCloudExtension[index] = value
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # True when there is at least one child element to serialize.
        if (
            self.VCloudExtension
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VCloudExtensibleType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Preserve the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VCloudExtensibleType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VCloudExtensibleType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VCloudExtensibleType'):
        """Write wildcard (xs:anyAttribute) attributes, mapping namespaced
        names to prefixed forms, plus xsi:type when set."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Attribute in the xsi namespace: emit with the 'xsi:' prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.add(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.add(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (
                                name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a synthetic prefix.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (
                                unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (
                                unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.add(name)
                        outfile.write(' %s=%s' % (
                            name, quote_attrib(value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='VCloudExtensibleType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for VCloudExtension_ in self.VCloudExtension:
            VCloudExtension_.export(outfile, level, namespace_, name_='VCloudExtension', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VCloudExtensibleType'):
        """Write this object as Python literal (constructor) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s="%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('VCloudExtension=[\n')
        level += 1
        for VCloudExtension_ in self.VCloudExtension:
            showIndent(outfile, level)
            outfile.write('model_.VCloudExtensionType(\n')
            VCloudExtension_.exportLiteral(outfile, level, name_='VCloudExtensionType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an lxml element *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Collect every attribute into the wildcard map, then record any
        # xsi:type so subclass round-tripping works.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'VCloudExtension':
            obj_ = VCloudExtensionType.factory()
            obj_.build(child_)
            self.VCloudExtension.append(obj_)
            obj_.original_tagname_ = 'VCloudExtension'
# end class VCloudExtensibleType
class ErrorType(VCloudExtensibleType):
    """0.9 The standard error message type used in the vCloud REST API.

    Attributes carried: ``message`` (one-line human-readable description of
    the error), ``majorErrorCode`` (matches the HTTP status code),
    ``minorErrorCode`` (specific API error code, e.g. why a vApp power-on
    failed), ``vendorSpecificErrorCode`` (vendor/implementation-specific code
    pointing at specific modules to ease diagnostics) and ``stackTrace``
    (1.0+: the stack trace of the exception).
    """
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, vendorSpecificErrorCode=None, stackTrace=None, message=None, minorErrorCode=None, majorErrorCode=None):
        self.original_tagname_ = None
        super(ErrorType, self).__init__(VCloudExtension, )
        self.vendorSpecificErrorCode = _cast(None, vendorSpecificErrorCode)
        self.stackTrace = _cast(None, stackTrace)
        self.message = _cast(None, message)
        self.minorErrorCode = _cast(None, minorErrorCode)
        # majorErrorCode mirrors the HTTP status, hence the int cast.
        self.majorErrorCode = _cast(int, majorErrorCode)
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if ErrorType.subclass:
            return ErrorType.subclass(*args_, **kwargs_)
        else:
            return ErrorType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_vendorSpecificErrorCode(self): return self.vendorSpecificErrorCode
    def set_vendorSpecificErrorCode(self, vendorSpecificErrorCode): self.vendorSpecificErrorCode = vendorSpecificErrorCode
    def get_stackTrace(self): return self.stackTrace
    def set_stackTrace(self, stackTrace): self.stackTrace = stackTrace
    def get_message(self): return self.message
    def set_message(self, message): self.message = message
    def get_minorErrorCode(self): return self.minorErrorCode
    def set_minorErrorCode(self, minorErrorCode): self.minorErrorCode = minorErrorCode
    def get_majorErrorCode(self): return self.majorErrorCode
    def set_majorErrorCode(self, majorErrorCode): self.majorErrorCode = majorErrorCode
    def hasContent_(self):
        # ErrorType adds no child elements of its own; defer to the base.
        if (
            super(ErrorType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='ErrorType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Preserve the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ErrorType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ErrorType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ErrorType'):
        """Write this type's attributes after the inherited wildcard ones."""
        super(ErrorType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ErrorType')
        if self.vendorSpecificErrorCode is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            outfile.write(' vendorSpecificErrorCode=%s' % (self.gds_format_string(quote_attrib(self.vendorSpecificErrorCode).encode(ExternalEncoding), input_name='vendorSpecificErrorCode'), ))
        if self.stackTrace is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            outfile.write(' stackTrace=%s' % (self.gds_format_string(quote_attrib(self.stackTrace).encode(ExternalEncoding), input_name='stackTrace'), ))
        if self.message is not None and 'message' not in already_processed:
            already_processed.add('message')
            outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
        if self.minorErrorCode is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            outfile.write(' minorErrorCode=%s' % (self.gds_format_string(quote_attrib(self.minorErrorCode).encode(ExternalEncoding), input_name='minorErrorCode'), ))
        if self.majorErrorCode is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            outfile.write(' majorErrorCode="%s"' % self.gds_format_integer(self.majorErrorCode, input_name='majorErrorCode'))
    def exportChildren(self, outfile, level, namespace_='', name_='ErrorType', fromsubclass_=False, pretty_print=True):
        super(ErrorType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ErrorType'):
        """Write this object as Python literal (constructor) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.vendorSpecificErrorCode is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            showIndent(outfile, level)
            outfile.write('vendorSpecificErrorCode="%s",\n' % (self.vendorSpecificErrorCode,))
        if self.stackTrace is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            showIndent(outfile, level)
            outfile.write('stackTrace="%s",\n' % (self.stackTrace,))
        if self.message is not None and 'message' not in already_processed:
            already_processed.add('message')
            showIndent(outfile, level)
            outfile.write('message="%s",\n' % (self.message,))
        if self.minorErrorCode is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            showIndent(outfile, level)
            outfile.write('minorErrorCode="%s",\n' % (self.minorErrorCode,))
        if self.majorErrorCode is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            showIndent(outfile, level)
            outfile.write('majorErrorCode=%d,\n' % (self.majorErrorCode,))
        super(ErrorType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ErrorType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this object from an lxml element *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Read this type's attributes, then let the base collect the rest.
        value = find_attr_value_('vendorSpecificErrorCode', node)
        if value is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            self.vendorSpecificErrorCode = value
        value = find_attr_value_('stackTrace', node)
        if value is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            self.stackTrace = value
        value = find_attr_value_('message', node)
        if value is not None and 'message' not in already_processed:
            already_processed.add('message')
            self.message = value
        value = find_attr_value_('minorErrorCode', node)
        if value is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            self.minorErrorCode = value
        value = find_attr_value_('majorErrorCode', node)
        if value is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            try:
                self.majorErrorCode = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(ErrorType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(ErrorType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ErrorType
class ResourceType(VCloudExtensibleType):
"""0.9 The base type for all objects in the vCloud model. Has an
optional list of links and href and type attributes. always
Contains the URI to the entity. always Contains the type of the
entity."""
subclass = None
superclass = VCloudExtensibleType
def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, extensiontype_=None):
self.original_tagname_ = None
super(ResourceType, self).__init__(VCloudExtension, extensiontype_, )
self.href = _cast(None, href)
self.type_ = _cast(None, type_)
if Link is None:
self.Link = []
else:
self.Link = Link
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if ResourceType.subclass:
return ResourceType.subclass(*args_, **kwargs_)
else:
return ResourceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Link(self): return self.Link
def set_Link(self, Link): self.Link = Link
def add_Link(self, value): self.Link.append(value)
def insert_Link_at(self, index, value): self.Link.insert(index, value)
def replace_Link_at(self, index, value): self.Link[index] = value
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.Link or
super(ResourceType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ResourceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ResourceType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResourceType'):
super(ResourceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceType')
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='ResourceType', fromsubclass_=False, pretty_print=True):
        # Emit superclass children first, then each Link child element in
        # document order.
        super(ResourceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Link_ in self.Link:
            Link_.export(outfile, level, namespace_, name_='Link', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ResourceType'):
        # Emit a Python-literal (constructor-style) representation of this
        # object: attributes first, then children if any content exists.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Write href/type_ as keyword-argument lines of the literal form,
        # then delegate to the superclass for inherited attributes.
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            showIndent(outfile, level)
            outfile.write('href="%s",\n' % (self.href,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        super(ResourceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        # Write the Link children as a literal list of model_.LinkType(...)
        # constructor calls (always emits the 'Link=[...]' wrapper, even
        # when the list is empty -- generator behavior, kept as-is).
        super(ResourceType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Link=[\n')
        level += 1
        for Link_ in self.Link:
            showIndent(outfile, level)
            outfile.write('model_.LinkType(\n')
            Link_.exportLiteral(outfile, level, name_='LinkType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from an ElementTree *node*: attributes first,
        # then each child element (tag name stripped of its namespace via
        # Tag_pattern_). Returns self so calls can be chained.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Read href, type and xsi:type attributes from the XML node, marking
        # each as processed so the superclass does not re-handle them.
        value = find_attr_value_('href', node)
        if value is not None and 'href' not in already_processed:
            already_processed.add('href')
            self.href = value
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(ResourceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Parse a 'Link' child into a LinkType instance and append it to
        # self.Link; all other children fall through to the superclass.
        if nodeName_ == 'Link':
            obj_ = LinkType.factory()
            obj_.build(child_)
            self.Link.append(obj_)
            obj_.original_tagname_ = 'Link'
        super(ResourceType, self).buildChildren(child_, node, nodeName_, True)
# end class ResourceType
class ParamsType(VCloudExtensibleType):
    """0.9 A basic type used to specify parameters for operations. always A
    name as parameter."""
    # Generated binding for the vCloud API ParamsType: a 'name' XML
    # attribute plus an optional Description child element.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, name=None, Description=None):
        self.original_tagname_ = None
        super(ParamsType, self).__init__(VCloudExtension, )
        self.name = _cast(None, name)
        self.Description = Description
    # Standard generateDS factory hook: a registered subclass overrides
    # instantiation when present.
    def factory(*args_, **kwargs_):
        if ParamsType.subclass:
            return ParamsType.subclass(*args_, **kwargs_)
        else:
            return ParamsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    # True when there is child content (a Description or inherited content).
    def hasContent_(self):
        if (
            self.Description is not None or
            super(ParamsType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize as XML: start tag + attributes, then children and close tag,
    # or a self-closing tag when empty.
    def export(self, outfile, level, namespace_='', name_='ParamsType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ParamsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ParamsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    # Write the 'name' attribute after inherited attributes.
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ParamsType'):
        super(ParamsType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ParamsType')
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    # Emit inherited children, then the optional Description element.
    def exportChildren(self, outfile, level, namespace_='', name_='ParamsType', fromsubclass_=False, pretty_print=True):
        super(ParamsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDescription>%s</%sDescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.Description).encode(ExternalEncoding), input_name='Description'), namespace_, eol_))
    # Python-literal (constructor-style) serialization.
    def exportLiteral(self, outfile, level, name_='ParamsType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(ParamsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ParamsType, self).exportLiteralChildren(outfile, level, name_)
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=%s,\n' % quote_python(self.Description).encode(ExternalEncoding))
    # Populate this object from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        super(ParamsType, self).buildAttributes(node, attrs, already_processed)
    # Parse the Description child's text content; other children go to the
    # superclass handler.
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            Description_ = child_.text
            Description_ = self.gds_validate_string(Description_, node, 'Description')
            self.Description = Description_
        super(ParamsType, self).buildChildren(child_, node, nodeName_, True)
# end class ParamsType
class ReferenceType(VCloudExtensibleType):
    """0.9 A reference to a resource. Contains an href attribute and
    optional name and type attributes. always Contains the URI to
    the entity. always The resource identifier, expressed in URN
    format. The value of this attribute uniquely identifies the
    resource, persists for the life of the resource, and is never
    reused. always Contains the type of the the entity. always
    Contains the name of the the entity."""
    # Generated binding: attribute-only element (href, id, type, name);
    # it has no child elements of its own.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, extensiontype_=None):
        self.original_tagname_ = None
        super(ReferenceType, self).__init__(VCloudExtension, extensiontype_, )
        self.href = _cast(None, href)
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        # NOTE(review): extensiontype_ is stored here as well as passed to
        # the superclass -- redundant-looking but standard generator output.
        self.extensiontype_ = extensiontype_
    # Standard generateDS factory hook: a registered subclass overrides
    # instantiation when present.
    def factory(*args_, **kwargs_):
        if ReferenceType.subclass:
            return ReferenceType.subclass(*args_, **kwargs_)
        else:
            return ReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_href(self): return self.href
    def set_href(self, href): self.href = href
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # Content comes only from the superclass; this type adds attributes only.
    def hasContent_(self):
        if (
            super(ReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize as XML: start tag + attributes, then children and close tag,
    # or a self-closing tag when empty.
    def export(self, outfile, level, namespace_='', name_='ReferenceType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    # Write href/type/id/name and the optional xsi:type marker after the
    # inherited attributes.
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceType'):
        super(ReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    # No children of its own; delegate entirely to the superclass.
    def exportChildren(self, outfile, level, namespace_='', name_='ReferenceType', fromsubclass_=False, pretty_print=True):
        super(ReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Python-literal (constructor-style) serialization.
    def exportLiteral(self, outfile, level, name_='ReferenceType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            showIndent(outfile, level)
            outfile.write('href="%s",\n' % (self.href,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(ReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ReferenceType, self).exportLiteralChildren(outfile, level, name_)
    # Populate this object from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('href', node)
        if value is not None and 'href' not in already_processed:
            already_processed.add('href')
            self.href = value
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(ReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(ReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ReferenceType
class ResourceReferenceType(ReferenceType):
    """0.9 Represents a reference to a resource. Reference that contains an
    href attribute, an optional name and type attributes, and a
    resource status attribute. none Status of a resource."""
    # Generated binding: extends ReferenceType with a single integer
    # 'status' XML attribute.
    subclass = None
    superclass = ReferenceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, status=None):
        self.original_tagname_ = None
        super(ResourceReferenceType, self).__init__(VCloudExtension, href, type_, id, name, )
        self.status = _cast(int, status)
    # Standard generateDS factory hook: a registered subclass overrides
    # instantiation when present.
    def factory(*args_, **kwargs_):
        if ResourceReferenceType.subclass:
            return ResourceReferenceType.subclass(*args_, **kwargs_)
        else:
            return ResourceReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_status(self): return self.status
    def set_status(self, status): self.status = status
    # Content comes only from the superclass; this type adds an attribute.
    def hasContent_(self):
        if (
            super(ResourceReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize as XML: start tag + attributes, then children and close tag,
    # or a self-closing tag when empty.
    def export(self, outfile, level, namespace_='', name_='ResourceReferenceType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ResourceReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    # Write the integer 'status' attribute after inherited attributes.
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResourceReferenceType'):
        super(ResourceReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceReferenceType')
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            outfile.write(' status="%s"' % self.gds_format_integer(self.status, input_name='status'))
    def exportChildren(self, outfile, level, namespace_='', name_='ResourceReferenceType', fromsubclass_=False, pretty_print=True):
        super(ResourceReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Python-literal (constructor-style) serialization.
    def exportLiteral(self, outfile, level, name_='ResourceReferenceType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            showIndent(outfile, level)
            outfile.write('status=%d,\n' % (self.status,))
        super(ResourceReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ResourceReferenceType, self).exportLiteralChildren(outfile, level, name_)
    # Populate this object from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    # Parse 'status' as an int; a non-numeric value raises a parse error.
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('status', node)
        if value is not None and 'status' not in already_processed:
            already_processed.add('status')
            try:
                self.status = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(ResourceReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(ResourceReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ResourceReferenceType
class ContainerType(ResourceType):
    """Container for query result sets. none Query name that generated this
    result set. none Page of the result set that this container
    holds. The first page is page number 1. none Page size, as a
    number of records or references. none Total number of records or
    references in the container."""
    # Generated binding: pagination metadata (name, page, pageSize, total)
    # for query result containers.
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None, extensiontype_=None):
        self.original_tagname_ = None
        super(ContainerType, self).__init__(VCloudExtension, href, type_, Link, extensiontype_, )
        self.total = _cast(int, total)
        self.name = _cast(None, name)
        self.pageSize = _cast(int, pageSize)
        self.page = _cast(int, page)
        # NOTE(review): extensiontype_ is stored here as well as passed to
        # the superclass -- redundant-looking but standard generator output.
        self.extensiontype_ = extensiontype_
    # Standard generateDS factory hook: a registered subclass overrides
    # instantiation when present.
    def factory(*args_, **kwargs_):
        if ContainerType.subclass:
            return ContainerType.subclass(*args_, **kwargs_)
        else:
            return ContainerType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_total(self): return self.total
    def set_total(self, total): self.total = total
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_pageSize(self): return self.pageSize
    def set_pageSize(self, pageSize): self.pageSize = pageSize
    def get_page(self): return self.page
    def set_page(self, page): self.page = page
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # Content comes only from the superclass (Link children etc.).
    def hasContent_(self):
        if (
            super(ContainerType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize as XML: start tag + attributes, then children and close tag,
    # or a self-closing tag when empty.
    def export(self, outfile, level, namespace_='', name_='ContainerType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContainerType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ContainerType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    # Write total/name/pageSize/page and the optional xsi:type marker after
    # inherited attributes.
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContainerType'):
        super(ContainerType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContainerType')
        if self.total is not None and 'total' not in already_processed:
            already_processed.add('total')
            outfile.write(' total="%s"' % self.gds_format_integer(self.total, input_name='total'))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.pageSize is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            outfile.write(' pageSize="%s"' % self.gds_format_integer(self.pageSize, input_name='pageSize'))
        if self.page is not None and 'page' not in already_processed:
            already_processed.add('page')
            outfile.write(' page="%s"' % self.gds_format_integer(self.page, input_name='page'))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='ContainerType', fromsubclass_=False, pretty_print=True):
        super(ContainerType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Python-literal (constructor-style) serialization.
    def exportLiteral(self, outfile, level, name_='ContainerType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.total is not None and 'total' not in already_processed:
            already_processed.add('total')
            showIndent(outfile, level)
            outfile.write('total=%d,\n' % (self.total,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        if self.pageSize is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            showIndent(outfile, level)
            outfile.write('pageSize=%d,\n' % (self.pageSize,))
        if self.page is not None and 'page' not in already_processed:
            already_processed.add('page')
            showIndent(outfile, level)
            outfile.write('page=%d,\n' % (self.page,))
        super(ContainerType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ContainerType, self).exportLiteralChildren(outfile, level, name_)
    # Populate this object from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    # Parse the integer attributes; non-numeric values raise a parse error.
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('total', node)
        if value is not None and 'total' not in already_processed:
            already_processed.add('total')
            try:
                self.total = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('pageSize', node)
        if value is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            try:
                self.pageSize = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('page', node)
        if value is not None and 'page' not in already_processed:
            already_processed.add('page')
            try:
                self.page = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(ContainerType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(ContainerType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ContainerType
class CapacityType(VCloudExtensibleType):
    """0.9 Represents a capacity of a given resource."""
    # Generated binding: Units (string), Allocated and Limit (integers) as
    # child elements.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, Units=None, Allocated=None, Limit=None, extensiontype_=None):
        self.original_tagname_ = None
        super(CapacityType, self).__init__(VCloudExtension, extensiontype_, )
        self.Units = Units
        self.Allocated = Allocated
        self.Limit = Limit
        # NOTE(review): extensiontype_ is stored here as well as passed to
        # the superclass -- redundant-looking but standard generator output.
        self.extensiontype_ = extensiontype_
    # Standard generateDS factory hook: a registered subclass overrides
    # instantiation when present.
    def factory(*args_, **kwargs_):
        if CapacityType.subclass:
            return CapacityType.subclass(*args_, **kwargs_)
        else:
            return CapacityType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Units(self): return self.Units
    def set_Units(self, Units): self.Units = Units
    def get_Allocated(self): return self.Allocated
    def set_Allocated(self, Allocated): self.Allocated = Allocated
    def get_Limit(self): return self.Limit
    def set_Limit(self, Limit): self.Limit = Limit
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # True when any child element (Units/Allocated/Limit) or inherited
    # content is present.
    def hasContent_(self):
        if (
            self.Units is not None or
            self.Allocated is not None or
            self.Limit is not None or
            super(CapacityType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize as XML: start tag + attributes, then children and close tag,
    # or a self-closing tag when empty.
    def export(self, outfile, level, namespace_='', name_='CapacityType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='CapacityType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    # Only the optional xsi:type marker beyond inherited attributes.
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CapacityType'):
        super(CapacityType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityType')
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    # Emit inherited children, then Units, Allocated and Limit in order.
    def exportChildren(self, outfile, level, namespace_='', name_='CapacityType', fromsubclass_=False, pretty_print=True):
        super(CapacityType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Units is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUnits>%s</%sUnits>%s' % (namespace_, self.gds_format_string(quote_xml(self.Units).encode(ExternalEncoding), input_name='Units'), namespace_, eol_))
        if self.Allocated is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAllocated>%s</%sAllocated>%s' % (namespace_, self.gds_format_integer(self.Allocated, input_name='Allocated'), namespace_, eol_))
        if self.Limit is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLimit>%s</%sLimit>%s' % (namespace_, self.gds_format_integer(self.Limit, input_name='Limit'), namespace_, eol_))
    # Python-literal (constructor-style) serialization.
    def exportLiteral(self, outfile, level, name_='CapacityType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(CapacityType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(CapacityType, self).exportLiteralChildren(outfile, level, name_)
        if self.Units is not None:
            showIndent(outfile, level)
            outfile.write('Units=%s,\n' % quote_python(self.Units).encode(ExternalEncoding))
        if self.Allocated is not None:
            showIndent(outfile, level)
            outfile.write('Allocated=%d,\n' % self.Allocated)
        if self.Limit is not None:
            showIndent(outfile, level)
            outfile.write('Limit=%d,\n' % self.Limit)
    # Populate this object from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(CapacityType, self).buildAttributes(node, attrs, already_processed)
    # Parse the child elements; Allocated/Limit must be integers or a parse
    # error is raised.
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Units':
            Units_ = child_.text
            Units_ = self.gds_validate_string(Units_, node, 'Units')
            self.Units = Units_
        elif nodeName_ == 'Allocated':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Allocated')
            self.Allocated = ival_
        elif nodeName_ == 'Limit':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Limit')
            self.Limit = ival_
        super(CapacityType, self).buildChildren(child_, node, nodeName_, True)
# end class CapacityType
class CapacityWithUsageType(CapacityType):
"""0.9 Represents a capacity and usage of a given resource."""
subclass = None
superclass = CapacityType
def __init__(self, VCloudExtension=None, Units=None, Allocated=None, Limit=None, Used=None, Overhead=None):
self.original_tagname_ = None
super(CapacityWithUsageType, self).__init__(VCloudExtension, Units, Allocated, Limit, )
self.Used = Used
self.Overhead = Overhead
def factory(*args_, **kwargs_):
if CapacityWithUsageType.subclass:
return CapacityWithUsageType.subclass(*args_, **kwargs_)
else:
return CapacityWithUsageType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Used(self): return self.Used
def set_Used(self, Used): self.Used = Used
def get_Overhead(self): return self.Overhead
def set_Overhead(self, Overhead): self.Overhead = Overhead
def hasContent_(self):
if (
self.Used is not None or
self.Overhead is not None or
super(CapacityWithUsageType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CapacityWithUsageType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityWithUsageType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CapacityWithUsageType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CapacityWithUsageType'):
super(CapacityWithUsageType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityWithUsageType')
def exportChildren(self, outfile, level, namespace_='', name_='CapacityWithUsageType', fromsubclass_=False, pretty_print=True):
super(CapacityWithUsageType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Used is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sUsed>%s</%sUsed>%s' % (namespace_, self.gds_format_integer(self.Used, input_name='Used'), namespace_, eol_))
if self.Overhead is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sOverhead>%s</%sOverhead>%s' % (namespace_, self.gds_format_integer(self.Overhead, input_name='Overhead'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='CapacityWithUsageType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No local attributes; delegate entirely to the superclass.
        super(CapacityWithUsageType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the Used/Overhead children as keyword-argument literals."""
        super(CapacityWithUsageType, self).exportLiteralChildren(outfile, level, name_)
        if self.Used is not None:
            showIndent(outfile, level)
            outfile.write('Used=%d,\n' % self.Used)
        if self.Overhead is not None:
            showIndent(outfile, level)
            outfile.write('Overhead=%d,\n' % self.Overhead)
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No local attributes to parse; delegate to the superclass.
        super(CapacityWithUsageType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Used':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Used')
self.Used = ival_
elif nodeName_ == 'Overhead':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Overhead')
self.Overhead = ival_
super(CapacityWithUsageType, self).buildChildren(child_, node, nodeName_, True)
# end class CapacityWithUsageType
class AccessSettingType(VCloudExtensibleType):
    """0.9 Specifies who can access the resource.

    Pairs a Subject (a reference naming a user or group) with the
    AccessLevel string granted to that subject.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, Subject=None, AccessLevel=None):
        self.original_tagname_ = None
        super(AccessSettingType, self).__init__(VCloudExtension, )
        self.Subject = Subject
        self.AccessLevel = AccessLevel
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is set, else this class.
        if AccessSettingType.subclass:
            return AccessSettingType.subclass(*args_, **kwargs_)
        else:
            return AccessSettingType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated getter/setter pairs.
    def get_Subject(self): return self.Subject
    def set_Subject(self, Subject): self.Subject = Subject
    def get_AccessLevel(self): return self.AccessLevel
    def set_AccessLevel(self, AccessLevel): self.AccessLevel = AccessLevel
    def hasContent_(self):
        # True when any local child is set or the superclass has content.
        if (
            self.Subject is not None or
            self.AccessLevel is not None or
            super(AccessSettingType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='AccessSettingType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Re-use the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='AccessSettingType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AccessSettingType'):
        super(AccessSettingType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingType')
    def exportChildren(self, outfile, level, namespace_='', name_='AccessSettingType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then Subject (as a nested element)
        and AccessLevel (as an escaped string element)."""
        super(AccessSettingType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Subject is not None:
            self.Subject.export(outfile, level, namespace_, name_='Subject', pretty_print=pretty_print)
        if self.AccessLevel is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAccessLevel>%s</%sAccessLevel>%s' % (namespace_, self.gds_format_string(quote_xml(self.AccessLevel).encode(ExternalEncoding), input_name='AccessLevel'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='AccessSettingType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AccessSettingType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(AccessSettingType, self).exportLiteralChildren(outfile, level, name_)
        if self.Subject is not None:
            showIndent(outfile, level)
            outfile.write('Subject=model_.ReferenceType(\n')
            self.Subject.exportLiteral(outfile, level, name_='Subject')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.AccessLevel is not None:
            showIndent(outfile, level)
            outfile.write('AccessLevel=%s,\n' % quote_python(self.AccessLevel).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(AccessSettingType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Subject':
            # Subject may be a ReferenceType or a registered substitute class.
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Subject = obj_
            obj_.original_tagname_ = 'Subject'
        elif nodeName_ == 'AccessLevel':
            AccessLevel_ = child_.text
            AccessLevel_ = self.gds_validate_string(AccessLevel_, node, 'AccessLevel')
            self.AccessLevel = AccessLevel_
        super(AccessSettingType, self).buildChildren(child_, node, nodeName_, True)
# end class AccessSettingType
class AccessSettingsType(VCloudExtensibleType):
    """0.9 A list of access settings for a resource.

    Holds a repeating list of AccessSetting child elements.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, AccessSetting=None):
        self.original_tagname_ = None
        super(AccessSettingsType, self).__init__(VCloudExtension, )
        # Default to a fresh list so instances never share a mutable default.
        if AccessSetting is None:
            self.AccessSetting = []
        else:
            self.AccessSetting = AccessSetting
    def factory(*args_, **kwargs_):
        if AccessSettingsType.subclass:
            return AccessSettingsType.subclass(*args_, **kwargs_)
        else:
            return AccessSettingsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors and list helpers for the repeating AccessSetting child.
    def get_AccessSetting(self): return self.AccessSetting
    def set_AccessSetting(self, AccessSetting): self.AccessSetting = AccessSetting
    def add_AccessSetting(self, value): self.AccessSetting.append(value)
    def insert_AccessSetting_at(self, index, value): self.AccessSetting.insert(index, value)
    def replace_AccessSetting_at(self, index, value): self.AccessSetting[index] = value
    def hasContent_(self):
        # True when the list is non-empty or the superclass has content.
        if (
            self.AccessSetting or
            super(AccessSettingsType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='AccessSettingsType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='AccessSettingsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AccessSettingsType'):
        super(AccessSettingsType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingsType')
    def exportChildren(self, outfile, level, namespace_='', name_='AccessSettingsType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then each AccessSetting element."""
        super(AccessSettingsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for AccessSetting_ in self.AccessSetting:
            AccessSetting_.export(outfile, level, namespace_, name_='AccessSetting', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='AccessSettingsType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AccessSettingsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the AccessSetting list as a Python list literal."""
        super(AccessSettingsType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('AccessSetting=[\n')
        level += 1
        for AccessSetting_ in self.AccessSetting:
            showIndent(outfile, level)
            outfile.write('model_.AccessSettingType(\n')
            AccessSetting_.exportLiteral(outfile, level, name_='AccessSettingType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(AccessSettingsType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'AccessSetting':
            # Each AccessSetting child becomes an AccessSettingType instance.
            obj_ = AccessSettingType.factory()
            obj_.build(child_)
            self.AccessSetting.append(obj_)
            obj_.original_tagname_ = 'AccessSetting'
        super(AccessSettingsType, self).buildChildren(child_, node, nodeName_, True)
# end class AccessSettingsType
class ControlAccessParamsType(VCloudExtensibleType):
    """0.9 Used to control access to resources.

    Carries the shared-to-everyone flag, the access level applied when
    shared to everyone, and an optional per-subject AccessSettings list.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, IsSharedToEveryone=None, EveryoneAccessLevel=None, AccessSettings=None):
        self.original_tagname_ = None
        super(ControlAccessParamsType, self).__init__(VCloudExtension, )
        self.IsSharedToEveryone = IsSharedToEveryone
        self.EveryoneAccessLevel = EveryoneAccessLevel
        self.AccessSettings = AccessSettings
    def factory(*args_, **kwargs_):
        if ControlAccessParamsType.subclass:
            return ControlAccessParamsType.subclass(*args_, **kwargs_)
        else:
            return ControlAccessParamsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated getter/setter pairs.
    def get_IsSharedToEveryone(self): return self.IsSharedToEveryone
    def set_IsSharedToEveryone(self, IsSharedToEveryone): self.IsSharedToEveryone = IsSharedToEveryone
    def get_EveryoneAccessLevel(self): return self.EveryoneAccessLevel
    def set_EveryoneAccessLevel(self, EveryoneAccessLevel): self.EveryoneAccessLevel = EveryoneAccessLevel
    def get_AccessSettings(self): return self.AccessSettings
    def set_AccessSettings(self, AccessSettings): self.AccessSettings = AccessSettings
    def hasContent_(self):
        # True when any local child is set or the superclass has content.
        if (
            self.IsSharedToEveryone is not None or
            self.EveryoneAccessLevel is not None or
            self.AccessSettings is not None or
            super(ControlAccessParamsType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='ControlAccessParamsType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ControlAccessParamsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ControlAccessParamsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ControlAccessParamsType'):
        super(ControlAccessParamsType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ControlAccessParamsType')
    def exportChildren(self, outfile, level, namespace_='', name_='ControlAccessParamsType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then the boolean flag, the access-level
        string, and the nested AccessSettings element."""
        super(ControlAccessParamsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.IsSharedToEveryone is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsSharedToEveryone>%s</%sIsSharedToEveryone>%s' % (namespace_, self.gds_format_boolean(self.IsSharedToEveryone, input_name='IsSharedToEveryone'), namespace_, eol_))
        if self.EveryoneAccessLevel is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sEveryoneAccessLevel>%s</%sEveryoneAccessLevel>%s' % (namespace_, self.gds_format_string(quote_xml(self.EveryoneAccessLevel).encode(ExternalEncoding), input_name='EveryoneAccessLevel'), namespace_, eol_))
        if self.AccessSettings is not None:
            self.AccessSettings.export(outfile, level, namespace_, name_='AccessSettings', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ControlAccessParamsType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(ControlAccessParamsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ControlAccessParamsType, self).exportLiteralChildren(outfile, level, name_)
        if self.IsSharedToEveryone is not None:
            showIndent(outfile, level)
            outfile.write('IsSharedToEveryone=%s,\n' % self.IsSharedToEveryone)
        if self.EveryoneAccessLevel is not None:
            showIndent(outfile, level)
            outfile.write('EveryoneAccessLevel=%s,\n' % quote_python(self.EveryoneAccessLevel).encode(ExternalEncoding))
        if self.AccessSettings is not None:
            showIndent(outfile, level)
            outfile.write('AccessSettings=model_.AccessSettingsType(\n')
            self.AccessSettings.exportLiteral(outfile, level, name_='AccessSettings')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(ControlAccessParamsType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'IsSharedToEveryone':
            # xsd:boolean accepts both word and digit forms.
            sval_ = child_.text
            if sval_ in ('true', '1'):
                ival_ = True
            elif sval_ in ('false', '0'):
                ival_ = False
            else:
                raise_parse_error(child_, 'requires boolean')
            ival_ = self.gds_validate_boolean(ival_, node, 'IsSharedToEveryone')
            self.IsSharedToEveryone = ival_
        elif nodeName_ == 'EveryoneAccessLevel':
            EveryoneAccessLevel_ = child_.text
            EveryoneAccessLevel_ = self.gds_validate_string(EveryoneAccessLevel_, node, 'EveryoneAccessLevel')
            self.EveryoneAccessLevel = EveryoneAccessLevel_
        elif nodeName_ == 'AccessSettings':
            obj_ = AccessSettingsType.factory()
            obj_.build(child_)
            self.AccessSettings = obj_
            obj_.original_tagname_ = 'AccessSettings'
        super(ControlAccessParamsType, self).buildChildren(child_, node, nodeName_, True)
# end class ControlAccessParamsType
class OwnerType(ResourceType):
    """1.5 Represents the owner of this entity.

    Adds a single User child (a reference to the owning user) on top of
    the generic ResourceType content.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, User=None):
        self.original_tagname_ = None
        super(OwnerType, self).__init__(VCloudExtension, href, type_, Link, )
        self.User = User
    def factory(*args_, **kwargs_):
        if OwnerType.subclass:
            return OwnerType.subclass(*args_, **kwargs_)
        else:
            return OwnerType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated getter/setter pair for the User child element.
    def get_User(self): return self.User
    def set_User(self, User): self.User = User
    def hasContent_(self):
        # True when User is set or the superclass has content.
        if (
            self.User is not None or
            super(OwnerType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='OwnerType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='OwnerType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='OwnerType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OwnerType'):
        super(OwnerType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='OwnerType')
    def exportChildren(self, outfile, level, namespace_='', name_='OwnerType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then the User reference element."""
        super(OwnerType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.User is not None:
            self.User.export(outfile, level, namespace_, name_='User', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='OwnerType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(OwnerType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(OwnerType, self).exportLiteralChildren(outfile, level, name_)
        if self.User is not None:
            showIndent(outfile, level)
            outfile.write('User=model_.ReferenceType(\n')
            self.User.exportLiteral(outfile, level, name_='User')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(OwnerType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'User':
            # User may be a ReferenceType or a registered substitute class.
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.User = obj_
            obj_.original_tagname_ = 'User'
        super(OwnerType, self).buildChildren(child_, node, nodeName_, True)
# end class OwnerType
class ReferencesType(ContainerType):
    """This is the container for returned elements in referenceView.

    Holds a repeating list of Reference child elements on top of the
    paging/link content inherited from ContainerType.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = ContainerType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None, Reference=None):
        self.original_tagname_ = None
        super(ReferencesType, self).__init__(VCloudExtension, href, type_, Link, total, name, pageSize, page, )
        # Default to a fresh list so instances never share a mutable default.
        if Reference is None:
            self.Reference = []
        else:
            self.Reference = Reference
    def factory(*args_, **kwargs_):
        if ReferencesType.subclass:
            return ReferencesType.subclass(*args_, **kwargs_)
        else:
            return ReferencesType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors and list helpers for the repeating Reference child.
    def get_Reference(self): return self.Reference
    def set_Reference(self, Reference): self.Reference = Reference
    def add_Reference(self, value): self.Reference.append(value)
    def insert_Reference_at(self, index, value): self.Reference.insert(index, value)
    def replace_Reference_at(self, index, value): self.Reference[index] = value
    def hasContent_(self):
        # True when the list is non-empty or the superclass has content.
        if (
            self.Reference or
            super(ReferencesType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='ReferencesType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferencesType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ReferencesType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferencesType'):
        super(ReferencesType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReferencesType')
    def exportChildren(self, outfile, level, namespace_='', name_='ReferencesType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then each Reference element."""
        super(ReferencesType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Reference_ in self.Reference:
            Reference_.export(outfile, level, namespace_, name_='Reference', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ReferencesType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(ReferencesType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ReferencesType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Reference=[\n')
        level += 1
        for Reference_ in self.Reference:
            showIndent(outfile, level)
            # NOTE(review): sibling classes write 'model_.<Type>Type(' and pass
            # name_ to exportLiteral here; this generated variant omits both.
            outfile.write('model_.Reference(\n')
            Reference_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(ReferencesType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Reference':
            # Reference may be a ReferenceType or a registered substitute class.
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Reference.append(obj_)
            obj_.original_tagname_ = 'Reference'
        super(ReferencesType, self).buildChildren(child_, node, nodeName_, True)
# end class ReferencesType
class QueryListType(ContainerType):
    """Container for the list of typed queries available to the requesting
    user.

    All content (links, paging attributes) comes from ContainerType; this
    subclass declares no children of its own.
    """
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = ContainerType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None):
        self.original_tagname_ = None
        super(QueryListType, self).__init__(VCloudExtension, href, type_, Link, total, name, pageSize, page, )
    def factory(*args_, **kwargs_):
        if QueryListType.subclass:
            return QueryListType.subclass(*args_, **kwargs_)
        else:
            return QueryListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # No local children; content is whatever the superclass contributes.
        if (
            super(QueryListType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='QueryListType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Re-use the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='QueryListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='QueryListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='QueryListType'):
        super(QueryListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='QueryListType')
    def exportChildren(self, outfile, level, namespace_='', name_='QueryListType', fromsubclass_=False, pretty_print=True):
        super(QueryListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='QueryListType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(QueryListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(QueryListType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(QueryListType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No local children to parse; delegate everything to the superclass.
        # (Removed a dead `pass` statement that followed the super() call.)
        super(QueryListType, self).buildChildren(child_, node, nodeName_, True)
# end class QueryListType
class MetadataEntryType(ResourceType):
    """A single metadata entry: a Key/Value string pair attached to a
    resource."""
    # subclass may be set by callers so factory() builds a derived class.
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Key=None, Value=None):
        self.original_tagname_ = None
        super(MetadataEntryType, self).__init__(VCloudExtension, href, type_, Link, )
        self.Key = Key
        self.Value = Value
    def factory(*args_, **kwargs_):
        if MetadataEntryType.subclass:
            return MetadataEntryType.subclass(*args_, **kwargs_)
        else:
            return MetadataEntryType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated getter/setter pairs.
    def get_Key(self): return self.Key
    def set_Key(self, Key): self.Key = Key
    def get_Value(self): return self.Value
    def set_Value(self, Value): self.Value = Value
    def hasContent_(self):
        # True when any local child is set or the superclass has content.
        if (
            self.Key is not None or
            self.Value is not None or
            super(MetadataEntryType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetadataEntryType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataEntryType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataEntryType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataEntryType'):
        super(MetadataEntryType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataEntryType')
    def exportChildren(self, outfile, level, namespace_='', name_='MetadataEntryType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then the Key and Value string elements."""
        super(MetadataEntryType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (namespace_, self.gds_format_string(quote_xml(self.Key).encode(ExternalEncoding), input_name='Key'), namespace_, eol_))
        if self.Value is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (namespace_, self.gds_format_string(quote_xml(self.Value).encode(ExternalEncoding), input_name='Value'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='MetadataEntryType'):
        """Write this object as a Python constructor-call literal to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataEntryType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataEntryType, self).exportLiteralChildren(outfile, level, name_)
        if self.Key is not None:
            showIndent(outfile, level)
            outfile.write('Key=%s,\n' % quote_python(self.Key).encode(ExternalEncoding))
        if self.Value is not None:
            showIndent(outfile, level)
            outfile.write('Value=%s,\n' % quote_python(self.Value).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataEntryType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Key':
            Key_ = child_.text
            Key_ = self.gds_validate_string(Key_, node, 'Key')
            self.Key = Key_
        elif nodeName_ == 'Value':
            Value_ = child_.text
            Value_ = self.gds_validate_string(Value_, node, 'Value')
            self.Value = Value_
        super(MetadataEntryType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataEntryType
class MetadataType(ResourceType):
    """Generated binding for a vCloud ``Metadata`` element: a ResourceType
    carrying a repeatable list of MetadataEntry children.

    NOTE(review): generateDS-generated code — keep byte-identical to the
    generator's output; hand edits will be lost on regeneration.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, MetadataEntry=None):
        self.original_tagname_ = None
        super(MetadataType, self).__init__(VCloudExtension, href, type_, Link, )
        # Fresh list per instance — avoids the shared-mutable-default trap.
        if MetadataEntry is None:
            self.MetadataEntry = []
        else:
            self.MetadataEntry = MetadataEntry
    def factory(*args_, **kwargs_):
        # Standard generateDS hook: setting MetadataType.subclass lets an
        # application substitute its own class for instances built by parse.
        if MetadataType.subclass:
            return MetadataType.subclass(*args_, **kwargs_)
        else:
            return MetadataType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors/mutators for the MetadataEntry list.
    def get_MetadataEntry(self): return self.MetadataEntry
    def set_MetadataEntry(self, MetadataEntry): self.MetadataEntry = MetadataEntry
    def add_MetadataEntry(self, value): self.MetadataEntry.append(value)
    def insert_MetadataEntry_at(self, index, value): self.MetadataEntry.insert(index, value)
    def replace_MetadataEntry_at(self, index, value): self.MetadataEntry[index] = value
    def hasContent_(self):
        # Decides between <X>...</X> and the self-closing <X/> form.
        if (
            self.MetadataEntry or
            super(MetadataType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetadataType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataType'):
        # No attributes of its own; inherited attributes are written by super.
        super(MetadataType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataType')
    def exportChildren(self, outfile, level, namespace_='', name_='MetadataType', fromsubclass_=False, pretty_print=True):
        super(MetadataType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for MetadataEntry_ in self.MetadataEntry:
            MetadataEntry_.export(outfile, level, namespace_, name_='MetadataEntry', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='MetadataType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        # The list brackets are emitted even when MetadataEntry is empty.
        outfile.write('MetadataEntry=[\n')
        level += 1
        for MetadataEntry_ in self.MetadataEntry:
            showIndent(outfile, level)
            outfile.write('model_.MetadataEntryType(\n')
            MetadataEntry_.exportLiteral(outfile, level, name_='MetadataEntryType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Each 'MetadataEntry' child becomes a MetadataEntryType appended to
        # the list; the superclass handler always runs afterwards.
        if nodeName_ == 'MetadataEntry':
            obj_ = MetadataEntryType.factory()
            obj_.build(child_)
            self.MetadataEntry.append(obj_)
            obj_.original_tagname_ = 'MetadataEntry'
        super(MetadataType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataType
class MetadataValueType(ResourceType):
    """Generated binding for a vCloud ``MetadataValue`` element: a
    ResourceType with a single optional string ``Value`` child.

    NOTE(review): generateDS-generated code — do not hand-edit logic.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Value=None):
        self.original_tagname_ = None
        super(MetadataValueType, self).__init__(VCloudExtension, href, type_, Link, )
        self.Value = Value
    def factory(*args_, **kwargs_):
        # Subclass-substitution hook (see MetadataValueType.subclass).
        if MetadataValueType.subclass:
            return MetadataValueType.subclass(*args_, **kwargs_)
        else:
            return MetadataValueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for the Value child.
    def get_Value(self): return self.Value
    def set_Value(self, Value): self.Value = Value
    def hasContent_(self):
        # Decides between <X>...</X> and the self-closing <X/> form.
        if (
            self.Value is not None or
            super(MetadataValueType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetadataValueType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataValueType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataValueType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataValueType'):
        super(MetadataValueType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataValueType')
    def exportChildren(self, outfile, level, namespace_='', name_='MetadataValueType', fromsubclass_=False, pretty_print=True):
        super(MetadataValueType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Value is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (namespace_, self.gds_format_string(quote_xml(self.Value).encode(ExternalEncoding), input_name='Value'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='MetadataValueType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataValueType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataValueType, self).exportLiteralChildren(outfile, level, name_)
        if self.Value is not None:
            showIndent(outfile, level)
            outfile.write('Value=%s,\n' % quote_python(self.Value).encode(ExternalEncoding))
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataValueType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # A 'Value' child is stored as validated text; super always runs.
        if nodeName_ == 'Value':
            Value_ = child_.text
            Value_ = self.gds_validate_string(Value_, node, 'Value')
            self.Value = Value_
        super(MetadataValueType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataValueType
class EntityType(ResourceType):
    """0.9 Basic entity type in the vCloud object model. Includes a name,
    an optional description, and an optional list of links. always
    The name of the entity. none The entity identifier, expressed in
    URN format. The value of this attribute uniquely identifies the
    entity, persists for the life of the entity, and is never
    reused."""
    # NOTE(review): generateDS-generated base class; many other generated
    # types (e.g. TaskType) extend it, so keep the generated code intact.
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, name=None, id=None, Description=None, Tasks=None, extensiontype_=None):
        self.original_tagname_ = None
        super(EntityType, self).__init__(VCloudExtension, href, type_, Link, extensiontype_, )
        # name/id are XML attributes; _cast with None passes values through.
        self.name = _cast(None, name)
        self.id = _cast(None, id)
        self.Description = Description
        self.Tasks = Tasks
        # extensiontype_ carries the xsi:type for derived-type round-trips.
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Subclass-substitution hook (see EntityType.subclass).
        if EntityType.subclass:
            return EntityType.subclass(*args_, **kwargs_)
        else:
            return EntityType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for children and attributes.
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_Tasks(self): return self.Tasks
    def set_Tasks(self, Tasks): self.Tasks = Tasks
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # Decides between <X>...</X> and the self-closing <X/> form.
        if (
            self.Description is not None or
            self.Tasks is not None or
            super(EntityType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='EntityType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='EntityType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityType'):
        super(EntityType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        # When serializing a subclass through a base-typed element, emit the
        # xsi namespace declaration and the concrete xsi:type.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='EntityType', fromsubclass_=False, pretty_print=True):
        super(EntityType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDescription>%s</%sDescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.Description).encode(ExternalEncoding), input_name='Description'), namespace_, eol_))
        if self.Tasks is not None:
            self.Tasks.export(outfile, level, namespace_, name_='Tasks', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='EntityType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        super(EntityType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityType, self).exportLiteralChildren(outfile, level, name_)
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=%s,\n' % quote_python(self.Description).encode(ExternalEncoding))
        if self.Tasks is not None:
            showIndent(outfile, level)
            outfile.write('Tasks=model_.TasksInProgressType(\n')
            self.Tasks.exportLiteral(outfile, level, name_='Tasks')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        # Remember a concrete xsi:type so export can round-trip it.
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(EntityType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            Description_ = child_.text
            Description_ = self.gds_validate_string(Description_, node, 'Description')
            self.Description = Description_
        elif nodeName_ == 'Tasks':
            obj_ = TasksInProgressType.factory()
            obj_.build(child_)
            self.Tasks = obj_
            obj_.original_tagname_ = 'Tasks'
        super(EntityType, self).buildChildren(child_, node, nodeName_, True)
# end class EntityType
class EntityReferenceType(VCloudExtensibleType):
    """1.5 A reference to a vCloud entity. none The object identifier,
    expressed in URN format. The value of this attribute uniquely
    identifies the object, persists for the life of the object, and
    is never reused. This context-free identifier can apply to any
    object in any system. always The type of the the referenced
    object. always The name of the referenced object."""
    # NOTE(review): generateDS-generated; the Python attribute 'type_'
    # maps to the XML attribute 'type' ('type' is reserved-ish in Python).
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, type_=None, id=None, name=None, extensiontype_=None):
        self.original_tagname_ = None
        super(EntityReferenceType, self).__init__(VCloudExtension, extensiontype_, )
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Subclass-substitution hook (see EntityReferenceType.subclass).
        if EntityReferenceType.subclass:
            return EntityReferenceType.subclass(*args_, **kwargs_)
        else:
            return EntityReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for the type/id/name attributes.
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # No element children of its own; content comes only from super.
        if (
            super(EntityReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='EntityReferenceType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='EntityReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityReferenceType'):
        super(EntityReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityReferenceType')
        # NOTE(review): the already_processed key here is 'type_' but
        # buildAttributes uses 'type' — generated asymmetry, left as-is.
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        # Emit xsi:type (plus namespace declaration) for derived types.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='EntityReferenceType', fromsubclass_=False, pretty_print=True):
        super(EntityReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='EntityReferenceType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(EntityReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityReferenceType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        # Remember a concrete xsi:type so export can round-trip it.
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(EntityReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements of its own; delegate entirely to super.
        super(EntityReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class EntityReferenceType
class EntityLinkType(EntityReferenceType):
    """1.5 Extends EntityReference type by adding relation attribute.
    always Defines the relationship of the link to the object that
    contains it. A relationship can be the name of an operation on
    the object, a reference to a contained or containing object, or
    a reference to an alternate representation of the object. The
    relationship value implies the HTTP verb to use when you use the
    link's href value as a request URL."""
    # NOTE(review): generateDS-generated; adds only the 'rel' attribute.
    subclass = None
    superclass = EntityReferenceType
    def __init__(self, VCloudExtension=None, type_=None, id=None, name=None, rel=None):
        self.original_tagname_ = None
        super(EntityLinkType, self).__init__(VCloudExtension, type_, id, name, )
        self.rel = _cast(None, rel)
    def factory(*args_, **kwargs_):
        # Subclass-substitution hook (see EntityLinkType.subclass).
        if EntityLinkType.subclass:
            return EntityLinkType.subclass(*args_, **kwargs_)
        else:
            return EntityLinkType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for the rel attribute.
    def get_rel(self): return self.rel
    def set_rel(self, rel): self.rel = rel
    def hasContent_(self):
        # No element children of its own; content comes only from super.
        if (
            super(EntityLinkType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='EntityLinkType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityLinkType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='EntityLinkType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityLinkType'):
        super(EntityLinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityLinkType')
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            outfile.write(' rel=%s' % (self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='EntityLinkType', fromsubclass_=False, pretty_print=True):
        super(EntityLinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='EntityLinkType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel="%s",\n' % (self.rel,))
        super(EntityLinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityLinkType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('rel', node)
        if value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = value
        super(EntityLinkType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements of its own; delegate entirely to super.
        super(EntityLinkType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class EntityLinkType
class TasksInProgressType(VCloudExtensibleType):
    """0.9 A list of queued, running, or recently completed tasks."""
    # NOTE(review): generateDS-generated container holding a list of
    # Task children (built as TaskType instances in buildChildren).
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, Task=None):
        self.original_tagname_ = None
        super(TasksInProgressType, self).__init__(VCloudExtension, )
        # Fresh list per instance — avoids the shared-mutable-default trap.
        if Task is None:
            self.Task = []
        else:
            self.Task = Task
    def factory(*args_, **kwargs_):
        # Subclass-substitution hook (see TasksInProgressType.subclass).
        if TasksInProgressType.subclass:
            return TasksInProgressType.subclass(*args_, **kwargs_)
        else:
            return TasksInProgressType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors/mutators for the Task list.
    def get_Task(self): return self.Task
    def set_Task(self, Task): self.Task = Task
    def add_Task(self, value): self.Task.append(value)
    def insert_Task_at(self, index, value): self.Task.insert(index, value)
    def replace_Task_at(self, index, value): self.Task[index] = value
    def hasContent_(self):
        # Decides between <X>...</X> and the self-closing <X/> form.
        if (
            self.Task or
            super(TasksInProgressType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TasksInProgressType', namespacedef_='', pretty_print=True):
        """Serialize this instance as XML to *outfile*, indented at *level*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name the element was originally parsed under.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TasksInProgressType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TasksInProgressType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TasksInProgressType'):
        super(TasksInProgressType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TasksInProgressType')
    def exportChildren(self, outfile, level, namespace_='', name_='TasksInProgressType', fromsubclass_=False, pretty_print=True):
        super(TasksInProgressType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Task_ in self.Task:
            Task_.export(outfile, level, namespace_, name_='Task', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='TasksInProgressType'):
        """Write this instance as Python constructor-literal source code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(TasksInProgressType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(TasksInProgressType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        # The list brackets are emitted even when Task is empty.
        outfile.write('Task=[\n')
        level += 1
        for Task_ in self.Task:
            showIndent(outfile, level)
            outfile.write('model_.TaskType(\n')
            Task_.exportLiteral(outfile, level, name_='TaskType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(TasksInProgressType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Each 'Task' child becomes a TaskType appended to the list.
        if nodeName_ == 'Task':
            obj_ = TaskType.factory()
            obj_.build(child_)
            self.Task.append(obj_)
            obj_.original_tagname_ = 'Task'
        super(TasksInProgressType, self).buildChildren(child_, node, nodeName_, True)
# end class TasksInProgressType
class TaskType(EntityType):
"""0.9 Represents an asynchronous or long-running task in the vCloud
environment. none The execution status of the task. One of:
queued (The task has been queued for execution.), preRunning
(The task is awaiting preprocessing or, if it is a blocking
task, administrative action.), running (The task is runnning.),
success (The task completed with a status of success.), error
(The task encountered an error while running.), canceled (The
task was canceled by the owner or an administrator.), aborted
(The task was aborted by an administrative action.) none The
display name of the operation that is tracked by this task. none
The name of the operation that is tracked by this task. none The
date and time the system started executing the task. May not be
present if the task hasn't been executed yet. none The date and
time that processing of the task was completed. May not be
present if the task is still being executed. none The date and
time at which the task resource will be destroyed and no longer
available for retrieval. May not be present if the task has not
been executed or is still being executed."""
subclass = None
superclass = EntityType
def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, name=None, id=None, Description=None, Tasks=None, status=None, operationName=None, expiryTime=None, startTime=None, operation=None, endTime=None, Owner=None, Error=None, User=None, Organization=None, Progress=None, Params=None):
self.original_tagname_ = None
super(TaskType, self).__init__(VCloudExtension, href, type_, Link, name, id, Description, Tasks, )
self.status = _cast(None, status)
self.operationName = _cast(None, operationName)
if isinstance(expiryTime, basestring):
initvalue_ = datetime_.datetime.strptime(expiryTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = expiryTime
self.expiryTime = initvalue_
if isinstance(startTime, basestring):
initvalue_ = datetime_.datetime.strptime(startTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = startTime
self.startTime = initvalue_
self.operation = _cast(None, operation)
if isinstance(endTime, basestring):
initvalue_ = datetime_.datetime.strptime(endTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = endTime
self.endTime = initvalue_
self.Owner = Owner
self.Error = Error
self.User = User
self.Organization = Organization
self.Progress = Progress
self.Params = Params
def factory(*args_, **kwargs_):
if TaskType.subclass:
return TaskType.subclass(*args_, **kwargs_)
else:
return TaskType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Owner(self): return self.Owner
def set_Owner(self, Owner): self.Owner = Owner
def get_Error(self): return self.Error
def set_Error(self, Error): self.Error = Error
def get_User(self): return self.User
def set_User(self, User): self.User = User
def get_Organization(self): return self.Organization
def set_Organization(self, Organization): self.Organization = Organization
def get_Progress(self): return self.Progress
def set_Progress(self, Progress): self.Progress = Progress
def get_Params(self): return self.Params
def set_Params(self, Params): self.Params = Params
def get_status(self): return self.status
def set_status(self, status): self.status = status
def get_operationName(self): return self.operationName
def set_operationName(self, operationName): self.operationName = operationName
def get_expiryTime(self): return self.expiryTime
def set_expiryTime(self, expiryTime): self.expiryTime = expiryTime
def get_startTime(self): return self.startTime
def set_startTime(self, startTime): self.startTime = startTime
def get_operation(self): return self.operation
def set_operation(self, operation): self.operation = operation
def get_endTime(self): return self.endTime
def set_endTime(self, endTime): self.endTime = endTime
def hasContent_(self):
if (
self.Owner is not None or
self.Error is not None or
self.User is not None or
self.Organization is not None or
self.Progress is not None or
self.Params is not None or
super(TaskType, self).hasContent_()
):
return True
else:
return False
    def export(self, outfile, level, namespace_='', name_='TaskType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*.

        Restores the tag name seen while parsing (original_tagname_) if any,
        writes attributes, then either child content plus a closing tag or a
        self-closing tag when there is no content.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaskType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TaskType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaskType'):
        """Write this element's XML attributes to *outfile*.

        Superclass attributes come first; ``already_processed`` guards
        against emitting an attribute twice.  String attributes are quoted
        and encoded; the *Time attributes are formatted as xsd:dateTime.
        """
        super(TaskType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaskType')
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            outfile.write(' status=%s' % (self.gds_format_string(quote_attrib(self.status).encode(ExternalEncoding), input_name='status'), ))
        if self.operationName is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            outfile.write(' operationName=%s' % (self.gds_format_string(quote_attrib(self.operationName).encode(ExternalEncoding), input_name='operationName'), ))
        if self.expiryTime is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            outfile.write(' expiryTime="%s"' % self.gds_format_datetime(self.expiryTime, input_name='expiryTime'))
        if self.startTime is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            outfile.write(' startTime="%s"' % self.gds_format_datetime(self.startTime, input_name='startTime'))
        if self.operation is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            outfile.write(' operation=%s' % (self.gds_format_string(quote_attrib(self.operation).encode(ExternalEncoding), input_name='operation'), ))
        if self.endTime is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            outfile.write(' endTime="%s"' % self.gds_format_datetime(self.endTime, input_name='endTime'))
    def exportChildren(self, outfile, level, namespace_='', name_='TaskType', fromsubclass_=False, pretty_print=True):
        """Write this element's children to *outfile*.

        Superclass children come first.  Owner/Error/User/Organization are
        exported recursively; Progress (integer) and Params (string) are
        written as simple text elements.
        """
        super(TaskType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Owner is not None:
            self.Owner.export(outfile, level, namespace_, name_='Owner', pretty_print=pretty_print)
        if self.Error is not None:
            self.Error.export(outfile, level, namespace_, name_='Error', pretty_print=pretty_print)
        if self.User is not None:
            self.User.export(outfile, level, namespace_, name_='User', pretty_print=pretty_print)
        if self.Organization is not None:
            self.Organization.export(outfile, level, namespace_, name_='Organization', pretty_print=pretty_print)
        if self.Progress is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sProgress>%s</%sProgress>%s' % (namespace_, self.gds_format_integer(self.Progress, input_name='Progress'), namespace_, eol_))
        if self.Params is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sParams>%s</%sParams>%s' % (namespace_, self.gds_format_string(quote_xml(self.Params).encode(ExternalEncoding), input_name='Params'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='TaskType'):
        """Write this object as Python literal (constructor-style) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write each set attribute as a keyword-argument source line."""
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            showIndent(outfile, level)
            outfile.write('status="%s",\n' % (self.status,))
        if self.operationName is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            showIndent(outfile, level)
            outfile.write('operationName="%s",\n' % (self.operationName,))
        if self.expiryTime is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            showIndent(outfile, level)
            outfile.write('expiryTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.expiryTime, input_name='expiryTime'))
        if self.startTime is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            showIndent(outfile, level)
            outfile.write('startTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.startTime, input_name='startTime'))
        if self.operation is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            showIndent(outfile, level)
            outfile.write('operation="%s",\n' % (self.operation,))
        if self.endTime is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            showIndent(outfile, level)
            outfile.write('endTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.endTime, input_name='endTime'))
        super(TaskType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write each set child as a keyword-argument source line."""
        super(TaskType, self).exportLiteralChildren(outfile, level, name_)
        if self.Owner is not None:
            showIndent(outfile, level)
            outfile.write('Owner=model_.ReferenceType(\n')
            self.Owner.exportLiteral(outfile, level, name_='Owner')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Error is not None:
            showIndent(outfile, level)
            outfile.write('Error=model_.ErrorType(\n')
            self.Error.exportLiteral(outfile, level, name_='Error')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.User is not None:
            showIndent(outfile, level)
            outfile.write('User=model_.ReferenceType(\n')
            self.User.exportLiteral(outfile, level, name_='User')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Organization is not None:
            showIndent(outfile, level)
            outfile.write('Organization=model_.ReferenceType(\n')
            self.Organization.exportLiteral(outfile, level, name_='Organization')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Progress is not None:
            showIndent(outfile, level)
            outfile.write('Progress=%d,\n' % self.Progress)
        if self.Params is not None:
            showIndent(outfile, level)
            outfile.write('Params=%s,\n' % quote_python(self.Params).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        """Populate attribute fields from the lxml element *node*.

        status/operationName/operation are stored as raw strings; the *Time
        attributes are parsed into datetime objects, raising ValueError
        (naming the offending attribute) on a malformed value.  Superclass
        attributes are handled last.
        """
        value = find_attr_value_('status', node)
        if value is not None and 'status' not in already_processed:
            already_processed.add('status')
            self.status = value
        value = find_attr_value_('operationName', node)
        if value is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            self.operationName = value
        value = find_attr_value_('expiryTime', node)
        if value is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            try:
                self.expiryTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (expiryTime): %s' % exp)
        value = find_attr_value_('startTime', node)
        if value is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            try:
                self.startTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (startTime): %s' % exp)
        value = find_attr_value_('operation', node)
        if value is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            self.operation = value
        value = find_attr_value_('endTime', node)
        if value is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            try:
                self.endTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (endTime): %s' % exp)
        super(TaskType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate one parsed child element, dispatching on *nodeName_*.

        Owner/User/Organization honour xsi:type substitution through
        get_class_obj_; Progress is validated as an integer; Params is a
        plain string.  Unmatched tags fall through to the superclass.
        """
        if nodeName_ == 'Owner':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'Error':
            obj_ = ErrorType.factory()
            obj_.build(child_)
            self.Error = obj_
            obj_.original_tagname_ = 'Error'
        elif nodeName_ == 'User':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.User = obj_
            obj_.original_tagname_ = 'User'
        elif nodeName_ == 'Organization':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Organization = obj_
            obj_.original_tagname_ = 'Organization'
        elif nodeName_ == 'Progress':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Progress')
            self.Progress = ival_
        elif nodeName_ == 'Params':
            Params_ = child_.text
            Params_ = self.gds_validate_string(Params_, node, 'Params')
            self.Params = Params_
        super(TaskType, self).buildChildren(child_, node, nodeName_, True)
# end class TaskType
class TaskOperationListType(ResourceType):
    """List of operation names. 1.5

    Generated binding for the vCloud TaskOperationList element; holds zero
    or more Operation name strings on top of the ResourceType base.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Operation=None):
        self.original_tagname_ = None
        super(TaskOperationListType, self).__init__(VCloudExtension, href, type_, Link, )
        # Avoid a shared mutable default: each instance gets its own list.
        if Operation is None:
            self.Operation = []
        else:
            self.Operation = Operation
    def factory(*args_, **kwargs_):
        """Instantiate this class or its registered subclass override."""
        if TaskOperationListType.subclass:
            return TaskOperationListType.subclass(*args_, **kwargs_)
        else:
            return TaskOperationListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the repeated Operation element (list of strings).
    def get_Operation(self): return self.Operation
    def set_Operation(self, Operation): self.Operation = Operation
    def add_Operation(self, value): self.Operation.append(value)
    def insert_Operation_at(self, index, value): self.Operation.insert(index, value)
    def replace_Operation_at(self, index, value): self.Operation[index] = value
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            self.Operation or
            super(TaskOperationListType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TaskOperationListType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaskOperationListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TaskOperationListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaskOperationListType'):
        # No attributes of its own; everything comes from ResourceType.
        super(TaskOperationListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaskOperationListType')
    def exportChildren(self, outfile, level, namespace_='', name_='TaskOperationListType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then one <Operation> element per entry."""
        super(TaskOperationListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Operation_ in self.Operation:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sOperation>%s</%sOperation>%s' % (namespace_, self.gds_format_string(quote_xml(Operation_).encode(ExternalEncoding), input_name='Operation'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='TaskOperationListType'):
        """Write this object as Python literal (constructor-style) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(TaskOperationListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the Operation list as a Python list literal."""
        super(TaskOperationListType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Operation=[\n')
        level += 1
        for Operation_ in self.Operation:
            showIndent(outfile, level)
            outfile.write('%s,\n' % quote_python(Operation_).encode(ExternalEncoding))
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from the lxml element *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(TaskOperationListType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Collect each <Operation> child's text into self.Operation."""
        if nodeName_ == 'Operation':
            Operation_ = child_.text
            Operation_ = self.gds_validate_string(Operation_, node, 'Operation')
            self.Operation.append(Operation_)
        super(TaskOperationListType, self).buildChildren(child_, node, nodeName_, True)
# end class TaskOperationListType
class LinkType(ReferenceType):
    """0.9 Extends reference type by adding relation attribute. Defines a
    hyper-link with a relationship, hyper-link reference, and an
    optional media type. always Defines the relationship of the link
    to the object that contains it. A relationship can be the name
    of an operation on the object, a reference to a contained or
    containing object, or a reference to an alternate representation
    of the object. The relationship value implies the HTTP verb to
    use when you use the link's href as a request URL."""
    subclass = None
    superclass = ReferenceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, rel=None):
        self.original_tagname_ = None
        super(LinkType, self).__init__(VCloudExtension, href, type_, id, name, )
        # 'rel' carries the link relation string (see class docstring).
        self.rel = _cast(None, rel)
    def factory(*args_, **kwargs_):
        """Instantiate this class or its registered subclass override."""
        if LinkType.subclass:
            return LinkType.subclass(*args_, **kwargs_)
        else:
            return LinkType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the 'rel' attribute.
    def get_rel(self): return self.rel
    def set_rel(self, rel): self.rel = rel
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            super(LinkType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='LinkType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='LinkType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinkType'):
        """Write inherited attributes, then the 'rel' attribute."""
        super(LinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            outfile.write(' rel=%s' % (self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='LinkType', fromsubclass_=False, pretty_print=True):
        # LinkType adds no children of its own.
        super(LinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='LinkType'):
        """Write this object as Python literal (constructor-style) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel="%s",\n' % (self.rel,))
        super(LinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(LinkType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this instance from the lxml element *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('rel', node)
        if value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = value
        super(LinkType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No children beyond ReferenceType's; delegate everything upward.
        super(LinkType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class LinkType
class VdcTemplateListType(ResourceType):
    """0.9 Represents an VDC Template list."""
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, VdcTemplate=None):
        self.original_tagname_ = None
        super(VdcTemplateListType, self).__init__(VCloudExtension, href, type_, Link, )
        # Avoid a shared mutable default: each instance gets its own list.
        if VdcTemplate is None:
            self.VdcTemplate = []
        else:
            self.VdcTemplate = VdcTemplate
    def factory(*args_, **kwargs_):
        """Instantiate this class or its registered subclass override."""
        if VdcTemplateListType.subclass:
            return VdcTemplateListType.subclass(*args_, **kwargs_)
        else:
            return VdcTemplateListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the repeated VdcTemplate element (list of references).
    def get_VdcTemplate(self): return self.VdcTemplate
    def set_VdcTemplate(self, VdcTemplate): self.VdcTemplate = VdcTemplate
    def add_VdcTemplate(self, value): self.VdcTemplate.append(value)
    def insert_VdcTemplate_at(self, index, value): self.VdcTemplate.insert(index, value)
    def replace_VdcTemplate_at(self, index, value): self.VdcTemplate[index] = value
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            self.VdcTemplate or
            super(VdcTemplateListType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VdcTemplateListType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VdcTemplateListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VdcTemplateListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VdcTemplateListType'):
        # No attributes of its own; everything comes from ResourceType.
        super(VdcTemplateListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VdcTemplateListType')
    def exportChildren(self, outfile, level, namespace_='', name_='VdcTemplateListType', fromsubclass_=False, pretty_print=True):
        """Write superclass children, then each VdcTemplate reference."""
        super(VdcTemplateListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for VdcTemplate_ in self.VdcTemplate:
            VdcTemplate_.export(outfile, level, namespace_, name_='VdcTemplate', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VdcTemplateListType'):
        """Write this object as Python literal (constructor-style) source."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(VdcTemplateListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the VdcTemplate list as nested constructor-style source."""
        super(VdcTemplateListType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('VdcTemplate=[\n')
        level += 1
        for VdcTemplate_ in self.VdcTemplate:
            showIndent(outfile, level)
            outfile.write('model_.ReferenceType(\n')
            VdcTemplate_.exportLiteral(outfile, level, name_='ReferenceType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from the lxml element *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(VdcTemplateListType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Collect each VdcTemplate child (honouring xsi:type substitution)."""
        if nodeName_ == 'VdcTemplate':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.VdcTemplate.append(obj_)
            obj_.original_tagname_ = 'VdcTemplate'
        super(VdcTemplateListType, self).buildChildren(child_, node, nodeName_, True)
# end class VdcTemplateListType
# Maps root-element tag names to the generated classes that parse them.
# Consulted by get_root_tag(); note that several distinct tags deliberately
# share ReferenceType.
GDSClassesMapping = {
    'VdcTemplateList': VdcTemplateListType,
    'Metadata': MetadataType,
    'AccessSettings': AccessSettingsType,
    'Reference': ReferenceType,
    'AccessSetting': AccessSettingType,
    'ControlAccessParams': ControlAccessParamsType,
    'MetadataEntry': MetadataEntryType,
    'QueryList': QueryListType,
    'Entity': EntityType,
    'VdcTemplate': ReferenceType,
    'Tasks': TasksInProgressType,
    'Task': TaskType,
    'MetadataValue': MetadataValueType,
    'Link': LinkType,
    'User': ReferenceType,
    'Organization': ReferenceType,
    'Error': ErrorType,
    'Owner': ReferenceType,
    'References': ReferencesType,
    'VCloudExtension': VCloudExtensionType,
    'Subject': ReferenceType,
}
# Help text shown by usage(); -s suppresses echoing the parsed document.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print command-line usage and exit with status 1."""
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Map the root element *node* to ``(tag_name, parser_class)``.

    The class is looked up first in GDSClassesMapping, then among the
    module globals; the class may be None when the tag is unknown.
    """
    tag_name = Tag_pattern_.match(node.tag).groups()[-1]
    cls = GDSClassesMapping.get(tag_name)
    if cls is None:
        cls = globals().get(tag_name)
    return tag_name, cls
def parse(inFileName, silence=False):
    """Parse the XML file *inFileName* and return the root binding object.

    Falls back to VdcTemplateListType when the root tag is unknown.  Unless
    *silence* is true, the reconstructed document is pretty-printed to
    stdout.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='',
            pretty_print=True)
    return rootObj
def parseEtree(inFileName, silence=False):
    """Parse *inFileName* and also build an etree plus node mappings.

    Returns ``(rootObj, rootElement, mapping, reverse_mapping)``.  Unless
    *silence* is true, the rebuilt etree is printed to stdout.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse XML from the string *inString* and return the root object.

    Unless *silence* is true, the reconstructed document is echoed to
    stdout.  The StringIO import falls back to ``io.StringIO`` so the
    function also works on Python 3, where the Python-2-only ``StringIO``
    module no longer exists.
    """
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse *inFileName* and, unless *silence*, emit it as Python source
    (a ``model_.rootClass(...)`` constructor expression)."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('#from VdcTemplateList import *\n\n')
        sys.stdout.write('import VdcTemplateList as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
        sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: parse the XML file named on argv.

    Accepts the optional ``-s`` flag advertised in USAGE_TEXT (the
    original generated main() rejected it), which suppresses echoing the
    parsed document to stdout.
    """
    args = sys.argv[1:]
    silence = False
    if args and args[0] == '-s':
        silence = True
        args = args[1:]
    if len(args) == 1:
        parse(args[0], silence=silence)
    else:
        usage()


if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Explicit public API of this generated module (consumed by
# "from VdcTemplateList import *").
__all__ = [
    "AccessSettingType",
    "AccessSettingsType",
    "CapacityType",
    "CapacityWithUsageType",
    "ContainerType",
    "ControlAccessParamsType",
    "EntityLinkType",
    "EntityReferenceType",
    "EntityType",
    "ErrorType",
    "LinkType",
    "MetadataEntryType",
    "MetadataType",
    "MetadataValueType",
    "OwnerType",
    "ParamsType",
    "QueryListType",
    "ReferenceType",
    "ReferencesType",
    "ResourceReferenceType",
    "ResourceType",
    "TaskOperationListType",
    "TaskType",
    "TasksInProgressType",
    "VCloudExtensibleType",
    "VCloudExtensionType",
    "VdcTemplateListType"
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Apr 14 22:18:33 2015 by generateDS.py version 2.15a.
#
# Command line options:
# ('-o', 'schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py')
#
# Command line arguments:
# /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd
#
# Command line:
# /home/eli/qa/.venv/src/pyvcloud/.venv/bin/generateDS.py -o "schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py" /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd
#
# Current working directory (os.getcwd()):
# pyvcloud
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
from lxml import etree as etree_
Validate_simpletypes_ = True
def parsexml_(*args, **kwargs):
    """Parse an XML document with lxml, defaulting to the ElementTree
    compatible parser so that, e.g., comments are ignored."""
    if 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
        # Matches a trailing timezone offset such as "+05:30" or "-14:00".
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            """Minimal tzinfo with a fixed UTC offset and name (no DST)."""
            def __init__(self, offset, name):
                # offset is in minutes east of UTC.
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                return None
        def gds_format_string(self, input_data, input_name=''):
            """Return *input_data* unchanged (hook for string formatting)."""
            return input_data
        def gds_validate_string(self, input_data, node=None, input_name=''):
            """Return *input_data*, normalizing falsy values (None, '') to ''."""
            if not input_data:
                return ''
            else:
                return input_data
        def gds_format_base64(self, input_data, input_name=''):
            """Return *input_data* base64-encoded."""
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            """Format an integer for XML output."""
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            # NOTE(review): elements must already be strings; ' '.join does
            # not convert integers.
            return '%s' % ' '.join(input_data)
        def gds_validate_integer_list(
                self, input_data, node=None, input_name=''):
            """Split a whitespace-separated integer list, raising a parse
            error on any non-integer token; returns the token list."""
            values = input_data.split()
            for value in values:
                try:
                    int(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return values
        def gds_format_float(self, input_data, input_name=''):
            # NOTE(review): '%.15f' followed by rstrip('0') can yield forms
            # like '100.' (trailing dot) -- confirm downstream consumers
            # accept that lexical form.
            return ('%.15f' % input_data).rstrip('0')
        def gds_validate_float(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_float_list(
                self, input_data, node=None, input_name=''):
            """Split a whitespace-separated float list, raising a parse
            error on any non-float token; returns the token list."""
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return values
        def gds_format_double(self, input_data, input_name=''):
            """Format a double in scientific notation."""
            return '%e' % input_data
        def gds_validate_double(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_double_list(
                self, input_data, node=None, input_name=''):
            """Split a whitespace-separated double list, raising a parse
            error on any non-float token; returns the token list."""
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return values
        def gds_format_boolean(self, input_data, input_name=''):
            """Render a boolean-ish value as lowercase text ('true'/'false')."""
            return ('%s' % input_data).lower()
        def gds_validate_boolean(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_boolean_list(
                self, input_data, node=None, input_name=''):
            """Split a whitespace-separated xsd:boolean list; every token
            must be one of 'true', '1', 'false', '0'."""
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node,
                        'Requires sequence of booleans '
                        '("true", "1", "false", "0")')
            return values
        def gds_validate_datetime(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_datetime(self, input_data, input_name=''):
            """Format a datetime as an xsd:dateTime string.

            Fractional seconds appear only when microsecond is non-zero; an
            aware datetime gets a 'Z' suffix for UTC or a '+HH:MM'/'-HH:MM'
            offset.
            """
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    # timedelta normalizes: days may be -1 for negative
                    # offsets, hence seconds + 86400 * days.
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
        def gds_validate_date(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_date(self, input_data, input_name=''):
            """Format a date as an xsd:date string, with an optional
            timezone suffix when the value is timezone-aware."""
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            try:
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            except AttributeError:
                # datetime.date has no tzinfo attribute; plain dates simply
                # get no timezone suffix.
                pass
            return _svalue
        @classmethod
        def gds_parse_date(cls, input_data):
            """Parse an xsd:date string into a datetime.date.

            A trailing 'Z' or numeric offset is accepted while parsing but
            effectively discarded by the final .date() call.
            """
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
            dt = dt.replace(tzinfo=tz)
            return dt.date()
    def gds_validate_time(self, input_data, node=None, input_name=''):
        # Pass-through validator: time values are accepted unchanged.
        return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
    def gds_str_lower(self, instring):
        # Return instring lowercased.
        return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
    def gds_build_any(self, node, type_name=None):
        # Stub for xs:any content: always returns None here.
        return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding applied when .encode()-ing attribute strings during export.
ExternalEncoding = 'ascii'
# Splits an optional '{namespace}' prefix from a tag name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace characters.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# Matches complete CDATA sections so quoting can skip over them.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces per nesting level when pretty-printing."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    "Escape markup chars, but do not modify CDATA sections."
    if not inStr:
        return ''
    # Coerce non-string values via %-formatting; replaces the
    # Python-2-only 'basestring' test so this also runs under Python 3.
    s1 = inStr if isinstance(inStr, str) else '%s' % inStr
    s2 = ''
    pos = 0
    matchobjects = CDATA_pattern_.finditer(s1)
    for mo in matchobjects:
        # Escape the text before this CDATA section, then copy the
        # CDATA section through unmodified.
        s3 = s1[pos:mo.start()]
        s2 += quote_xml_aux(s3)
        s2 += s1[mo.start():mo.end()]
        pos = mo.end()
    s3 = s1[pos:]
    s2 += quote_xml_aux(s3)
    return s2
def quote_xml_aux(inStr):
    """Escape the XML special characters in inStr.

    The original lines were self-replacements (e.g. replace('&', '&'))
    -- no-ops left behind by entity mangling.  Restore the standard
    escapes; '&' must be replaced first so the entities produced for
    '<' and '>' are not double-escaped.
    """
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
    """Render a string as a Python source literal, picking a quote style
    that avoids escaping single quotes and handles embedded newlines."""
    text = inStr
    has_newline = text.find('\n') != -1
    if text.find("'") == -1:
        # No single quotes: single-quoted (triple when multiline).
        return ("'''%s'''" if has_newline else "'%s'") % text
    # Contains single quotes: switch to double quoting and escape any
    # embedded double quotes.
    if text.find('"') != -1:
        text = text.replace('"', '\\"')
    return ('"""%s"""' if has_newline else '"%s"') % text
def get_all_text_(node):
    """Concatenate node.text with the tail text of every child element."""
    pieces = [node.text] if node.text is not None else []
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up an attribute on node, resolving an optional 'prefix:name'
    form through node.nsmap to the '{uri}name' key lxml uses."""
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, name = parts
        namespace = node.nsmap.get(prefix)
        if namespace is not None:
            return attrs.get('{%s}%s' % (namespace, name, ))
    # Unknown prefix or malformed name: no value.
    return None
class GDSParseError(Exception):
    """Raised (via raise_parse_error) when XML input cannot be parsed
    into the generated bindings."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError with element context (and line number under lxml)."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml exposes the source line of each parsed element.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holds one unit of mixed XML content -- plain text, a simple typed
    value, or a nested complex object -- tagged with category and
    content_type so it can be re-serialized in document order."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is unused; kept as-is
        # for compatibility with generated callers.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        # Serialize this unit to outfile, dispatching on category.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        # Write a simple typed value as <name>value</name>, formatting
        # the value according to content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        # Append this unit to an ElementTree element, preserving mixed
        # text ordering via the .text / .tail convention.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        # Render the simple value as text for an etree node.
        # NOTE(review): 'text' stays unbound for content types not listed
        # below (e.g. TypeNone/TypeText) -- presumably those never reach
        # this method; confirm before relying on it.
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        # Emit Python-literal construction code for this unit.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its data
    type (a single type or a chain of types), and a container flag."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        # Return the raw value, which may be a list (type chain).
        return self.data_type
    def get_data_type(self):
        # For a chain, the effective type is the last entry; an empty
        # chain defaults to xs:string.
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if self.data_type else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class VCloudExtensionType(GeneratedsSuper):
    """0.9 Cloud API extension type with any elements and any attributes.
    always Determines whether server should fail if extension is not
    understood."""
    subclass = None
    superclass = None
    def __init__(self, required=True, anytypeobjs_=None):
        self.original_tagname_ = None
        # 'required' is exported as a boolean XML attribute.
        self.required = _cast(bool, required)
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if VCloudExtensionType.subclass:
            return VCloudExtensionType.subclass(*args_, **kwargs_)
        else:
            return VCloudExtensionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value):
        # BUGFIX: previously indexed the nonexistent attribute
        # 'self._anytypeobjs_' (note the leading underscore), which
        # raised AttributeError at runtime.  Insert into the real list,
        # matching the insert_*_at helpers of the sibling classes.
        self.anytypeobjs_.insert(index, value)
    def get_required(self): return self.required
    def set_required(self, required): self.required = required
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def hasContent_(self):
        # True when there is child content to serialize.
        if (
            self.anytypeobjs_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VCloudExtensionType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes plus any children) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VCloudExtensionType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VCloudExtensionType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VCloudExtensionType'):
        # Write wildcard (xs:anyAttribute) attributes, re-qualifying any
        # '{uri}name' keys with generated xmlns prefixes, then 'required'.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.add(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.add(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (
                                name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (
                                unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (
                                unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.add(name)
                        outfile.write(' %s=%s' % (
                            name, quote_attrib(value), ))
        if self.required is not None and 'required' not in already_processed:
            already_processed.add('required')
            outfile.write(' required="%s"' % self.gds_format_boolean(self.required, input_name='required'))
    def exportChildren(self, outfile, level, namespace_='', name_='VCloudExtensionType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VCloudExtensionType'):
        # Emit Python-literal construction code for this object.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.required is not None and 'required' not in already_processed:
            already_processed.add('required')
            showIndent(outfile, level)
            outfile.write('required=%s,\n' % (self.required,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s="%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from an element tree node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('required', node)
        if value is not None and 'required' not in already_processed:
            already_processed.add('required')
            if value in ('true', '1'):
                self.required = True
            elif value in ('false', '0'):
                self.required = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        obj_ = self.gds_build_any(child_, 'VCloudExtensionType')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class VCloudExtensionType
class VCloudExtensibleType(GeneratedsSuper):
    """0.9 A base abstract type for all complex types that support
    extensions."""
    subclass = None
    superclass = None
    def __init__(self, VCloudExtension=None, extensiontype_=None):
        self.original_tagname_ = None
        if VCloudExtension is None:
            self.VCloudExtension = []
        else:
            self.VCloudExtension = VCloudExtension
        self.anyAttributes_ = {}
        # extensiontype_ carries an xsi:type override for serialization.
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        if VCloudExtensibleType.subclass:
            return VCloudExtensibleType.subclass(*args_, **kwargs_)
        else:
            return VCloudExtensibleType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_VCloudExtension(self): return self.VCloudExtension
    def set_VCloudExtension(self, VCloudExtension): self.VCloudExtension = VCloudExtension
    def add_VCloudExtension(self, value): self.VCloudExtension.append(value)
    def insert_VCloudExtension_at(self, index, value): self.VCloudExtension.insert(index, value)
    def replace_VCloudExtension_at(self, index, value): self.VCloudExtension[index] = value
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # True when there is child content to serialize.
        if (
            self.VCloudExtension
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VCloudExtensibleType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes plus children) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VCloudExtensibleType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VCloudExtensibleType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VCloudExtensibleType'):
        # Write wildcard (xs:anyAttribute) attributes, re-qualifying any
        # '{uri}name' keys with generated xmlns prefixes, then xsi:type.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.add(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.add(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (
                                name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (
                                unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (
                                unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.add(name)
                        outfile.write(' %s=%s' % (
                            name, quote_attrib(value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='VCloudExtensibleType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for VCloudExtension_ in self.VCloudExtension:
            VCloudExtension_.export(outfile, level, namespace_, name_='VCloudExtension', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VCloudExtensibleType'):
        # Emit Python-literal construction code for this object.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s="%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('VCloudExtension=[\n')
        level += 1
        for VCloudExtension_ in self.VCloudExtension:
            showIndent(outfile, level)
            outfile.write('model_.VCloudExtensionType(\n')
            VCloudExtension_.exportLiteral(outfile, level, name_='VCloudExtensionType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from an element tree node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes, then capture xsi:type if present.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'VCloudExtension':
            obj_ = VCloudExtensionType.factory()
            obj_.build(child_)
            self.VCloudExtension.append(obj_)
            obj_.original_tagname_ = 'VCloudExtension'
# end class VCloudExtensibleType
class ErrorType(VCloudExtensibleType):
    """0.9 The standard error message type used in the vCloud REST API.
    none An one line, human-readable message describing the error
    that occurred. none The class of the error. Matches the HTTP
    status code. none Specific API error code (for example - can
    indicate that vApp power on failed by some reason) none A
    vendor/implementation specific error code that point to specific
    modules/parts of the code and can make problem diagnostics
    easier. 1.0none The stack trace of the exception which when
    examined might make problem diagnostics easier."""
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, vendorSpecificErrorCode=None, stackTrace=None, message=None, minorErrorCode=None, majorErrorCode=None):
        self.original_tagname_ = None
        super(ErrorType, self).__init__(VCloudExtension, )
        self.vendorSpecificErrorCode = _cast(None, vendorSpecificErrorCode)
        self.stackTrace = _cast(None, stackTrace)
        self.message = _cast(None, message)
        self.minorErrorCode = _cast(None, minorErrorCode)
        # majorErrorCode is an integer attribute (matches HTTP status).
        self.majorErrorCode = _cast(int, majorErrorCode)
    def factory(*args_, **kwargs_):
        if ErrorType.subclass:
            return ErrorType.subclass(*args_, **kwargs_)
        else:
            return ErrorType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_vendorSpecificErrorCode(self): return self.vendorSpecificErrorCode
    def set_vendorSpecificErrorCode(self, vendorSpecificErrorCode): self.vendorSpecificErrorCode = vendorSpecificErrorCode
    def get_stackTrace(self): return self.stackTrace
    def set_stackTrace(self, stackTrace): self.stackTrace = stackTrace
    def get_message(self): return self.message
    def set_message(self, message): self.message = message
    def get_minorErrorCode(self): return self.minorErrorCode
    def set_minorErrorCode(self, minorErrorCode): self.minorErrorCode = minorErrorCode
    def get_majorErrorCode(self): return self.majorErrorCode
    def set_majorErrorCode(self, majorErrorCode): self.majorErrorCode = majorErrorCode
    def hasContent_(self):
        # ErrorType itself adds only attributes; content comes from base.
        if (
            super(ErrorType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='ErrorType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes plus children) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ErrorType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ErrorType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ErrorType'):
        super(ErrorType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ErrorType')
        if self.vendorSpecificErrorCode is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            outfile.write(' vendorSpecificErrorCode=%s' % (self.gds_format_string(quote_attrib(self.vendorSpecificErrorCode).encode(ExternalEncoding), input_name='vendorSpecificErrorCode'), ))
        if self.stackTrace is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            outfile.write(' stackTrace=%s' % (self.gds_format_string(quote_attrib(self.stackTrace).encode(ExternalEncoding), input_name='stackTrace'), ))
        if self.message is not None and 'message' not in already_processed:
            already_processed.add('message')
            outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
        if self.minorErrorCode is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            outfile.write(' minorErrorCode=%s' % (self.gds_format_string(quote_attrib(self.minorErrorCode).encode(ExternalEncoding), input_name='minorErrorCode'), ))
        if self.majorErrorCode is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            outfile.write(' majorErrorCode="%s"' % self.gds_format_integer(self.majorErrorCode, input_name='majorErrorCode'))
    def exportChildren(self, outfile, level, namespace_='', name_='ErrorType', fromsubclass_=False, pretty_print=True):
        super(ErrorType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ErrorType'):
        # Emit Python-literal construction code for this object.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.vendorSpecificErrorCode is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            showIndent(outfile, level)
            outfile.write('vendorSpecificErrorCode="%s",\n' % (self.vendorSpecificErrorCode,))
        if self.stackTrace is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            showIndent(outfile, level)
            outfile.write('stackTrace="%s",\n' % (self.stackTrace,))
        if self.message is not None and 'message' not in already_processed:
            already_processed.add('message')
            showIndent(outfile, level)
            outfile.write('message="%s",\n' % (self.message,))
        if self.minorErrorCode is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            showIndent(outfile, level)
            outfile.write('minorErrorCode="%s",\n' % (self.minorErrorCode,))
        if self.majorErrorCode is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            showIndent(outfile, level)
            outfile.write('majorErrorCode=%d,\n' % (self.majorErrorCode,))
        super(ErrorType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ErrorType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        # Populate this object from an element tree node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('vendorSpecificErrorCode', node)
        if value is not None and 'vendorSpecificErrorCode' not in already_processed:
            already_processed.add('vendorSpecificErrorCode')
            self.vendorSpecificErrorCode = value
        value = find_attr_value_('stackTrace', node)
        if value is not None and 'stackTrace' not in already_processed:
            already_processed.add('stackTrace')
            self.stackTrace = value
        value = find_attr_value_('message', node)
        if value is not None and 'message' not in already_processed:
            already_processed.add('message')
            self.message = value
        value = find_attr_value_('minorErrorCode', node)
        if value is not None and 'minorErrorCode' not in already_processed:
            already_processed.add('minorErrorCode')
            self.minorErrorCode = value
        value = find_attr_value_('majorErrorCode', node)
        if value is not None and 'majorErrorCode' not in already_processed:
            already_processed.add('majorErrorCode')
            try:
                self.majorErrorCode = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(ErrorType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(ErrorType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ErrorType
class ResourceType(VCloudExtensibleType):
"""0.9 The base type for all objects in the vCloud model. Has an
optional list of links and href and type attributes. always
Contains the URI to the entity. always Contains the type of the
entity."""
subclass = None
superclass = VCloudExtensibleType
def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, extensiontype_=None):
self.original_tagname_ = None
super(ResourceType, self).__init__(VCloudExtension, extensiontype_, )
self.href = _cast(None, href)
self.type_ = _cast(None, type_)
if Link is None:
self.Link = []
else:
self.Link = Link
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if ResourceType.subclass:
return ResourceType.subclass(*args_, **kwargs_)
else:
return ResourceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Link(self): return self.Link
def set_Link(self, Link): self.Link = Link
def add_Link(self, value): self.Link.append(value)
def insert_Link_at(self, index, value): self.Link.insert(index, value)
def replace_Link_at(self, index, value): self.Link[index] = value
def get_href(self): return self.href
def set_href(self, href): self.href = href
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.Link or
super(ResourceType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ResourceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ResourceType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResourceType'):
super(ResourceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceType')
if self.href is not None and 'href' not in already_processed:
already_processed.add('href')
outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='ResourceType', fromsubclass_=False, pretty_print=True):
        # Emit child elements; parent-class children are written first.
        super(ResourceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        # eol_ is computed for generator symmetry; Link elements manage their
        # own line endings via their export() method.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Link_ in self.Link:
            Link_.export(outfile, level, namespace_, name_='Link', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ResourceType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Write attributes in Python keyword-argument literal form.
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            showIndent(outfile, level)
            outfile.write('href="%s",\n' % (self.href,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        super(ResourceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        # Write children as a ``Link=[model_.LinkType(...), ...]`` literal list.
        # NOTE(review): the list header/footer is emitted even when self.Link
        # is empty — consistent with the code generator's output style.
        super(ResourceType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Link=[\n')
        level += 1
        for Link_ in self.Link:
            showIndent(outfile, level)
            outfile.write('model_.LinkType(\n')
            Link_.exportLiteral(outfile, level, name_='LinkType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('href', node)
if value is not None and 'href' not in already_processed:
already_processed.add('href')
self.href = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(ResourceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch one child element by tag name; unrecognized tags fall
        # through to the parent class.
        if nodeName_ == 'Link':
            obj_ = LinkType.factory()
            obj_.build(child_)
            self.Link.append(obj_)
            obj_.original_tagname_ = 'Link'
        super(ResourceType, self).buildChildren(child_, node, nodeName_, True)
# end class ResourceType
class ParamsType(VCloudExtensibleType):
    """0.9 A basic type used to specify parameters for operations. always A
    name as parameter."""
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, name=None, Description=None):
        self.original_tagname_ = None
        super(ParamsType, self).__init__(VCloudExtension, )
        self.name = _cast(None, name)
        self.Description = Description
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if ParamsType.subclass:
            return ParamsType.subclass(*args_, **kwargs_)
        else:
            return ParamsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    # True when any child element (own or inherited) must be serialized.
    def hasContent_(self):
        if (
            self.Description is not None or
            super(ParamsType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='ParamsType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ParamsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ParamsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ParamsType'):
        super(ParamsType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ParamsType')
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='ParamsType', fromsubclass_=False, pretty_print=True):
        super(ParamsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDescription>%s</%sDescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.Description).encode(ExternalEncoding), input_name='Description'), namespace_, eol_))
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='ParamsType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(ParamsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ParamsType, self).exportLiteralChildren(outfile, level, name_)
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=%s,\n' % quote_python(self.Description).encode(ExternalEncoding))
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        super(ParamsType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            Description_ = child_.text
            Description_ = self.gds_validate_string(Description_, node, 'Description')
            self.Description = Description_
        super(ParamsType, self).buildChildren(child_, node, nodeName_, True)
# end class ParamsType
class ReferenceType(VCloudExtensibleType):
    """0.9 A reference to a resource. Contains an href attribute and
    optional name and type attributes. always Contains the URI to
    the entity. always The resource identifier, expressed in URN
    format. The value of this attribute uniquely identifies the
    resource, persists for the life of the resource, and is never
    reused. always Contains the type of the entity. always
    Contains the name of the entity."""
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, extensiontype_=None):
        self.original_tagname_ = None
        super(ReferenceType, self).__init__(VCloudExtension, extensiontype_, )
        self.href = _cast(None, href)
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        self.extensiontype_ = extensiontype_
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if ReferenceType.subclass:
            return ReferenceType.subclass(*args_, **kwargs_)
        else:
            return ReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_href(self): return self.href
    def set_href(self, href): self.href = href
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # ReferenceType has no child elements of its own; content comes only
    # from the parent class.
    def hasContent_(self):
        if (
            super(ReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='ReferenceType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferenceType'):
        super(ReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ReferenceType')
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        # xsi:type is emitted only when this instance stands in for a derived type.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='ReferenceType', fromsubclass_=False, pretty_print=True):
        super(ReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='ReferenceType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            showIndent(outfile, level)
            outfile.write('href="%s",\n' % (self.href,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(ReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ReferenceType, self).exportLiteralChildren(outfile, level, name_)
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('href', node)
        if value is not None and 'href' not in already_processed:
            already_processed.add('href')
            self.href = value
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(ReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No own child elements; delegate everything to the parent class.
        super(ReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ReferenceType
class ResourceReferenceType(ReferenceType):
    """0.9 Represents a reference to a resource. Reference that contains an
    href attribute, an optional name and type attributes, and a
    resource status attribute. none Status of a resource."""
    subclass = None
    superclass = ReferenceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, status=None):
        self.original_tagname_ = None
        super(ResourceReferenceType, self).__init__(VCloudExtension, href, type_, id, name, )
        # status is an integer attribute; _cast coerces string input.
        self.status = _cast(int, status)
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if ResourceReferenceType.subclass:
            return ResourceReferenceType.subclass(*args_, **kwargs_)
        else:
            return ResourceReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_status(self): return self.status
    def set_status(self, status): self.status = status
    # Content comes only from the parent class (status is an attribute).
    def hasContent_(self):
        if (
            super(ResourceReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='ResourceReferenceType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ResourceReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResourceReferenceType'):
        super(ResourceReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceReferenceType')
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            outfile.write(' status="%s"' % self.gds_format_integer(self.status, input_name='status'))
    def exportChildren(self, outfile, level, namespace_='', name_='ResourceReferenceType', fromsubclass_=False, pretty_print=True):
        super(ResourceReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='ResourceReferenceType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            showIndent(outfile, level)
            outfile.write('status=%d,\n' % (self.status,))
        super(ResourceReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ResourceReferenceType, self).exportLiteralChildren(outfile, level, name_)
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('status', node)
        if value is not None and 'status' not in already_processed:
            already_processed.add('status')
            # Reject non-integer status values with a parse error.
            try:
                self.status = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(ResourceReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No own child elements; delegate everything to the parent class.
        super(ResourceReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ResourceReferenceType
class ContainerType(ResourceType):
    """Container for query result sets. none Query name that generated this
    result set. none Page of the result set that this container
    holds. The first page is page number 1. none Page size, as a
    number of records or references. none Total number of records or
    references in the container."""
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None, extensiontype_=None):
        self.original_tagname_ = None
        super(ContainerType, self).__init__(VCloudExtension, href, type_, Link, extensiontype_, )
        # Paging attributes are integers; _cast coerces string input.
        self.total = _cast(int, total)
        self.name = _cast(None, name)
        self.pageSize = _cast(int, pageSize)
        self.page = _cast(int, page)
        self.extensiontype_ = extensiontype_
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if ContainerType.subclass:
            return ContainerType.subclass(*args_, **kwargs_)
        else:
            return ContainerType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_total(self): return self.total
    def set_total(self, total): self.total = total
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_pageSize(self): return self.pageSize
    def set_pageSize(self, pageSize): self.pageSize = pageSize
    def get_page(self): return self.page
    def set_page(self, page): self.page = page
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # Content (Link children) comes from the ResourceType parent class.
    def hasContent_(self):
        if (
            super(ContainerType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='ContainerType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContainerType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='ContainerType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ContainerType'):
        super(ContainerType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ContainerType')
        if self.total is not None and 'total' not in already_processed:
            already_processed.add('total')
            outfile.write(' total="%s"' % self.gds_format_integer(self.total, input_name='total'))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.pageSize is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            outfile.write(' pageSize="%s"' % self.gds_format_integer(self.pageSize, input_name='pageSize'))
        if self.page is not None and 'page' not in already_processed:
            already_processed.add('page')
            outfile.write(' page="%s"' % self.gds_format_integer(self.page, input_name='page'))
        # xsi:type is emitted only when this instance stands in for a derived type.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='ContainerType', fromsubclass_=False, pretty_print=True):
        super(ContainerType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='ContainerType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.total is not None and 'total' not in already_processed:
            already_processed.add('total')
            showIndent(outfile, level)
            outfile.write('total=%d,\n' % (self.total,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        if self.pageSize is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            showIndent(outfile, level)
            outfile.write('pageSize=%d,\n' % (self.pageSize,))
        if self.page is not None and 'page' not in already_processed:
            already_processed.add('page')
            showIndent(outfile, level)
            outfile.write('page=%d,\n' % (self.page,))
        super(ContainerType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ContainerType, self).exportLiteralChildren(outfile, level, name_)
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('total', node)
        if value is not None and 'total' not in already_processed:
            already_processed.add('total')
            # Reject non-integer values with a parse error.
            try:
                self.total = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('pageSize', node)
        if value is not None and 'pageSize' not in already_processed:
            already_processed.add('pageSize')
            try:
                self.pageSize = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('page', node)
        if value is not None and 'page' not in already_processed:
            already_processed.add('page')
            try:
                self.page = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(ContainerType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No own child elements; delegate everything to the parent class.
        super(ContainerType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class ContainerType
class CapacityType(VCloudExtensibleType):
    """0.9 Represents a capacity of a given resource."""
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, Units=None, Allocated=None, Limit=None, extensiontype_=None):
        self.original_tagname_ = None
        super(CapacityType, self).__init__(VCloudExtension, extensiontype_, )
        self.Units = Units
        self.Allocated = Allocated
        self.Limit = Limit
        self.extensiontype_ = extensiontype_
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if CapacityType.subclass:
            return CapacityType.subclass(*args_, **kwargs_)
        else:
            return CapacityType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_Units(self): return self.Units
    def set_Units(self, Units): self.Units = Units
    def get_Allocated(self): return self.Allocated
    def set_Allocated(self, Allocated): self.Allocated = Allocated
    def get_Limit(self): return self.Limit
    def set_Limit(self, Limit): self.Limit = Limit
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    # True when any child element (own or inherited) must be serialized.
    def hasContent_(self):
        if (
            self.Units is not None or
            self.Allocated is not None or
            self.Limit is not None or
            super(CapacityType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='CapacityType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='CapacityType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CapacityType'):
        super(CapacityType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityType')
        # xsi:type is emitted only when this instance stands in for a derived type.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='CapacityType', fromsubclass_=False, pretty_print=True):
        super(CapacityType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Units is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUnits>%s</%sUnits>%s' % (namespace_, self.gds_format_string(quote_xml(self.Units).encode(ExternalEncoding), input_name='Units'), namespace_, eol_))
        if self.Allocated is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAllocated>%s</%sAllocated>%s' % (namespace_, self.gds_format_integer(self.Allocated, input_name='Allocated'), namespace_, eol_))
        if self.Limit is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLimit>%s</%sLimit>%s' % (namespace_, self.gds_format_integer(self.Limit, input_name='Limit'), namespace_, eol_))
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='CapacityType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(CapacityType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(CapacityType, self).exportLiteralChildren(outfile, level, name_)
        if self.Units is not None:
            showIndent(outfile, level)
            outfile.write('Units=%s,\n' % quote_python(self.Units).encode(ExternalEncoding))
        if self.Allocated is not None:
            showIndent(outfile, level)
            outfile.write('Allocated=%d,\n' % self.Allocated)
        if self.Limit is not None:
            showIndent(outfile, level)
            outfile.write('Limit=%d,\n' % self.Limit)
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(CapacityType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch child elements by tag; integer children are validated
        # and a parse error raised on bad input.
        if nodeName_ == 'Units':
            Units_ = child_.text
            Units_ = self.gds_validate_string(Units_, node, 'Units')
            self.Units = Units_
        elif nodeName_ == 'Allocated':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Allocated')
            self.Allocated = ival_
        elif nodeName_ == 'Limit':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Limit')
            self.Limit = ival_
        super(CapacityType, self).buildChildren(child_, node, nodeName_, True)
# end class CapacityType
class CapacityWithUsageType(CapacityType):
    """0.9 Represents a capacity and usage of a given resource."""
    subclass = None
    superclass = CapacityType
    def __init__(self, VCloudExtension=None, Units=None, Allocated=None, Limit=None, Used=None, Overhead=None):
        self.original_tagname_ = None
        super(CapacityWithUsageType, self).__init__(VCloudExtension, Units, Allocated, Limit, )
        self.Used = Used
        self.Overhead = Overhead
    # factory() lets a registered subclass override instantiation.
    def factory(*args_, **kwargs_):
        if CapacityWithUsageType.subclass:
            return CapacityWithUsageType.subclass(*args_, **kwargs_)
        else:
            return CapacityWithUsageType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors.
    def get_Used(self): return self.Used
    def set_Used(self, Used): self.Used = Used
    def get_Overhead(self): return self.Overhead
    def set_Overhead(self, Overhead): self.Overhead = Overhead
    # True when any child element (own or inherited) must be serialized.
    def hasContent_(self):
        if (
            self.Used is not None or
            self.Overhead is not None or
            super(CapacityWithUsageType, self).hasContent_()
        ):
            return True
        else:
            return False
    # Serialize this object as an XML element to ``outfile``.
    def export(self, outfile, level, namespace_='', name_='CapacityWithUsageType', namespacedef_='', pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityWithUsageType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='CapacityWithUsageType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CapacityWithUsageType'):
        super(CapacityWithUsageType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='CapacityWithUsageType')
    def exportChildren(self, outfile, level, namespace_='', name_='CapacityWithUsageType', fromsubclass_=False, pretty_print=True):
        # Parent writes Units/Allocated/Limit; Used/Overhead follow.
        super(CapacityWithUsageType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Used is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUsed>%s</%sUsed>%s' % (namespace_, self.gds_format_integer(self.Used, input_name='Used'), namespace_, eol_))
        if self.Overhead is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sOverhead>%s</%sOverhead>%s' % (namespace_, self.gds_format_integer(self.Overhead, input_name='Overhead'), namespace_, eol_))
    # Serialize as Python constructor-literal text.
    def exportLiteral(self, outfile, level, name_='CapacityWithUsageType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(CapacityWithUsageType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(CapacityWithUsageType, self).exportLiteralChildren(outfile, level, name_)
        if self.Used is not None:
            showIndent(outfile, level)
            outfile.write('Used=%d,\n' % self.Used)
        if self.Overhead is not None:
            showIndent(outfile, level)
            outfile.write('Overhead=%d,\n' % self.Overhead)
    # Deserialize from an ElementTree node; returns self.
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(CapacityWithUsageType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Integer children are validated; bad input raises a parse error.
        if nodeName_ == 'Used':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Used')
            self.Used = ival_
        elif nodeName_ == 'Overhead':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Overhead')
            self.Overhead = ival_
        super(CapacityWithUsageType, self).buildChildren(child_, node, nodeName_, True)
# end class CapacityWithUsageType
class AccessSettingType(VCloudExtensibleType):
    """0.9 Specifies who can access the resource.

    Children: Subject (a ReferenceType) and AccessLevel (a string).
    """
    subclass = None
    superclass = VCloudExtensibleType

    def __init__(self, VCloudExtension=None, Subject=None, AccessLevel=None):
        self.original_tagname_ = None
        super(AccessSettingType, self).__init__(VCloudExtension, )
        self.Subject = Subject
        self.AccessLevel = AccessLevel

    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if any.
        cls = AccessSettingType.subclass or AccessSettingType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_Subject(self):
        return self.Subject

    def set_Subject(self, Subject):
        self.Subject = Subject

    def get_AccessLevel(self):
        return self.AccessLevel

    def set_AccessLevel(self, AccessLevel):
        self.AccessLevel = AccessLevel

    def hasContent_(self):
        # Any local child present, or content inherited from the base type.
        return bool(
            self.Subject is not None
            or self.AccessLevel is not None
            or super(AccessSettingType, self).hasContent_()
        )

    def export(self, outfile, level, namespace_='', name_='AccessSettingType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='AccessSettingType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AccessSettingType'):
        super(AccessSettingType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='AccessSettingType')

    def exportChildren(self, outfile, level, namespace_='', name_='AccessSettingType', fromsubclass_=False, pretty_print=True):
        super(AccessSettingType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        if self.Subject is not None:
            self.Subject.export(outfile, level, namespace_, name_='Subject', pretty_print=pretty_print)
        if self.AccessLevel is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAccessLevel>%s</%sAccessLevel>%s' % (
                namespace_,
                self.gds_format_string(quote_xml(self.AccessLevel).encode(ExternalEncoding), input_name='AccessLevel'),
                namespace_, eol_))

    def exportLiteral(self, outfile, level, name_='AccessSettingType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AccessSettingType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(AccessSettingType, self).exportLiteralChildren(outfile, level, name_)
        if self.Subject is not None:
            showIndent(outfile, level)
            outfile.write('Subject=model_.ReferenceType(\n')
            self.Subject.exportLiteral(outfile, level, name_='Subject')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.AccessLevel is not None:
            showIndent(outfile, level)
            outfile.write('AccessLevel=%s,\n' % quote_python(self.AccessLevel).encode(ExternalEncoding))

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(AccessSettingType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Subject':
            # Subject may be any registered ReferenceType subclass.
            obj_ = self.get_class_obj_(child_, ReferenceType).factory()
            obj_.build(child_)
            self.Subject = obj_
            obj_.original_tagname_ = 'Subject'
        elif nodeName_ == 'AccessLevel':
            self.AccessLevel = self.gds_validate_string(child_.text, node, 'AccessLevel')
        super(AccessSettingType, self).buildChildren(child_, node, nodeName_, True)
# end class AccessSettingType
class AccessSettingsType(VCloudExtensibleType):
    """0.9 A list of access settings for a resource.

    Holds a repeating AccessSetting child (list of AccessSettingType).
    """
    subclass = None
    superclass = VCloudExtensibleType

    def __init__(self, VCloudExtension=None, AccessSetting=None):
        self.original_tagname_ = None
        super(AccessSettingsType, self).__init__(VCloudExtension, )
        # Fresh list per instance; never share a mutable default.
        self.AccessSetting = [] if AccessSetting is None else AccessSetting

    def factory(*args_, **kwargs_):
        cls = AccessSettingsType.subclass or AccessSettingsType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_AccessSetting(self):
        return self.AccessSetting

    def set_AccessSetting(self, AccessSetting):
        self.AccessSetting = AccessSetting

    def add_AccessSetting(self, value):
        self.AccessSetting.append(value)

    def insert_AccessSetting_at(self, index, value):
        self.AccessSetting.insert(index, value)

    def replace_AccessSetting_at(self, index, value):
        self.AccessSetting[index] = value

    def hasContent_(self):
        # Non-empty child list, or content inherited from the base type.
        return bool(self.AccessSetting or super(AccessSettingsType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='AccessSettingsType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AccessSettingsType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='AccessSettingsType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AccessSettingsType'):
        super(AccessSettingsType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='AccessSettingsType')

    def exportChildren(self, outfile, level, namespace_='', name_='AccessSettingsType', fromsubclass_=False, pretty_print=True):
        super(AccessSettingsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        for setting_ in self.AccessSetting:
            setting_.export(outfile, level, namespace_, name_='AccessSetting', pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='AccessSettingsType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AccessSettingsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(AccessSettingsType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('AccessSetting=[\n')
        level += 1
        for setting_ in self.AccessSetting:
            showIndent(outfile, level)
            outfile.write('model_.AccessSettingType(\n')
            setting_.exportLiteral(outfile, level, name_='AccessSettingType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(AccessSettingsType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'AccessSetting':
            obj_ = AccessSettingType.factory()
            obj_.build(child_)
            self.AccessSetting.append(obj_)
            obj_.original_tagname_ = 'AccessSetting'
        super(AccessSettingsType, self).buildChildren(child_, node, nodeName_, True)
# end class AccessSettingsType
class ControlAccessParamsType(VCloudExtensibleType):
    """0.9 Used to control access to resources.

    Children: IsSharedToEveryone (boolean), EveryoneAccessLevel (string),
    AccessSettings (an AccessSettingsType).
    """
    subclass = None
    superclass = VCloudExtensibleType

    def __init__(self, VCloudExtension=None, IsSharedToEveryone=None, EveryoneAccessLevel=None, AccessSettings=None):
        self.original_tagname_ = None
        super(ControlAccessParamsType, self).__init__(VCloudExtension, )
        self.IsSharedToEveryone = IsSharedToEveryone
        self.EveryoneAccessLevel = EveryoneAccessLevel
        self.AccessSettings = AccessSettings

    def factory(*args_, **kwargs_):
        cls = ControlAccessParamsType.subclass or ControlAccessParamsType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_IsSharedToEveryone(self):
        return self.IsSharedToEveryone

    def set_IsSharedToEveryone(self, IsSharedToEveryone):
        self.IsSharedToEveryone = IsSharedToEveryone

    def get_EveryoneAccessLevel(self):
        return self.EveryoneAccessLevel

    def set_EveryoneAccessLevel(self, EveryoneAccessLevel):
        self.EveryoneAccessLevel = EveryoneAccessLevel

    def get_AccessSettings(self):
        return self.AccessSettings

    def set_AccessSettings(self, AccessSettings):
        self.AccessSettings = AccessSettings

    def hasContent_(self):
        # Any local child present, or content inherited from the base type.
        return bool(
            self.IsSharedToEveryone is not None
            or self.EveryoneAccessLevel is not None
            or self.AccessSettings is not None
            or super(ControlAccessParamsType, self).hasContent_()
        )

    def export(self, outfile, level, namespace_='', name_='ControlAccessParamsType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ControlAccessParamsType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='ControlAccessParamsType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ControlAccessParamsType'):
        super(ControlAccessParamsType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='ControlAccessParamsType')

    def exportChildren(self, outfile, level, namespace_='', name_='ControlAccessParamsType', fromsubclass_=False, pretty_print=True):
        super(ControlAccessParamsType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        if self.IsSharedToEveryone is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsSharedToEveryone>%s</%sIsSharedToEveryone>%s' % (
                namespace_,
                self.gds_format_boolean(self.IsSharedToEveryone, input_name='IsSharedToEveryone'),
                namespace_, eol_))
        if self.EveryoneAccessLevel is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sEveryoneAccessLevel>%s</%sEveryoneAccessLevel>%s' % (
                namespace_,
                self.gds_format_string(quote_xml(self.EveryoneAccessLevel).encode(ExternalEncoding), input_name='EveryoneAccessLevel'),
                namespace_, eol_))
        if self.AccessSettings is not None:
            self.AccessSettings.export(outfile, level, namespace_, name_='AccessSettings', pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='ControlAccessParamsType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(ControlAccessParamsType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(ControlAccessParamsType, self).exportLiteralChildren(outfile, level, name_)
        if self.IsSharedToEveryone is not None:
            showIndent(outfile, level)
            outfile.write('IsSharedToEveryone=%s,\n' % self.IsSharedToEveryone)
        if self.EveryoneAccessLevel is not None:
            showIndent(outfile, level)
            outfile.write('EveryoneAccessLevel=%s,\n' % quote_python(self.EveryoneAccessLevel).encode(ExternalEncoding))
        if self.AccessSettings is not None:
            showIndent(outfile, level)
            outfile.write('AccessSettings=model_.AccessSettingsType(\n')
            self.AccessSettings.exportLiteral(outfile, level, name_='AccessSettings')
            showIndent(outfile, level)
            outfile.write('),\n')

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(ControlAccessParamsType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'IsSharedToEveryone':
            sval_ = child_.text
            if sval_ in ('true', '1'):
                ival_ = True
            elif sval_ in ('false', '0'):
                ival_ = False
            else:
                raise_parse_error(child_, 'requires boolean')
            self.IsSharedToEveryone = self.gds_validate_boolean(ival_, node, 'IsSharedToEveryone')
        elif nodeName_ == 'EveryoneAccessLevel':
            self.EveryoneAccessLevel = self.gds_validate_string(child_.text, node, 'EveryoneAccessLevel')
        elif nodeName_ == 'AccessSettings':
            obj_ = AccessSettingsType.factory()
            obj_.build(child_)
            self.AccessSettings = obj_
            obj_.original_tagname_ = 'AccessSettings'
        super(ControlAccessParamsType, self).buildChildren(child_, node, nodeName_, True)
# end class ControlAccessParamsType
class OwnerType(ResourceType):
    """1.5 Represents the owner of this entity.

    Single child: User (a ReferenceType).
    """
    subclass = None
    superclass = ResourceType

    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, User=None):
        self.original_tagname_ = None
        super(OwnerType, self).__init__(VCloudExtension, href, type_, Link, )
        self.User = User

    def factory(*args_, **kwargs_):
        cls = OwnerType.subclass or OwnerType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_User(self):
        return self.User

    def set_User(self, User):
        self.User = User

    def hasContent_(self):
        # User child present, or content inherited from the base type.
        return bool(self.User is not None or super(OwnerType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='OwnerType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='OwnerType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='OwnerType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='OwnerType'):
        super(OwnerType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='OwnerType')

    def exportChildren(self, outfile, level, namespace_='', name_='OwnerType', fromsubclass_=False, pretty_print=True):
        super(OwnerType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        if self.User is not None:
            self.User.export(outfile, level, namespace_, name_='User', pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='OwnerType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(OwnerType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(OwnerType, self).exportLiteralChildren(outfile, level, name_)
        if self.User is not None:
            showIndent(outfile, level)
            outfile.write('User=model_.ReferenceType(\n')
            self.User.exportLiteral(outfile, level, name_='User')
            showIndent(outfile, level)
            outfile.write('),\n')

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(OwnerType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'User':
            # User may be any registered ReferenceType subclass.
            obj_ = self.get_class_obj_(child_, ReferenceType).factory()
            obj_.build(child_)
            self.User = obj_
            obj_.original_tagname_ = 'User'
        super(OwnerType, self).buildChildren(child_, node, nodeName_, True)
# end class OwnerType
class ReferencesType(ContainerType):
    """This is the container for returned elements in referenceView.

    Holds a repeating Reference child (list of ReferenceType).
    """
    subclass = None
    superclass = ContainerType

    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None, Reference=None):
        self.original_tagname_ = None
        super(ReferencesType, self).__init__(VCloudExtension, href, type_, Link, total, name, pageSize, page, )
        # Fresh list per instance; never share a mutable default.
        self.Reference = [] if Reference is None else Reference

    def factory(*args_, **kwargs_):
        cls = ReferencesType.subclass or ReferencesType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_Reference(self):
        return self.Reference

    def set_Reference(self, Reference):
        self.Reference = Reference

    def add_Reference(self, value):
        self.Reference.append(value)

    def insert_Reference_at(self, index, value):
        self.Reference.insert(index, value)

    def replace_Reference_at(self, index, value):
        self.Reference[index] = value

    def hasContent_(self):
        # Non-empty child list, or content inherited from the base type.
        return bool(self.Reference or super(ReferencesType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='ReferencesType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ReferencesType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='ReferencesType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReferencesType'):
        super(ReferencesType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='ReferencesType')

    def exportChildren(self, outfile, level, namespace_='', name_='ReferencesType', fromsubclass_=False, pretty_print=True):
        super(ReferencesType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        for ref_ in self.Reference:
            ref_.export(outfile, level, namespace_, name_='Reference', pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='ReferencesType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(ReferencesType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(ReferencesType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Reference=[\n')
        level += 1
        for ref_ in self.Reference:
            showIndent(outfile, level)
            outfile.write('model_.Reference(\n')
            ref_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(ReferencesType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Reference':
            # Reference may be any registered ReferenceType subclass.
            obj_ = self.get_class_obj_(child_, ReferenceType).factory()
            obj_.build(child_)
            self.Reference.append(obj_)
            obj_.original_tagname_ = 'Reference'
        super(ReferencesType, self).buildChildren(child_, node, nodeName_, True)
# end class ReferencesType
class QueryListType(ContainerType):
    """Container for the list of typed queries available to the
    requesting user.

    Declares no children of its own; everything is inherited from
    ContainerType.
    """
    subclass = None
    superclass = ContainerType

    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, total=None, name=None, pageSize=None, page=None):
        self.original_tagname_ = None
        super(QueryListType, self).__init__(VCloudExtension, href, type_, Link, total, name, pageSize, page, )

    def factory(*args_, **kwargs_):
        cls = QueryListType.subclass or QueryListType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def hasContent_(self):
        # No local children: content comes only from the base type.
        return bool(super(QueryListType, self).hasContent_())

    def export(self, outfile, level, namespace_='', name_='QueryListType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='QueryListType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='QueryListType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='QueryListType'):
        super(QueryListType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='QueryListType')

    def exportChildren(self, outfile, level, namespace_='', name_='QueryListType', fromsubclass_=False, pretty_print=True):
        super(QueryListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='QueryListType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(QueryListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(QueryListType, self).exportLiteralChildren(outfile, level, name_)

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(QueryListType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No local children to parse; delegate everything upward.
        super(QueryListType, self).buildChildren(child_, node, nodeName_, True)
# end class QueryListType
class MetadataEntryType(ResourceType):
    """A single metadata entry with Key and Value string children."""
    subclass = None
    superclass = ResourceType

    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Key=None, Value=None):
        self.original_tagname_ = None
        super(MetadataEntryType, self).__init__(VCloudExtension, href, type_, Link, )
        self.Key = Key
        self.Value = Value

    def factory(*args_, **kwargs_):
        cls = MetadataEntryType.subclass or MetadataEntryType
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_Key(self):
        return self.Key

    def set_Key(self, Key):
        self.Key = Key

    def get_Value(self):
        return self.Value

    def set_Value(self, Value):
        self.Value = Value

    def hasContent_(self):
        # Any local child present, or content inherited from the base type.
        return bool(
            self.Key is not None
            or self.Value is not None
            or super(MetadataEntryType, self).hasContent_()
        )

    def export(self, outfile, level, namespace_='', name_='MetadataEntryType', namespacedef_='', pretty_print=True):
        """Serialize this element, its attributes and children, as XML."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, (' ' + namespacedef_) if namespacedef_ else ''))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataEntryType')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataEntryType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataEntryType'):
        super(MetadataEntryType, self).exportAttributes(
            outfile, level, already_processed, namespace_, name_='MetadataEntryType')

    def exportChildren(self, outfile, level, namespace_='', name_='MetadataEntryType', fromsubclass_=False, pretty_print=True):
        super(MetadataEntryType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        eol_ = '\n' if pretty_print else ''
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespace_,
                self.gds_format_string(quote_xml(self.Key).encode(ExternalEncoding), input_name='Key'),
                namespace_, eol_))
        if self.Value is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (
                namespace_,
                self.gds_format_string(quote_xml(self.Value).encode(ExternalEncoding), input_name='Value'),
                namespace_, eol_))

    def exportLiteral(self, outfile, level, name_='MetadataEntryType'):
        """Serialize this element as Python constructor-literal code."""
        level = level + 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataEntryType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataEntryType, self).exportLiteralChildren(outfile, level, name_)
        if self.Key is not None:
            showIndent(outfile, level)
            outfile.write('Key=%s,\n' % quote_python(self.Key).encode(ExternalEncoding))
        if self.Value is not None:
            showIndent(outfile, level)
            outfile.write('Value=%s,\n' % quote_python(self.Value).encode(ExternalEncoding))

    def build(self, node):
        """Populate this object from an element-tree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            tag_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag_)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataEntryType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Key':
            self.Key = self.gds_validate_string(child_.text, node, 'Key')
        elif nodeName_ == 'Value':
            self.Value = self.gds_validate_string(child_.text, node, 'Value')
        super(MetadataEntryType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataEntryType
class MetadataType(ResourceType):
    """Generated binding for a vCloud metadata collection: a ResourceType
    holding a list of ``MetadataEntry`` children (MetadataEntryType).
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, MetadataEntry=None):
        # original_tagname_ remembers the XML tag this object was parsed
        # from, so export() reproduces the same element name.
        self.original_tagname_ = None
        super(MetadataType, self).__init__(VCloudExtension, href, type_, Link, )
        # Fresh list per instance when the argument is omitted (avoids a
        # shared mutable default).
        if MetadataEntry is None:
            self.MetadataEntry = []
        else:
            self.MetadataEntry = MetadataEntry
    def factory(*args_, **kwargs_):
        # Dispatch to an installed subclass override when one is registered.
        if MetadataType.subclass:
            return MetadataType.subclass(*args_, **kwargs_)
        else:
            return MetadataType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the MetadataEntry child list.
    def get_MetadataEntry(self): return self.MetadataEntry
    def set_MetadataEntry(self, MetadataEntry): self.MetadataEntry = MetadataEntry
    def add_MetadataEntry(self, value): self.MetadataEntry.append(value)
    def insert_MetadataEntry_at(self, index, value): self.MetadataEntry.insert(index, value)
    def replace_MetadataEntry_at(self, index, value): self.MetadataEntry[index] = value
    def hasContent_(self):
        # True when the element has child content and needs a closing tag.
        if (
            self.MetadataEntry or
            super(MetadataType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetadataType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataType'):
        super(MetadataType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataType')
    def exportChildren(self, outfile, level, namespace_='', name_='MetadataType', fromsubclass_=False, pretty_print=True):
        super(MetadataType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for MetadataEntry_ in self.MetadataEntry:
            MetadataEntry_.export(outfile, level, namespace_, name_='MetadataEntry', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='MetadataType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataType, self).exportLiteralChildren(outfile, level, name_)
        # NOTE(review): the MetadataEntry=[...] wrapper is written even when
        # the list is empty — generator behavior, kept as-is.
        showIndent(outfile, level)
        outfile.write('MetadataEntry=[\n')
        level += 1
        for MetadataEntry_ in self.MetadataEntry:
            showIndent(outfile, level)
            outfile.write('model_.MetadataEntryType(\n')
            MetadataEntry_.exportLiteral(outfile, level, name_='MetadataEntryType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'MetadataEntry':
            obj_ = MetadataEntryType.factory()
            obj_.build(child_)
            self.MetadataEntry.append(obj_)
            obj_.original_tagname_ = 'MetadataEntry'
        super(MetadataType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataType
class MetadataValueType(ResourceType):
    """Generated binding for a single metadata value: a ResourceType with
    one optional string child element, ``Value``.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Value=None):
        # original_tagname_ remembers the XML tag this object was parsed
        # from, so export() reproduces the same element name.
        self.original_tagname_ = None
        super(MetadataValueType, self).__init__(VCloudExtension, href, type_, Link, )
        self.Value = Value
    def factory(*args_, **kwargs_):
        # Dispatch to an installed subclass override when one is registered.
        if MetadataValueType.subclass:
            return MetadataValueType.subclass(*args_, **kwargs_)
        else:
            return MetadataValueType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Value(self): return self.Value
    def set_Value(self, Value): self.Value = Value
    def hasContent_(self):
        # True when the element has child content and needs a closing tag.
        if (
            self.Value is not None or
            super(MetadataValueType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetadataValueType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataValueType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetadataValueType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetadataValueType'):
        super(MetadataValueType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='MetadataValueType')
    def exportChildren(self, outfile, level, namespace_='', name_='MetadataValueType', fromsubclass_=False, pretty_print=True):
        super(MetadataValueType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Value is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (namespace_, self.gds_format_string(quote_xml(self.Value).encode(ExternalEncoding), input_name='Value'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='MetadataValueType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(MetadataValueType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(MetadataValueType, self).exportLiteralChildren(outfile, level, name_)
        if self.Value is not None:
            showIndent(outfile, level)
            outfile.write('Value=%s,\n' % quote_python(self.Value).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(MetadataValueType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Value':
            Value_ = child_.text
            Value_ = self.gds_validate_string(Value_, node, 'Value')
            self.Value = Value_
        super(MetadataValueType, self).buildChildren(child_, node, nodeName_, True)
# end class MetadataValueType
class EntityType(ResourceType):
    """Basic entity type in the vCloud object model (API 0.9).

    An entity carries a ``name`` attribute (always present), an optional
    ``id`` attribute — an identifier in URN format that uniquely identifies
    the entity, persists for the life of the entity, and is never reused —
    plus an optional ``Description`` child and an optional ``Tasks`` child
    (a TasksInProgressType list).
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, name=None, id=None, Description=None, Tasks=None, extensiontype_=None):
        # original_tagname_ remembers the XML tag this object was parsed
        # from, so export() reproduces the same element name.
        self.original_tagname_ = None
        super(EntityType, self).__init__(VCloudExtension, href, type_, Link, extensiontype_, )
        # _cast(None, x) applies no conversion; attribute values stay as given.
        self.name = _cast(None, name)
        self.id = _cast(None, id)
        self.Description = Description
        self.Tasks = Tasks
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Dispatch to an installed subclass override when one is registered.
        if EntityType.subclass:
            return EntityType.subclass(*args_, **kwargs_)
        else:
            return EntityType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for children and attributes.
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_Tasks(self): return self.Tasks
    def set_Tasks(self, Tasks): self.Tasks = Tasks
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # True when the element has child content and needs a closing tag.
        if (
            self.Description is not None or
            self.Tasks is not None or
            super(EntityType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='EntityType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='EntityType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityType'):
        super(EntityType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityType')
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            # Emit xsi:type so a subclass instance exported under a base-typed
            # element can be reconstructed as the correct type.
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='EntityType', fromsubclass_=False, pretty_print=True):
        super(EntityType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDescription>%s</%sDescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.Description).encode(ExternalEncoding), input_name='Description'), namespace_, eol_))
        if self.Tasks is not None:
            self.Tasks.export(outfile, level, namespace_, name_='Tasks', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='EntityType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        super(EntityType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityType, self).exportLiteralChildren(outfile, level, name_)
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=%s,\n' % quote_python(self.Description).encode(ExternalEncoding))
        if self.Tasks is not None:
            showIndent(outfile, level)
            outfile.write('Tasks=model_.TasksInProgressType(\n')
            self.Tasks.exportLiteral(outfile, level, name_='Tasks')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(EntityType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            Description_ = child_.text
            Description_ = self.gds_validate_string(Description_, node, 'Description')
            self.Description = Description_
        elif nodeName_ == 'Tasks':
            obj_ = TasksInProgressType.factory()
            obj_.build(child_)
            self.Tasks = obj_
            obj_.original_tagname_ = 'Tasks'
        super(EntityType, self).buildChildren(child_, node, nodeName_, True)
# end class EntityType
class EntityReferenceType(VCloudExtensibleType):
    """A reference to a vCloud entity (API 1.5).

    Attributes: ``id`` (optional) is the object identifier in URN format —
    it uniquely identifies the object, persists for the object's life, and
    is never reused; ``type_`` (always present) is the type of the
    referenced object; ``name`` (always present) is the name of the
    referenced object.
    """
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, type_=None, id=None, name=None, extensiontype_=None):
        # original_tagname_ remembers the XML tag this object was parsed
        # from, so export() reproduces the same element name.
        self.original_tagname_ = None
        super(EntityReferenceType, self).__init__(VCloudExtension, extensiontype_, )
        # _cast(None, x) applies no conversion; attribute values stay as given.
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Dispatch to an installed subclass override when one is registered.
        if EntityReferenceType.subclass:
            return EntityReferenceType.subclass(*args_, **kwargs_)
        else:
            return EntityReferenceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for the reference attributes.
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # This type adds only attributes; any child content comes from the base.
        if (
            super(EntityReferenceType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='EntityReferenceType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityReferenceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='EntityReferenceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityReferenceType'):
        super(EntityReferenceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityReferenceType')
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            # Emit xsi:type so a subclass instance exported under a base-typed
            # element can be reconstructed as the correct type.
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='', name_='EntityReferenceType', fromsubclass_=False, pretty_print=True):
        super(EntityReferenceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='EntityReferenceType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_="%s",\n' % (self.type_,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id="%s",\n' % (self.id,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        super(EntityReferenceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityReferenceType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(EntityReferenceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No children of its own; delegate entirely to the base class.
        super(EntityReferenceType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class EntityReferenceType
class EntityLinkType(EntityReferenceType):
    """Generated binding for an entity link (API 1.5).

    Extends EntityReferenceType with a ``rel`` attribute (always present)
    that defines the relationship of the link to the object containing it:
    an operation name, a reference to a contained or containing object, or
    an alternate representation. The relationship value implies the HTTP
    verb to use with the link's ``href``.
    """
    subclass = None
    superclass = EntityReferenceType

    def __init__(self, VCloudExtension=None, type_=None, id=None, name=None, rel=None):
        self.original_tagname_ = None
        super(EntityLinkType, self).__init__(VCloudExtension, type_, id, name, )
        self.rel = _cast(None, rel)

    def factory(*args_, **kwargs_):
        # Honour an installed subclass override, if any.
        target_class = EntityLinkType.subclass or EntityLinkType
        return target_class(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_rel(self):
        return self.rel

    def set_rel(self, rel):
        self.rel = rel

    def hasContent_(self):
        # This type adds only an attribute; content comes solely from the base.
        return super(EntityLinkType, self).hasContent_()

    def export(self, outfile, level, namespace_='', name_='EntityLinkType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        ns_decl = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, ns_decl))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EntityLinkType')
        if not self.hasContent_():
            # Childless element: close it in place.
            outfile.write('/>%s' % (eol_, ))
            return
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespace_='', name_='EntityLinkType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EntityLinkType'):
        super(EntityLinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EntityLinkType')
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            rel_text = self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel')
            outfile.write(' rel=%s' % (rel_text, ))

    def exportChildren(self, outfile, level, namespace_='', name_='EntityLinkType', fromsubclass_=False, pretty_print=True):
        super(EntityLinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)

    def exportLiteral(self, outfile, level, name_='EntityLinkType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel="%s",\n' % (self.rel, ))
        super(EntityLinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(EntityLinkType, self).exportLiteralChildren(outfile, level, name_)

    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_node in node:
            # Strip any namespace prefix from the child's tag name.
            tag_name = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, tag_name)
        return self

    def buildAttributes(self, node, attrs, already_processed):
        rel_value = find_attr_value_('rel', node)
        if rel_value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = rel_value
        super(EntityLinkType, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No children of its own; delegate entirely to the base class.
        super(EntityLinkType, self).buildChildren(child_, node, nodeName_, True)
# end class EntityLinkType
class TasksInProgressType(VCloudExtensibleType):
    """A list of queued, running, or recently completed tasks (API 0.9).

    Holds zero or more ``Task`` children (TaskType).
    """
    subclass = None
    superclass = VCloudExtensibleType
    def __init__(self, VCloudExtension=None, Task=None):
        # original_tagname_ remembers the XML tag this object was parsed
        # from, so export() reproduces the same element name.
        self.original_tagname_ = None
        super(TasksInProgressType, self).__init__(VCloudExtension, )
        # Fresh list per instance when the argument is omitted (avoids a
        # shared mutable default).
        if Task is None:
            self.Task = []
        else:
            self.Task = Task
    def factory(*args_, **kwargs_):
        # Dispatch to an installed subclass override when one is registered.
        if TasksInProgressType.subclass:
            return TasksInProgressType.subclass(*args_, **kwargs_)
        else:
            return TasksInProgressType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the Task child list.
    def get_Task(self): return self.Task
    def set_Task(self, Task): self.Task = Task
    def add_Task(self, value): self.Task.append(value)
    def insert_Task_at(self, index, value): self.Task.insert(index, value)
    def replace_Task_at(self, index, value): self.Task[index] = value
    def hasContent_(self):
        # True when the element has child content and needs a closing tag.
        if (
            self.Task or
            super(TasksInProgressType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TasksInProgressType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TasksInProgressType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TasksInProgressType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TasksInProgressType'):
        super(TasksInProgressType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TasksInProgressType')
    def exportChildren(self, outfile, level, namespace_='', name_='TasksInProgressType', fromsubclass_=False, pretty_print=True):
        super(TasksInProgressType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Task_ in self.Task:
            Task_.export(outfile, level, namespace_, name_='Task', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='TasksInProgressType'):
        """Write a Python-literal (constructor-style) rendering of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(TasksInProgressType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(TasksInProgressType, self).exportLiteralChildren(outfile, level, name_)
        # NOTE(review): the Task=[...] wrapper is written even when the list
        # is empty — generator behavior, kept as-is.
        showIndent(outfile, level)
        outfile.write('Task=[\n')
        level += 1
        for Task_ in self.Task:
            showIndent(outfile, level)
            outfile.write('model_.TaskType(\n')
            Task_.exportLiteral(outfile, level, name_='TaskType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the child's tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(TasksInProgressType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Task':
            obj_ = TaskType.factory()
            obj_.build(child_)
            self.Task.append(obj_)
            obj_.original_tagname_ = 'Task'
        super(TasksInProgressType, self).buildChildren(child_, node, nodeName_, True)
# end class TasksInProgressType
class TaskType(EntityType):
"""0.9 Represents an asynchronous or long-running task in the vCloud
environment. none The execution status of the task. One of:
queued (The task has been queued for execution.), preRunning
(The task is awaiting preprocessing or, if it is a blocking
task, administrative action.), running (The task is runnning.),
success (The task completed with a status of success.), error
(The task encountered an error while running.), canceled (The
task was canceled by the owner or an administrator.), aborted
(The task was aborted by an administrative action.) none The
display name of the operation that is tracked by this task. none
The name of the operation that is tracked by this task. none The
date and time the system started executing the task. May not be
present if the task hasn't been executed yet. none The date and
time that processing of the task was completed. May not be
present if the task is still being executed. none The date and
time at which the task resource will be destroyed and no longer
available for retrieval. May not be present if the task has not
been executed or is still being executed."""
subclass = None
superclass = EntityType
def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, name=None, id=None, Description=None, Tasks=None, status=None, operationName=None, expiryTime=None, startTime=None, operation=None, endTime=None, Owner=None, Error=None, User=None, Organization=None, Progress=None, Params=None):
self.original_tagname_ = None
super(TaskType, self).__init__(VCloudExtension, href, type_, Link, name, id, Description, Tasks, )
self.status = _cast(None, status)
self.operationName = _cast(None, operationName)
if isinstance(expiryTime, basestring):
initvalue_ = datetime_.datetime.strptime(expiryTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = expiryTime
self.expiryTime = initvalue_
if isinstance(startTime, basestring):
initvalue_ = datetime_.datetime.strptime(startTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = startTime
self.startTime = initvalue_
self.operation = _cast(None, operation)
if isinstance(endTime, basestring):
initvalue_ = datetime_.datetime.strptime(endTime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = endTime
self.endTime = initvalue_
self.Owner = Owner
self.Error = Error
self.User = User
self.Organization = Organization
self.Progress = Progress
self.Params = Params
def factory(*args_, **kwargs_):
if TaskType.subclass:
return TaskType.subclass(*args_, **kwargs_)
else:
return TaskType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Owner(self): return self.Owner
def set_Owner(self, Owner): self.Owner = Owner
def get_Error(self): return self.Error
def set_Error(self, Error): self.Error = Error
def get_User(self): return self.User
def set_User(self, User): self.User = User
def get_Organization(self): return self.Organization
def set_Organization(self, Organization): self.Organization = Organization
def get_Progress(self): return self.Progress
def set_Progress(self, Progress): self.Progress = Progress
def get_Params(self): return self.Params
def set_Params(self, Params): self.Params = Params
def get_status(self): return self.status
def set_status(self, status): self.status = status
def get_operationName(self): return self.operationName
def set_operationName(self, operationName): self.operationName = operationName
def get_expiryTime(self): return self.expiryTime
def set_expiryTime(self, expiryTime): self.expiryTime = expiryTime
def get_startTime(self): return self.startTime
def set_startTime(self, startTime): self.startTime = startTime
def get_operation(self): return self.operation
def set_operation(self, operation): self.operation = operation
def get_endTime(self): return self.endTime
def set_endTime(self, endTime): self.endTime = endTime
def hasContent_(self):
if (
self.Owner is not None or
self.Error is not None or
self.User is not None or
self.Organization is not None or
self.Progress is not None or
self.Params is not None or
super(TaskType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='TaskType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaskType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='TaskType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaskType'):
        """Write this element's XML attributes; superclass attributes first.

        *already_processed* guards against emitting the same attribute twice.
        """
        super(TaskType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaskType')
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            outfile.write(' status=%s' % (self.gds_format_string(quote_attrib(self.status).encode(ExternalEncoding), input_name='status'), ))
        if self.operationName is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            outfile.write(' operationName=%s' % (self.gds_format_string(quote_attrib(self.operationName).encode(ExternalEncoding), input_name='operationName'), ))
        if self.expiryTime is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            outfile.write(' expiryTime="%s"' % self.gds_format_datetime(self.expiryTime, input_name='expiryTime'))
        if self.startTime is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            outfile.write(' startTime="%s"' % self.gds_format_datetime(self.startTime, input_name='startTime'))
        if self.operation is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            outfile.write(' operation=%s' % (self.gds_format_string(quote_attrib(self.operation).encode(ExternalEncoding), input_name='operation'), ))
        if self.endTime is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            outfile.write(' endTime="%s"' % self.gds_format_datetime(self.endTime, input_name='endTime'))
    def exportChildren(self, outfile, level, namespace_='', name_='TaskType', fromsubclass_=False, pretty_print=True):
        """Write this element's XML child elements after the superclass's."""
        super(TaskType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Complex children delegate to their own export(); simple-typed
        # children (Progress, Params) are written inline.
        if self.Owner is not None:
            self.Owner.export(outfile, level, namespace_, name_='Owner', pretty_print=pretty_print)
        if self.Error is not None:
            self.Error.export(outfile, level, namespace_, name_='Error', pretty_print=pretty_print)
        if self.User is not None:
            self.User.export(outfile, level, namespace_, name_='User', pretty_print=pretty_print)
        if self.Organization is not None:
            self.Organization.export(outfile, level, namespace_, name_='Organization', pretty_print=pretty_print)
        if self.Progress is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sProgress>%s</%sProgress>%s' % (namespace_, self.gds_format_integer(self.Progress, input_name='Progress'), namespace_, eol_))
        if self.Params is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sParams>%s</%sParams>%s' % (namespace_, self.gds_format_string(quote_xml(self.Params).encode(ExternalEncoding), input_name='Params'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='TaskType'):
        """Write this element as Python literal (constructor-call style) text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write this element's attributes as Python keyword-argument lines."""
        if self.status is not None and 'status' not in already_processed:
            already_processed.add('status')
            showIndent(outfile, level)
            outfile.write('status="%s",\n' % (self.status,))
        if self.operationName is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            showIndent(outfile, level)
            outfile.write('operationName="%s",\n' % (self.operationName,))
        # datetime attributes are emitted as calls that re-parse the
        # formatted value, so the literal round-trips back to a datetime.
        if self.expiryTime is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            showIndent(outfile, level)
            outfile.write('expiryTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.expiryTime, input_name='expiryTime'))
        if self.startTime is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            showIndent(outfile, level)
            outfile.write('startTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.startTime, input_name='startTime'))
        if self.operation is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            showIndent(outfile, level)
            outfile.write('operation="%s",\n' % (self.operation,))
        if self.endTime is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            showIndent(outfile, level)
            outfile.write('endTime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.endTime, input_name='endTime'))
        super(TaskType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write this element's children as Python literal constructor calls."""
        super(TaskType, self).exportLiteralChildren(outfile, level, name_)
        if self.Owner is not None:
            showIndent(outfile, level)
            outfile.write('Owner=model_.ReferenceType(\n')
            self.Owner.exportLiteral(outfile, level, name_='Owner')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Error is not None:
            showIndent(outfile, level)
            outfile.write('Error=model_.ErrorType(\n')
            self.Error.exportLiteral(outfile, level, name_='Error')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.User is not None:
            showIndent(outfile, level)
            outfile.write('User=model_.ReferenceType(\n')
            self.User.exportLiteral(outfile, level, name_='User')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Organization is not None:
            showIndent(outfile, level)
            outfile.write('Organization=model_.ReferenceType(\n')
            self.Organization.exportLiteral(outfile, level, name_='Organization')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Progress is not None:
            showIndent(outfile, level)
            outfile.write('Progress=%d,\n' % self.Progress)
        if self.Params is not None:
            showIndent(outfile, level)
            outfile.write('Params=%s,\n' % quote_python(self.Params).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        """Populate this element's fields from *node*'s XML attributes.

        Raises ValueError with the attribute name if a date-time attribute
        cannot be parsed.
        """
        value = find_attr_value_('status', node)
        if value is not None and 'status' not in already_processed:
            already_processed.add('status')
            self.status = value
        value = find_attr_value_('operationName', node)
        if value is not None and 'operationName' not in already_processed:
            already_processed.add('operationName')
            self.operationName = value
        value = find_attr_value_('expiryTime', node)
        if value is not None and 'expiryTime' not in already_processed:
            already_processed.add('expiryTime')
            try:
                self.expiryTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (expiryTime): %s' % exp)
        value = find_attr_value_('startTime', node)
        if value is not None and 'startTime' not in already_processed:
            already_processed.add('startTime')
            try:
                self.startTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (startTime): %s' % exp)
        value = find_attr_value_('operation', node)
        if value is not None and 'operation' not in already_processed:
            already_processed.add('operation')
            self.operation = value
        value = find_attr_value_('endTime', node)
        if value is not None and 'endTime' not in already_processed:
            already_processed.add('endTime')
            try:
                self.endTime = self.gds_parse_datetime(value)
            except ValueError as exp:
                raise ValueError('Bad date-time attribute (endTime): %s' % exp)
        super(TaskType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build one child element *child_* of *node* by its local tag name.

        Unrecognized tags fall through to the superclass handler.
        """
        if nodeName_ == 'Owner':
            # get_class_obj_ resolves an xsi:type-substituted subclass if present.
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'Error':
            obj_ = ErrorType.factory()
            obj_.build(child_)
            self.Error = obj_
            obj_.original_tagname_ = 'Error'
        elif nodeName_ == 'User':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.User = obj_
            obj_.original_tagname_ = 'User'
        elif nodeName_ == 'Organization':
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Organization = obj_
            obj_.original_tagname_ = 'Organization'
        elif nodeName_ == 'Progress':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'Progress')
            self.Progress = ival_
        elif nodeName_ == 'Params':
            Params_ = child_.text
            Params_ = self.gds_validate_string(Params_, node, 'Params')
            self.Params = Params_
        super(TaskType, self).buildChildren(child_, node, nodeName_, True)
# end class TaskType
class TaskOperationListType(ResourceType):
    """List of operation names. 1.5

    Wraps a repeatable simple-typed <Operation> child element.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, Operation=None):
        self.original_tagname_ = None
        super(TaskOperationListType, self).__init__(VCloudExtension, href, type_, Link, )
        # Default to a fresh list per instance (never share a mutable default).
        if Operation is None:
            self.Operation = []
        else:
            self.Operation = Operation
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if TaskOperationListType.subclass:
            return TaskOperationListType.subclass(*args_, **kwargs_)
        else:
            return TaskOperationListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # List accessors for the repeatable <Operation> child.
    def get_Operation(self): return self.Operation
    def set_Operation(self, Operation): self.Operation = Operation
    def add_Operation(self, value): self.Operation.append(value)
    def insert_Operation_at(self, index, value): self.Operation.insert(index, value)
    def replace_Operation_at(self, index, value): self.Operation[index] = value
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            self.Operation or
            super(TaskOperationListType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TaskOperationListType', namespacedef_='', pretty_print=True):
        """Serialize this element (and children) as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Preserve the tag name the instance was originally parsed from.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TaskOperationListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TaskOperationListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TaskOperationListType'):
        """Write XML attributes; this type adds none beyond the superclass's."""
        super(TaskOperationListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TaskOperationListType')
    def exportChildren(self, outfile, level, namespace_='', name_='TaskOperationListType', fromsubclass_=False, pretty_print=True):
        """Write one <Operation> element per entry after superclass children."""
        super(TaskOperationListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Operation_ in self.Operation:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sOperation>%s</%sOperation>%s' % (namespace_, self.gds_format_string(quote_xml(Operation_).encode(ExternalEncoding), input_name='Operation'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='TaskOperationListType'):
        """Write this element as Python literal (constructor-call style) text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write attribute keyword arguments; delegates to the superclass."""
        super(TaskOperationListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the Operation list as a Python literal list argument."""
        super(TaskOperationListType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Operation=[\n')
        level += 1
        for Operation_ in self.Operation:
            showIndent(outfile, level)
            outfile.write('%s,\n' % quote_python(Operation_).encode(ExternalEncoding))
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from the lxml element *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Populate fields from XML attributes; delegates to the superclass."""
        super(TaskOperationListType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Append each <Operation> child's text; other tags go to the superclass."""
        if nodeName_ == 'Operation':
            Operation_ = child_.text
            Operation_ = self.gds_validate_string(Operation_, node, 'Operation')
            self.Operation.append(Operation_)
        super(TaskOperationListType, self).buildChildren(child_, node, nodeName_, True)
class LinkType(ReferenceType):
    """0.9 Extends reference type by adding relation attribute. Defines a
    hyper-link with a relationship, hyper-link reference, and an
    optional media type. always Defines the relationship of the link
    to the object that contains it. A relationship can be the name
    of an operation on the object, a reference to a contained or
    containing object, or a reference to an alternate representation
    of the object. The relationship value implies the HTTP verb to
    use when you use the link's href as a request URL."""
    subclass = None
    superclass = ReferenceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, id=None, name=None, rel=None):
        self.original_tagname_ = None
        super(LinkType, self).__init__(VCloudExtension, href, type_, id, name, )
        # Only addition over ReferenceType: the 'rel' relationship attribute.
        self.rel = _cast(None, rel)
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if LinkType.subclass:
            return LinkType.subclass(*args_, **kwargs_)
        else:
            return LinkType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors for the 'rel' attribute.
    def get_rel(self): return self.rel
    def set_rel(self, rel): self.rel = rel
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            super(LinkType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='LinkType', namespacedef_='', pretty_print=True):
        """Serialize this element (and children) as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Preserve the tag name the instance was originally parsed from.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='LinkType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LinkType'):
        """Write superclass attributes, then the optional 'rel' attribute."""
        super(LinkType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            outfile.write(' rel=%s' % (self.gds_format_string(quote_attrib(self.rel).encode(ExternalEncoding), input_name='rel'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='LinkType', fromsubclass_=False, pretty_print=True):
        """Write child elements; this type adds none beyond the superclass's."""
        super(LinkType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='LinkType'):
        """Write this element as Python literal (constructor-call style) text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write the 'rel' attribute then delegate to the superclass."""
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel="%s",\n' % (self.rel,))
        super(LinkType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write literal children; delegates entirely to the superclass."""
        super(LinkType, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this instance from the lxml element *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the 'rel' attribute, then delegate to the superclass."""
        value = find_attr_value_('rel', node)
        if value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = value
        super(LinkType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build children; this type has none of its own."""
        super(LinkType, self).buildChildren(child_, node, nodeName_, True)
        pass
class VdcTemplateListType(ResourceType):
    """0.9 Represents an VDC Template list.

    Wraps a repeatable <VdcTemplate> reference child element.
    """
    subclass = None
    superclass = ResourceType
    def __init__(self, VCloudExtension=None, href=None, type_=None, Link=None, VdcTemplate=None):
        self.original_tagname_ = None
        super(VdcTemplateListType, self).__init__(VCloudExtension, href, type_, Link, )
        # Default to a fresh list per instance (never share a mutable default).
        if VdcTemplate is None:
            self.VdcTemplate = []
        else:
            self.VdcTemplate = VdcTemplate
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if VdcTemplateListType.subclass:
            return VdcTemplateListType.subclass(*args_, **kwargs_)
        else:
            return VdcTemplateListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # List accessors for the repeatable <VdcTemplate> child.
    def get_VdcTemplate(self): return self.VdcTemplate
    def set_VdcTemplate(self, VdcTemplate): self.VdcTemplate = VdcTemplate
    def add_VdcTemplate(self, value): self.VdcTemplate.append(value)
    def insert_VdcTemplate_at(self, index, value): self.VdcTemplate.insert(index, value)
    def replace_VdcTemplate_at(self, index, value): self.VdcTemplate[index] = value
    def hasContent_(self):
        """Return True when there is any child content to serialize."""
        if (
            self.VdcTemplate or
            super(VdcTemplateListType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='VdcTemplateListType', namespacedef_='', pretty_print=True):
        """Serialize this element (and children) as XML to *outfile*."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Preserve the tag name the instance was originally parsed from.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VdcTemplateListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='VdcTemplateListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='VdcTemplateListType'):
        """Write XML attributes; this type adds none beyond the superclass's."""
        super(VdcTemplateListType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VdcTemplateListType')
    def exportChildren(self, outfile, level, namespace_='', name_='VdcTemplateListType', fromsubclass_=False, pretty_print=True):
        """Export each VdcTemplate reference after superclass children."""
        super(VdcTemplateListType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for VdcTemplate_ in self.VdcTemplate:
            VdcTemplate_.export(outfile, level, namespace_, name_='VdcTemplate', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='VdcTemplateListType'):
        """Write this element as Python literal (constructor-call style) text."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write attribute keyword arguments; delegates to the superclass."""
        super(VdcTemplateListType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the VdcTemplate list as a Python literal list argument."""
        super(VdcTemplateListType, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('VdcTemplate=[\n')
        level += 1
        for VdcTemplate_ in self.VdcTemplate:
            showIndent(outfile, level)
            outfile.write('model_.ReferenceType(\n')
            VdcTemplate_.exportLiteral(outfile, level, name_='ReferenceType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from the lxml element *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Populate fields from XML attributes; delegates to the superclass."""
        super(VdcTemplateListType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build each <VdcTemplate> reference; other tags go to the superclass."""
        if nodeName_ == 'VdcTemplate':
            # get_class_obj_ resolves an xsi:type-substituted subclass if present.
            class_obj_ = self.get_class_obj_(child_, ReferenceType)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.VdcTemplate.append(obj_)
            obj_.original_tagname_ = 'VdcTemplate'
        super(VdcTemplateListType, self).buildChildren(child_, node, nodeName_, True)
# Maps root-element tag names to the classes that parse them; consulted by
# get_root_tag() before falling back to a globals() lookup by class name.
GDSClassesMapping = {
    'VdcTemplateList': VdcTemplateListType,
    'Metadata': MetadataType,
    'AccessSettings': AccessSettingsType,
    'Reference': ReferenceType,
    'AccessSetting': AccessSettingType,
    'ControlAccessParams': ControlAccessParamsType,
    'MetadataEntry': MetadataEntryType,
    'QueryList': QueryListType,
    'Entity': EntityType,
    'VdcTemplate': ReferenceType,
    'Tasks': TasksInProgressType,
    'Task': TaskType,
    'MetadataValue': MetadataValueType,
    'Link': LinkType,
    'User': ReferenceType,
    'Organization': ReferenceType,
    'Error': ErrorType,
    'Owner': ReferenceType,
    'References': ReferencesType,
    'VCloudExtension': VCloudExtensionType,
    'Subject': ReferenceType,
}
# Command-line help text printed by usage().
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print command-line usage to stdout and exit with a non-zero status."""
    sys.stdout.write('%s\n' % USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return ``(tag, parser_class)`` for the root element *node*.

    The class comes from GDSClassesMapping when the tag is registered there,
    otherwise from a module-level class of the same name; it may be None.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    return tag, GDSClassesMapping.get(tag, globals().get(tag))
def parse(inFileName, silence=False):
    """Parse XML from *inFileName* and return the built root object.

    Unless *silence* is true, the object is re-exported as pretty-printed
    XML to stdout.
    """
    doc = parsexml_(inFileName)
    root_node = doc.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        # Unknown root element: fall back to this schema's default root type.
        root_tag = 'VdcTemplateListType'
        root_class = VdcTemplateListType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so Python can reclaim its memory.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        root_obj.export(
            sys.stdout, 0, name_=root_tag,
            namespacedef_='',
            pretty_print=True)
    return root_obj
def parseEtree(inFileName, silence=False):
    """Parse XML from *inFileName* and return a 4-tuple.

    Returns ``(rootObj, rootElement, mapping, reverse_mapping)`` where
    *rootElement* is an etree element rebuilt from the object model and the
    two mappings link generated objects and etree nodes.  Unless *silence*
    is true, the rebuilt tree is printed to stdout.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: fall back to this schema's default root type.
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        # etree tostring() returns bytes when an explicit encoding is given;
        # decode before writing to the text-mode stdout (required on Python 3).
        if isinstance(content, bytes):
            content = content.decode("utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse XML from the string *inString* and return the built root object.

    Unless *silence* is true, the object is re-exported as XML to stdout.
    """
    # Python 2 provides StringIO.StringIO; on Python 3 that import fails and
    # the lxml parser wants a bytes buffer, so fall back to io.BytesIO and
    # encode the input string.
    try:
        from StringIO import StringIO as IOBuffer
    except ImportError:
        from io import BytesIO as IOBuffer
        if isinstance(inString, str):
            inString = inString.encode('utf-8')
    doc = parsexml_(IOBuffer(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: fall back to this schema's default root type.
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse XML from *inFileName* and return the built root object.

    Unless *silence* is true, the object is written to stdout as a Python
    source-code literal (a model_ constructor call) rather than as XML.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: fall back to this schema's default root type.
        rootTag = 'VdcTemplateListType'
        rootClass = VdcTemplateListType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('#from VdcTemplateList import *\n\n')
        sys.stdout.write('import VdcTemplateList as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
        sys.stdout.write(')\n')
    return rootObj
def main():
    """Entry point: parse the single XML file named on the command line."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()  # prints help and exits with status 1
    parse(args[0])
# When run as a script, parse the file named on the command line.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated module (the names exported by
# ``from VdcTemplateList import *``).
__all__ = [
    "AccessSettingType",
    "AccessSettingsType",
    "CapacityType",
    "CapacityWithUsageType",
    "ContainerType",
    "ControlAccessParamsType",
    "EntityLinkType",
    "EntityReferenceType",
    "EntityType",
    "ErrorType",
    "LinkType",
    "MetadataEntryType",
    "MetadataType",
    "MetadataValueType",
    "OwnerType",
    "ParamsType",
    "QueryListType",
    "ReferenceType",
    "ReferencesType",
    "ResourceReferenceType",
    "ResourceType",
    "TaskOperationListType",
    "TaskType",
    "TasksInProgressType",
    "VCloudExtensibleType",
    "VCloudExtensionType",
    "VdcTemplateListType"
]
| en | 0.787102 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated Tue Apr 14 22:18:33 2015 by generateDS.py version 2.15a. # # Command line options: # ('-o', 'schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py') # # Command line arguments: # /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd # # Command line: # /home/eli/qa/.venv/src/pyvcloud/.venv/bin/generateDS.py -o "schema/vcd/v1_5/schemas/vcloud/VdcTemplateList.py" /home/eli/perl-VMware-vCloud/etc/1.5/schemas/vcloud/VdcTemplateList.xsd # # Current working directory (os.getcwd()): # pyvcloud # # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. # # User methods # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class # in a module named generatedssuper.py. # pat is a list of lists of strings/patterns. We should: # - AND the outer elements # - OR the inner elements # # If you have installed IPython you can uncomment and use the following. # IPython is available from http://ipython.scipy.org/. # ## from IPython.Shell import IPShellEmbed ## args = '' ## ipshell = IPShellEmbed(args, ## banner = 'Dropping into IPython', ## exit_msg = 'Leaving Interpreter, back to program.') # Then use the following line where and when you want to drop into the # IPython shell: # ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit') # # Globals # # # Support/utility functions. # %s %s # Constants for category: # Constants for content_type: # Prevent exporting empty content as empty lines. # category == MixedContainer.CategoryComplex # Prevent exporting empty content as empty lines. # category == MixedContainer.CategoryComplex # category == MixedContainer.CategoryComplex # # Data representation classes. # 0.9 Cloud API extension type with any elements and any attributes. always Determines whether server should fail if extension is not understood. 
# end class VCloudExtensionType 0.9 A base abstract type for all complex types that support extensions. # end class VCloudExtensibleType 0.9 The standard error message type used in the vCloud REST API. none An one line, human-readable message describing the error that occurred. none The class of the error. Matches the HTTP status code. none Specific API error code (for example - can indicate that vApp power on failed by some reason) none A vendor/implementation specific error code that point to specific modules/parts of the code and can make problem diagnostics easier. 1.0none The stack trace of the exception which when examined might make problem diagnostics easier. # end class ErrorType 0.9 The base type for all objects in the vCloud model. Has an optional list of links and href and type attributes. always Contains the URI to the entity. always Contains the type of the entity. # end class ResourceType 0.9 A basic type used to specify parameters for operations. always A name as parameter. # end class ParamsType 0.9 A reference to a resource. Contains an href attribute and optional name and type attributes. always Contains the URI to the entity. always The resource identifier, expressed in URN format. The value of this attribute uniquely identifies the resource, persists for the life of the resource, and is never reused. always Contains the type of the the entity. always Contains the name of the the entity. # end class ReferenceType 0.9 Represents a reference to a resource. Reference that contains an href attribute, an optional name and type attributes, and a resource status attribute. none Status of a resource. # end class ResourceReferenceType Container for query result sets. none Query name that generated this result set. none Page of the result set that this container holds. The first page is page number 1. none Page size, as a number of records or references. none Total number of records or references in the container. 
# end class ContainerType 0.9 Represents a capacity of a given resource. # end class CapacityType 0.9 Represents a capacity and usage of a given resource. # end class CapacityWithUsageType 0.9 Specifies who can access the resource. # end class AccessSettingType 0.9 A list of access settings for a resource. # end class AccessSettingsType 0.9 Used to control access to resources. # end class ControlAccessParamsType 1.5 Represents the owner of this entity. # end class OwnerType This is the container for returned elements in referenceView # end class ReferencesType Container for the list of typed queries available to the requesting user. # end class QueryListType # end class MetadataEntryType # end class MetadataType # end class MetadataValueType 0.9 Basic entity type in the vCloud object model. Includes a name, an optional description, and an optional list of links. always The name of the entity. none The entity identifier, expressed in URN format. The value of this attribute uniquely identifies the entity, persists for the life of the entity, and is never reused. # end class EntityType 1.5 A reference to a vCloud entity. none The object identifier, expressed in URN format. The value of this attribute uniquely identifies the object, persists for the life of the object, and is never reused. This context-free identifier can apply to any object in any system. always The type of the the referenced object. always The name of the referenced object. # end class EntityReferenceType 1.5 Extends EntityReference type by adding relation attribute. always Defines the relationship of the link to the object that contains it. A relationship can be the name of an operation on the object, a reference to a contained or containing object, or a reference to an alternate representation of the object. The relationship value implies the HTTP verb to use when you use the link's href value as a request URL. # end class EntityLinkType 0.9 A list of queued, running, or recently completed tasks. 
# end class TasksInProgressType 0.9 Represents an asynchronous or long-running task in the vCloud environment. none The execution status of the task. One of: queued (The task has been queued for execution.), preRunning (The task is awaiting preprocessing or, if it is a blocking task, administrative action.), running (The task is runnning.), success (The task completed with a status of success.), error (The task encountered an error while running.), canceled (The task was canceled by the owner or an administrator.), aborted (The task was aborted by an administrative action.) none The display name of the operation that is tracked by this task. none The name of the operation that is tracked by this task. none The date and time the system started executing the task. May not be present if the task hasn't been executed yet. none The date and time that processing of the task was completed. May not be present if the task is still being executed. none The date and time at which the task resource will be destroyed and no longer available for retrieval. May not be present if the task has not been executed or is still being executed. # end class TaskType List of operation names. 1.5 # end class TaskOperationListType 0.9 Extends reference type by adding relation attribute. Defines a hyper-link with a relationship, hyper-link reference, and an optional media type. always Defines the relationship of the link to the object that contains it. A relationship can be the name of an operation on the object, a reference to a contained or containing object, or a reference to an alternate representation of the object. The relationship value implies the HTTP verb to use when you use the link's href as a request URL. # end class LinkType 0.9 Represents an VDC Template list. # end class VdcTemplateListType Usage: python <Parser>.py [ -s ] <in_xml_file> # Enable Python to collect the space used by the DOM. # Enable Python to collect the space used by the DOM. 
# Enable Python to collect the space used by the DOM. # Enable Python to collect the space used by the DOM. #import pdb; pdb.set_trace() | 2.060335 | 2 |
wirepas_backend_client/messages/msap_cmds/msap_begin.py | bencorrado/backend-client | 0 | 6624894 | import struct
# Request/response opcodes for the MSAP Begin command (WP-RM-117).
cmdMsapBeginReq: bytes = bytes([0x01])
cmdMsapBeginResp: bytes = bytes([0x81])


class MsapBeginReq:
    """ Command MSAP Begin request

    Serializes the remote-API "MSAP begin" request frame: a one-byte
    command type (0x01) followed by a one-byte payload length of zero.
    """

    __is_valid: bool = False
    # Reserved for a future countdown parameter; currently unused.
    __countdown_sec: int = 0x00

    @staticmethod
    def getType() -> int:
        """Return the numeric command type of this request (0x01)."""
        return int(cmdMsapBeginReq[0])

    def __init__(self):
        self.__is_valid = True

    def toBytes(self) -> bytes:
        """Serialize the request to wire format (type byte + zero length).

        Raises:
            ValueError: if the request was not validly constructed.
        """
        if self.__is_valid:
            # WP-RM-117, V5.0.A: the request carries no payload, so the
            # length byte is always zero.
            return cmdMsapBeginReq + bytes([0])
        # Fixed copy-paste bug: the message previously referenced
        # "OtapMsapUpdateReq" (a different command class).
        raise ValueError("Not valid parameter for MsapBeginReq")

    def is_valid(self) -> bool:
        return self.__is_valid


class MsapBeginResp:
    """ Command MSAP Begin request response

    Deserializes and validates the two-byte response frame
    (type byte 0x81 followed by a zero length byte).
    """

    __is_valid: bool = False

    @staticmethod
    def getType() -> int:
        """Return the numeric command type of this response (0x81)."""
        return int(cmdMsapBeginResp[0])

    def __init__(self, data_bytes):
        # Validate response type.  Expected layout is exactly one type
        # byte and one length byte; an error response is longer and
        # therefore fails the size check below.
        fmt = "=cc"  # if there is error message this does not pack rest
        if len(data_bytes) == struct.calcsize(fmt):
            message_len: int = data_bytes[1]
            # See WP-RM-117 @ MSAP Scratchpad Update
            # https://docs.python.org/3/library/struct.html?highlight=struct#format-characters
            if message_len == 0:
                fields = struct.unpack(fmt, data_bytes)
                (self.type, self.msgLen) = fields
                if self.type == cmdMsapBeginResp:
                    self.__is_valid = True
                else:
                    print("error: Error response type is {}".format(self.type))
            else:
                print("Unknown message. Message len is", message_len)
        else:
            print("Deserialization failed. Data size:", len(data_bytes))

    def is_valid(self) -> bool:
        return self.__is_valid
| import struct
cmdMsapBeginReq: bytes = bytes([0x01])
cmdMsapBeginResp: bytes = bytes([0x81])
class MsapBeginReq:
""" Command MSAP Begin request """
__is_valid: bool = False
__countdown_sec: int = 0x00
@staticmethod
def getType() -> int:
return int(cmdMsapBeginReq[0])
def __init__(self):
self.__is_valid = True
def toBytes(self) -> bytes:
ret: bytes
if self.__is_valid:
# WP-RM-117, V5.0.A
ret: bytes = cmdMsapBeginReq + bytes([0])
else:
raise ValueError("Not valid parameter for OtapMsapUpdateReq")
return ret
def is_valid(self) -> bool:
return self.__is_valid
class MsapBeginResp:
""" Command MSAP Begin request response """
__is_valid: bool = False
@staticmethod
def getType() -> int:
return int(cmdMsapBeginResp[0])
def __init__(self, data_bytes):
# Validate response type
fmt = "=cc" # if there is error message this does not pack rest
if len(data_bytes) == struct.calcsize(fmt):
message_len: int = data_bytes[1]
# See WP-RM-117 @ MSAP Scratchpad Update
# https://docs.python.org/3/library/struct.html?highlight=struct#format-characters
if message_len == 0:
fields = struct.unpack(fmt, data_bytes)
(self.type, self.msgLen) = fields
if self.type == cmdMsapBeginResp:
self.__is_valid = True
else:
print("error: Error response type is {}".format(self.type))
else:
print("Unknown message. Message len is", message_len)
else:
print("Deserialization failed. Data size:", len(data_bytes))
def is_valid(self) -> bool:
return self.__is_valid
| en | 0.656924 | Command MSAP Begin request # WP-RM-117, V5.0.A Command MSAP Begin request response # Validate response type # if there is error message this does not pack rest # See WP-RM-117 @ MSAP Scratchpad Update # https://docs.python.org/3/library/struct.html?highlight=struct#format-characters | 2.485999 | 2 |
devices/debian.py | lynnlincbn/boardfarm | 0 | 6624895 | <gh_stars>0
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import sys
import time
import pexpect
import base
import atexit
import ipaddress
import os
import binascii
import glob
from termcolor import colored, cprint
class DebianBox(base.BaseDevice):
    '''
    A linux machine running an ssh server.

    Drives a Debian host over ssh (or a locally spawned command) via
    pexpect, and can configure it as a WAN gateway, a LAN-side client,
    or a CMTS provisioner for board testing.
    '''
    model = ('debian')
    # Shell prompt patterns matched by pexpect (root bash, busybox, SuSE).
    prompt = ['root\\@.*:.*#', '/ # ', ".*:~ #" ]
    # Optional "dst via gw" route installed during configure().
    static_route = None
    static_ip = False
    # Obtain the WAN address via DHCP instead of a static assignment.
    wan_dhcp = False
    # Set when the uplink is not eth0 (the DUT-facing iface is used instead).
    wan_no_eth0 = False
    # Act as a CMTS provisioner (skips the plain DHCP server setup).
    wan_cmts_provisioner = False
    # Guards install_pkgs() so packages are only installed once.
    pkgs_installed = False
    # Deferred install flag, set when the network is not yet reachable.
    install_pkgs_after_dhcp = False
    # Detected at runtime by start_lan_client() when no NAT gateway is seen.
    is_bridged = False
    # Name of the interface facing the device under test.
    iface_dut = "eth1"
def __init__(self,
*args,
**kwargs):
self.args = args
self.kwargs = kwargs
name = kwargs.pop('name', None)
ipaddr = kwargs.pop('ipaddr', None)
color = kwargs.pop('color', 'black')
username = kwargs.pop('username', 'root')
password = kwargs.pop('password', '<PASSWORD>')
port = kwargs.pop('port', '22')
output = kwargs.pop('output', sys.stdout)
reboot = kwargs.pop('reboot', False)
location = kwargs.pop('location', None)
pre_cmd_host = kwargs.pop('pre_cmd_host', None)
cmd = kwargs.pop('cmd', None)
post_cmd_host = kwargs.pop('post_cmd_host', None)
post_cmd = kwargs.pop('post_cmd', None)
cleanup_cmd = kwargs.pop('cleanup_cmd', None)
env = kwargs.pop('env', None)
lan_network = kwargs.pop('lan_network', ipaddress.IPv4Network(u"192.168.1.0/24"))
lan_gateway = kwargs.pop('lan_gateway', ipaddress.IPv4Address(u"192.168.1.1"))
self.name = name
self.http_proxy = kwargs.pop('http_proxy', None)
if ipaddr is not None:
pexpect.spawn.__init__(self,
command="ssh",
args=['%s@%s' % (username, ipaddr),
'-p', port,
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'ServerAliveInterval=60',
'-o', 'ServerAliveCountMax=5'])
self.ipaddr = ipaddr
else:
if pre_cmd_host is not None:
sys.stdout.write("\tRunning pre_cmd_host.... ")
sys.stdout.flush()
phc = pexpect.spawn(command='bash', args=['-c', pre_cmd_host], env=env)
phc.expect(pexpect.EOF, timeout=120)
print("\tpre_cmd_host done")
if cleanup_cmd is not None:
self.cleanup_cmd = cleanup_cmd
atexit.register(self.run_cleanup_cmd)
pexpect.spawn.__init__(self, command="bash", args=['-c', cmd], env=env)
self.ipaddr = None
self.color = color
self.output = output
self.username = username
if username != "root":
self.prompt.append('%s\\@.*:.*$' % username)
self.password = password
self.port = port
self.location = location
self.env=env
self.lan_network = lan_network
self.lan_gateway = lan_gateway
# we need to pick a non-conflicting private network here
# also we want it to be consistant and not random for a particular
# board
if (lan_gateway - lan_network.num_addresses).is_private:
self.gw = lan_gateway - lan_network.num_addresses
else:
self.gw = lan_gateway + lan_network.num_addresses
self.nw = ipaddress.IPv4Network(str(self.gw).decode('utf-8') + '/' + str(lan_network.netmask), strict=False)
# override above values if set in wan options
if 'options' in kwargs:
options = [x.strip() for x in kwargs['options'].split(',')]
for opt in options:
if opt.startswith('wan-static-ip:'):
self.gw = opt.replace('wan-static-ip:', '')
self.static_ip = True
if opt.startswith('wan-static-route:'):
self.static_route = opt.replace('wan-static-route:', '').replace('-', ' via ')
# TODO: remove wan-static-route at some point above
if opt.startswith('static-route:'):
self.static_route = opt.replace('static-route:', '').replace('-', ' via ')
if opt.startswith('wan-dhcp-client'):
self.wan_dhcp = True
if opt.startswith('wan-cmts-provisioner'):
self.wan_cmts_provisioner = True
if opt.startswith('wan-no-eth0'):
self.wan_no_eth0 = True
try:
i = self.expect(["yes/no", "assword:", "Last login"] + self.prompt, timeout=30)
except pexpect.TIMEOUT as e:
raise Exception("Unable to connect to %s." % name)
except pexpect.EOF as e:
if hasattr(self, "before"):
print(self.before)
raise Exception("Unable to connect to %s." % name)
if i == 0:
self.sendline("yes")
i = self.expect(["Last login", "assword:"])
if i == 1:
self.sendline(password)
else:
pass
# if we did initially get a prompt wait for one here
if i < 3:
self.expect(self.prompt)
if ipaddr is None:
self.sendline('hostname')
self.expect('hostname')
self.expect(self.prompt)
ipaddr = self.ipaddr = self.before.strip()
if self.port != 22:
cprint("%s port %s device console = %s" % (ipaddr, port, colored(color, color)), None, attrs=['bold'])
else:
cprint("%s device console = %s" % (ipaddr, colored(color, color)), None, attrs=['bold'])
if post_cmd_host is not None:
sys.stdout.write("\tRunning post_cmd_host.... ")
sys.stdout.flush()
phc = pexpect.spawn(command='bash', args=['-c', post_cmd_host], env=env)
i = phc.expect([pexpect.EOF, pexpect.TIMEOUT, 'password'])
if i > 0:
print("\tpost_cmd_host did not complete, it likely failed\n")
else:
print("\tpost_cmd_host done")
if post_cmd is not None:
env_prefix=""
for k, v in env.iteritems():
env_prefix += "export %s=%s; " % (k, v)
self.sendline(env_prefix + post_cmd)
self.expect(self.prompt)
if reboot:
self.reset()
self.logfile_read = output
def run_cleanup_cmd(self):
sys.stdout.write("Running cleanup_cmd on %s..." % self.name)
sys.stdout.flush()
cc = pexpect.spawn(command='bash', args=['-c', self.cleanup_cmd], env=self.env)
cc.expect(pexpect.EOF, timeout=120)
print("cleanup_cmd done.")
def sudo_sendline(self, s):
if self.username != "root":
s = "sudo " + s
return super(type(self), self).sendline(s)
    def reset(self):
        """Reboot the box, wait until it answers ping again, then
        re-attach by re-running __init__ over a fresh session."""
        self.sendline('reboot')
        self.expect(['going down','disconnected'])
        try:
            self.expect(self.prompt, timeout=10)
        except:
            pass
        time.sleep(15) # Wait for the network to go down.
        # Poll with single pings for up to ~20s until the host responds.
        for i in range(0, 20):
            try:
                pexpect.spawn('ping -w 1 -c 1 ' + self.name).expect('64 bytes', timeout=1)
            except:
                print(self.name + " not up yet, after %s seconds." % (i + 15))
            else:
                print("%s is back after %s seconds, waiting for network daemons to spawn." % (self.name, i + 14))
                time.sleep(15)
                break
        # NOTE(review): __init__ consumes its configuration via keyword
        # arguments only (positional args land in *args and are ignored),
        # so these positional values likely have no effect -- verify.
        self.__init__(self.name, self.color,
                      self.output, self.username,
                      self.password, self.port,
                      reboot=False)
    def get_interface_ipaddr(self, interface):
        """Return the IPv4 address of *interface*, parsed from ifconfig.

        Matches both the older ("inet addr:") and newer ("inet ")
        ifconfig output formats.  Raises a pexpect TIMEOUT if the
        interface has no address within 5 seconds.
        """
        self.sendline("\nifconfig %s" % interface)
        regex = ['addr:(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*(Bcast|P-t-P):',
                 'inet (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*(broadcast|P-t-P)']
        self.expect(regex, timeout=5)
        ipaddr = self.match.group(1)
        self.expect(self.prompt)
        return ipaddr
def install_pkgs(self):
if self.pkgs_installed == True:
return
if not self.wan_no_eth0 and not self.wan_dhcp and not self.install_pkgs_after_dhcp:
self.sendline('ifconfig %s down' % self.iface_dut)
self.expect(self.prompt)
pkgs = "isc-dhcp-server xinetd tinyproxy curl apache2-utils nmap psmisc vim-common tftpd-hpa pppoe isc-dhcp-server procps iptables lighttpd psmisc dnsmasq"
def _install_pkgs():
self.sendline('apt-get update && apt-get -o DPkg::Options::="--force-confnew" -qy install %s' % pkgs)
if 0 == self.expect(['Reading package', pexpect.TIMEOUT], timeout=60):
self.expect(self.prompt, timeout=300)
else:
print("Failed to download packages, things might not work")
self.sendcontrol('c')
self.expect(self.prompt)
self.pkgs_installed = True
# TODO: use netns for all this?
undo_default_route = None
self.sendline('ping -c1 deb.debian.org')
i = self.expect(['ping: unknown host', 'connect: Network is unreachable', pexpect.TIMEOUT] + self.prompt, timeout=10)
if 0 == i:
# TODO: don't reference eth0, but the uplink iface
self.sendline("echo SYNC; ip route list | grep 'via.*dev eth0' | awk '{print $3}'")
self.expect_exact("SYNC\r\n")
if 0 == self.expect(['(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\r\n'] + self.prompt, timeout=5):
possible_default_gw = self.match.group(1)
self.sendline("ip route add default via %s" % possible_default_gw)
self.expect(self.prompt)
self.sendline('ping -c1 deb.debian.org')
self.expect(self.prompt)
undo_default_route = possible_default_gw
self.sendline('apt-get update && apt-get -o DPkg::Options::="--force-confnew" -qy install %s' % pkgs)
if 0 == self.expect(['Reading package', pexpect.TIMEOUT], timeout=60):
self.expect(self.prompt, timeout=300)
else:
print("Failed to download packages, things might not work")
self.sendcontrol('c')
self.expect(self.prompt)
elif 1 == i:
if self.install_pkgs_after_dhcp:
_install_pkgs()
else:
self.install_pkgs_after_dhcp = True
return
elif 2 == i:
self.sendcontrol('c')
self.expect(self.prompt)
else:
_install_pkgs()
if undo_default_route is not None:
self.sendline("ip route del default via %s" % undo_default_route)
self.expect(self.prompt)
    def ip_neigh_flush(self):
        """Flush all entries from the kernel neighbour (ARP/NDP) cache."""
        self.sendline('\nip -s neigh flush all')
        self.expect('flush all')
        self.expect(self.prompt)
    def turn_on_pppoe(self):
        """Write pppoe-server options and start a PPPoE server on the
        DUT-facing interface (4 sessions, 192.168.2.x address pool)."""
        self.sendline('cat > /etc/ppp/pppoe-server-options << EOF')
        self.sendline('noauth')
        # Hand out Google public DNS to PPPoE clients.
        self.sendline('ms-dns 8.8.8.8')
        self.sendline('ms-dns 8.8.4.4')
        self.sendline('EOF')
        self.expect(self.prompt)
        self.sendline('pppoe-server -k -I %s -L 192.168.2.1 -R 192.168.2.10 -N 4' % self.iface_dut)
        self.expect(self.prompt)
    def turn_off_pppoe(self):
        """Kill any running PPPoE server/client processes."""
        self.sendline("\nkillall pppoe-server pppoe pppd")
        self.expect("pppd")
        self.expect(self.prompt)
    def start_tftp_server(self):
        """(Re)create the /srv/tftp root (symlinked at /tftpboot) and
        start the HPA TFTP daemon on the DUT-facing interface address."""
        # we can call this first, before configure so we need to do this here
        # as well
        self.install_pkgs()
        # the entire reason to start tftp is to copy files to devices
        # which we do via ssh so let's start that as well
        self.start_sshd_server()
        try:
            eth1_addr = self.get_interface_ipaddr(self.iface_dut)
        except:
            eth1_addr = None
        # set WAN ip address, for now this will always be this address for the device side
        # TODO: fix gateway for non-WAN tftp_server
        if self.gw != eth1_addr:
            self.sendline('ifconfig %s %s' % (self.iface_dut, getattr(self, 'gw', '192.168.0.1')))
            self.expect(self.prompt)
        self.sendline('ifconfig %s up' % self.iface_dut)
        self.expect(self.prompt)
        #configure tftp server
        self.sendline('/etc/init.d/tftpd-hpa stop')
        self.expect('Stopping')
        self.expect(self.prompt)
        # Rebuild the tftp root from scratch so stale files never leak
        # between test runs.
        self.sendline('rm -rf /tftpboot')
        self.expect(self.prompt)
        self.sendline('rm -rf /srv/tftp')
        self.expect(self.prompt)
        self.sendline('mkdir -p /srv/tftp')
        self.expect(self.prompt)
        self.sendline('ln -sf /srv/tftp/ /tftpboot')
        self.expect(self.prompt)
        self.sendline('mkdir -p /tftpboot/tmp')
        self.expect(self.prompt)
        self.sendline('chmod a+w /tftpboot/tmp')
        self.expect(self.prompt)
        self.sendline('mkdir -p /tftpboot/crashdump')
        self.expect(self.prompt)
        self.sendline('chmod a+w /tftpboot/crashdump')
        self.expect(self.prompt)
        # Rewrite /etc/default/tftpd-hpa: secure mode with file creation,
        # listen on :69, serve /srv/tftp (delete-then-append per key).
        self.sendline('sed /TFTP_OPTIONS/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_OPTIONS=\\"--secure --create\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('sed /TFTP_ADDRESS/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_ADDRESS=\\":69\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('sed /TFTP_DIRECTORY/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_DIRECTORY=\\"/srv/tftp\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/tftpd-hpa restart')
        self.expect(self.prompt)
    def restart_tftp_server(self):
        """Restart the HPA TFTP daemon."""
        self.sendline('\n/etc/init.d/tftpd-hpa restart')
        self.expect('Restarting')
        self.expect(self.prompt)
    def start_sshd_server(self):
        """Start syslog and sshd, and enable root logins over ssh."""
        self.sendline('/etc/init.d/rsyslog start')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/ssh start')
        self.expect(self.prompt)
        # Permit root login so test scripts can ssh/scp in directly.
        self.sendline('sed "s/.*PermitRootLogin.*/PermitRootLogin yes/g" -i /etc/ssh/sshd_config')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/ssh reload')
        self.expect(self.prompt)
    def copy_file_to_server(self, src, dst=None):
        """Copy a local file to the server over the console session.

        The file is gzip-compressed, hex-encoded, and streamed through a
        heredoc into ``xxd -r -p | gunzip`` on the remote side, so no
        separate transfer channel (scp/tftp) is needed.

        Args:
            src: local path of the file to copy.
            dst: remote destination path; defaults to
                 /tftpboot/<basename of src>.

        Raises:
            Exception: if the file does not exist remotely afterwards.
        """
        def gzip_str(string_):
            # Gzip a bytes payload in memory and return the compressed bytes.
            import gzip
            import io
            out = io.BytesIO()
            with gzip.GzipFile(fileobj=out, mode='w') as fo:
                fo.write(string_)
            return out.getvalue()
        with open(src, mode='rb') as file:
            bin_file = binascii.hexlify(gzip_str(file.read()))
        if dst is None:
            dst = '/tftpboot/' + os.path.basename(src)
        print ("Copying %s to %s" % (src, dst))
        # Suppress console logging while the (large) hex blob streams by.
        saved_logfile_read = self.logfile_read
        self.logfile_read = None
        self.sendline('''cat << EOFEOFEOFEOF | xxd -r -p | gunzip > %s
%s
EOFEOFEOFEOF''' % (dst, bin_file))
        self.expect(self.prompt)
        self.sendline('ls %s' % dst)
        self.expect_exact('ls %s' % dst)
        i = self.expect(['ls: cannot access %s: No such file or directory' % dst] + self.prompt)
        if i == 0:
            raise Exception("Failed to copy file")
        self.logfile_read = saved_logfile_read
    def configure(self, kind, config=[]):
        """Configure this host for a role in the test topology.

        Args:
            kind: "wan_device" or "lan_device".
            config: unused here; kept for interface compatibility.
                    (NOTE: mutable default argument -- never mutate it.)
        """
        self.install_pkgs()
        self.start_sshd_server()
        if kind == "wan_device":
            self.setup_as_wan_gateway()
        elif kind == "lan_device":
            self.setup_as_lan_device()
        if self.static_route is not None:
            # Delete any stale copy of the route first; a failure of the
            # delete is deliberately ignored (the route may not exist).
            # TODO: handle this more robustly.
            self.send('ip route del %s; ' % self.static_route.split(' via ')[0])
            self.sendline('ip route add %s' % self.static_route)
            self.expect(self.prompt)
def update_cmts_isc_dhcp_config(self, board_config):
self.sendline('''cat > /etc/dhcp/dhcpd.conf << EOF
log-facility local7;
option log-servers 192.168.3.1;
option time-servers 192.168.3.1;
next-server 192.168.3.1;
default-lease-time 604800;
max-lease-time 604800;
allow leasequery;
option space docsis-mta;
option docsis-mta.dhcp-server-1 code 1 = ip-address;
option docsis-mta.dhcp-server-1 192.168.3.1;
option docsis-mta.dhcp-server-2 code 2 = ip-address;
option docsis-mta.dhcp-server-2 192.168.3.1;
option docsis-mta.provision-server code 3 = { integer 8, string };
option docsis-mta.provision-server 0 08:54:43:4F:4D:4C:41:42:53:03:43:4F:4D:00 ;
option docsis-mta-encap code 122 = encapsulate docsis-mta;
option docsis-mta.kerberos-realm code 6 = string;
option docsis-mta.kerberos-realm 05:42:41:53:49:43:01:31:00 ;
subnet 192.168.3.0 netmask 255.255.255.0 {
interface %s;
}
subnet 192.168.200.0 netmask 255.255.255.0
{
interface %s;
range 192.168.200.10 192.168.200.250;
option routers 192.168.200.1;
option broadcast-address 192.168.200.255;
option dhcp-parameter-request-list 43;
option domain-name "local";
option time-offset 1;
option tftp-server-name "192.168.3.1";
filename "UNLIMITCASA.cfg";
allow unknown-clients;
}
subnet 192.168.201.0 netmask 255.255.255.0
{
interface %s;
range 192.168.201.10 192.168.201.250;
option routers 192.168.201.1;
option broadcast-address 192.168.201.255;
option time-offset 1;
option domain-name-servers %s;
allow unknown-clients;
}
EOF''' % (self.iface_dut, self.iface_dut, self.iface_dut, self.gw))
self.expect(self.prompt)
# The board will ignore this unless the docsis-mac is set to ipv6
# That needs to be done manually as well as copying any CM cfg files
# to the provisioner (e.g. still not fully automated)
self.sendline('''cat > /etc/dhcp/dhcpd6.conf << EOF
preferred-lifetime 7500;
option dhcp-renewal-time 3600;
option dhcp-rebinding-time 5400;
allow leasequery;
option dhcp6.name-servers fdf8:f53e:61e4::18;
option dhcp6.domain-search "test.example.com","example.com";
option dhcp6.info-refresh-time 21600;
option dhcp6.ia_pd code 25 = { integer 32, integer 32, integer 32, integer 16, integer 16, integer 32, integer 32, integer 8, ip6-address};
option dhcp6.gateway code 32003 = ip6-address;
option space docsis code width 2 length width 2 hash size 100;
option docsis.tftp-servers code 32 = array of ip6-address;
option docsis.configuration-file code 33 = text;
option docsis.syslog-servers code 34 = array of ip6-address;
#option docsis.device-id code 36 = string;
option docsis.time-servers code 37 = array of ip6-address;
option docsis.time-offset code 38 = signed integer 32;
option vsio.docsis code 4491 = encapsulate docsis;
subnet6 2001:ed8:77b5:3::/64 {
range6 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b fc00:e968:6179::de52:7100;
interface %s;
option docsis.tftp-servers fc00:db20:35b:7399::5;
option docsis.time-servers fc00:db20:35b:7399::5;
option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg";
option docsis.syslog-servers fc00:db20:35b:7399::5 ;
option docsis.time-offset 5000;
}
subnet6 2001:ed8:77b5:2000::/64 {
range6 fc00:db20:35b:7399::5 fdf8:f53e:61e4::18;
interface %s;
option docsis.tftp-servers fc00:db20:35b:7399::5;
option docsis.time-servers fc00:db20:35b:7399::5;
option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg";
option docsis.syslog-servers fc00:db20:35b:7399::5;
option docsis.time-offset 5000;
}
subnet6 2001:ed8:77b5:2001::/64 {
range6 fc00:e968:6179::de52:7100 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b;
interface %s;
option dhcp6.ia_pd 1234 20000 40000 26 25 30000 60000 64 2001:ed8:77b5:4::;
option dhcp6.solmax-rt 240;
option dhcp6.inf-max-rt 360;
}
EOF''' % (self.iface_dut, self.iface_dut, self.iface_dut))
self.expect(self.prompt)
self.sendline('rm /etc/dhcp/dhcpd.conf.''' + board_config['station'])
self.expect(self.prompt)
if 'extra_provisioning' in board_config:
cfg_file = "/etc/dhcp/dhcpd.conf." + board_config['station']
# zero out old config
self.sendline('cp /dev/null %s' % cfg_file)
self.expect(self.prompt)
# there is probably a better way to construct this file...
for dev, cfg_sec in board_config['extra_provisioning'].iteritems():
self.sendline("echo 'host %s-%s {' >> %s" % (dev, board_config['station'], cfg_file))
for key, value in cfg_sec.iteritems():
if key == "options":
for k2, v2 in value.iteritems():
self.sendline("echo ' option %s %s;' >> %s" % (k2, v2, cfg_file))
self.expect(self.prompt)
else:
self.sendline("echo ' %s %s;' >> %s" % (key, value, cfg_file))
self.expect(self.prompt)
self.sendline("echo '}' >> %s" % cfg_file)
# TODO: extra per board dhcp6 provisioning
# combine all configs into one
self.sendline("cat /etc/dhcp/dhcpd.conf.* >> /etc/dhcp/dhcpd.conf")
self.expect(self.prompt)
def copy_cmts_provisioning_files(self, board_config):
# Look in all overlays as well, and PATH as a workaround for standalone
paths = os.environ['PATH'].split(os.pathsep)
paths += os.environ['BFT_OVERLAY'].split(' ')
cfg_list = []
if 'tftp_cfg_files' in board_config:
for path in paths:
for cfg in board_config['tftp_cfg_files']:
cfg_list += glob.glob(path + '/devices/cm-cfg/%s' % cfg)
else:
for path in paths:
cfg_list += glob.glob(path + '/devices/cm-cfg/UNLIMITCASA.cfg')
cfg_set = set(cfg_list)
# Copy binary files to tftp server
for cfg in cfg_set:
# TODO: use common cmd_exists
cmd_exists = lambda x: any(os.access(os.path.join(path, x), os.X_OK) for path in os.environ["PATH"].split(os.pathsep))
assert cmd_exists('docsis')
# TODO: much better error checking
os.system("docsis -e %s /dev/null %s" % (cfg, cfg.replace('.txt', '.bin')))
self.copy_file_to_server(cfg.replace('.txt', '.bin'))
os.remove(cfg.replace('.txt', '.bin'))
def provision_board(self, board_config):
''' Setup DHCP and time server etc for CM provisioning'''
self.sendline('/etc/init.d/isc-dhcp-server stop')
self.expect(self.prompt)
self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
self.expect(self.prompt)
self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
self.expect(self.prompt)
self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
self.expect(self.prompt)
# we are bypass this for now (see http://patchwork.ozlabs.org/patch/117949/)
self.sendline('sysctl -w net.ipv6.conf.%s.accept_dad=0' % self.iface_dut)
self.expect(self.prompt)
self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw))
self.expect(self.prompt)
self.sendline('ifconfig %s inet6 add fc00:db20:35b:7399::5/64' % self.iface_dut)
self.expect(self.prompt)
# TODO: specify these via config
self.sendline('ip route add 192.168.201.0/24 via 192.168.3.222')
self.expect(self.prompt)
self.sendline('ip route add 192.168.200.0/24 via 192.168.3.222')
self.expect(self.prompt)
self.sendline('ip -6 route add 2001:ed8:77b5:2000::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut)
self.expect(self.prompt)
self.sendline('ip -6 route add 2001:ed8:77b5:2001::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut)
self.expect(self.prompt)
self.update_cmts_isc_dhcp_config(board_config)
self.sendline('/etc/init.d/isc-dhcp-server start')
# We expect both, so we need debian 9 or greater for this device
self.expect('Starting ISC DHCPv4 server.*dhcpd.')
self.expect('Starting ISC DHCPv6 server.*dhcpd.')
self.expect(self.prompt)
# this might be redundant, but since might not have a tftpd server running
# here we have to start one for the CM configs
self.start_tftp_server()
self.copy_cmts_provisioning_files(board_config)
self.sendline("sed 's/disable\\t\\t= yes/disable\\t\\t= no/g' -i /etc/xinetd.d/time")
self.expect(self.prompt)
self.sendline("grep -q flags.*=.*IPv6 /etc/xinetd.d/time || sed '/wait.*=/a\\\\tflags\\t\\t= IPv6' -i /etc/xinetd.d/time")
self.expect(self.prompt)
self.sendline('/etc/init.d/xinetd restart')
self.expect('Starting internet superserver: xinetd.')
self.expect(self.prompt)
    def reprovision_board(self, board_config):
        '''New DHCP, cfg files etc for board after it's been provisioned once'''
        self.copy_cmts_provisioning_files(board_config)
        self.update_cmts_isc_dhcp_config(board_config)
        self.sendline('/etc/init.d/isc-dhcp-server restart')
        # Match both older and newer Debian init-script wording.
        self.expect(['Starting ISC DHCP(v4)? server.*dhcpd.', 'Starting isc-dhcp-server.*'])
        self.expect(self.prompt)
    def setup_dhcp_server(self):
        """Write /etc/dhcp/dhcpd.conf for the DUT-facing subnet and
        (re)start isc-dhcp-server bound to the DUT-facing interface."""
        # configure DHCP server
        self.sendline('/etc/init.d/isc-dhcp-server stop')
        self.expect(self.prompt)
        # Bind the daemon to the DUT-facing interface only (three sed
        # lines cover both old and new isc-dhcp-server defaults files).
        self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('cat > /etc/dhcp/dhcpd.conf << EOF')
        self.sendline('ddns-update-style none;')
        self.sendline('option domain-name "bigfoot-test";')
        self.sendline('option domain-name-servers %s;' % self.gw)
        self.sendline('default-lease-time 600;')
        self.sendline('max-lease-time 7200;')
        # use the same netmask as the lan device
        self.sendline('subnet %s netmask %s {' % (self.nw.network_address, self.nw.netmask))
        self.sendline(' range %s %s;' % (self.nw.network_address + 10, self.nw.network_address + 100))
        self.sendline(' option routers %s;' % self.gw)
        self.sendline('}')
        self.sendline('EOF')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/isc-dhcp-server start')
        self.expect(['Starting ISC DHCP(v4)? server.*dhcpd.', 'Starting isc-dhcp-server.*'])
        self.expect(self.prompt)
    def setup_dnsmasq(self):
        """Configure dnsmasq as a forwarding DNS server listening on
        localhost and the gateway address."""
        self.sendline('cat > /etc/dnsmasq.conf << EOF')
        self.sendline('server=8.8.4.4')
        self.sendline('listen-address=127.0.0.1')
        self.sendline('listen-address=%s' % self.gw)
        self.sendline('EOF')
        # NOTE(review): no expect() between the heredoc and the restart;
        # appears to rely on pexpect buffering -- confirm intended.
        self.sendline('/etc/init.d/dnsmasq restart')
        self.expect(self.prompt)
    def setup_as_wan_gateway(self):
        """Configure this host as the WAN-side gateway: DNS via dnsmasq,
        an address on the DUT-facing interface (static or via DHCP),
        an optional DHCP server, IP forwarding, and SNAT on the uplink."""
        self.setup_dnsmasq()
        self.sendline('killall iperf ab hping3')
        self.expect(self.prompt)
        self.sendline('\nsysctl net.ipv6.conf.all.disable_ipv6=0')
        self.expect('sysctl ')
        self.expect(self.prompt)
        # potential cleanup so this wan device works
        self.sendline('iptables -t nat -X')
        self.expect(self.prompt)
        self.sendline('iptables -t nat -F')
        self.expect(self.prompt)
        # set WAN ip address
        if self.wan_dhcp:
            self.sendline('/etc/init.d/isc-dhcp-server stop')
            self.expect(self.prompt)
            self.sendline('dhclient -r %s; dhclient %s' % (self.iface_dut, self.iface_dut))
            self.expect(self.prompt)
            # Remember the leased address as the effective gateway.
            self.gw = self.get_interface_ipaddr(self.iface_dut)
        else:
            self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw))
            self.expect(self.prompt)
            self.sendline('ifconfig %s up' % self.iface_dut)
            self.expect(self.prompt)
            if not self.wan_cmts_provisioner:
                self.setup_dhcp_server()
        # configure routing
        self.sendline('sysctl net.ipv4.ip_forward=1')
        self.expect(self.prompt)
        # SNAT egress traffic out of whichever interface is the uplink.
        if self.wan_no_eth0 or self.wan_dhcp:
            wan_uplink_iface = self.iface_dut
        else:
            wan_uplink_iface = "eth0"
        wan_ip_uplink = self.get_interface_ipaddr(wan_uplink_iface)
        self.sendline('iptables -t nat -A POSTROUTING -o %s -j SNAT --to-source %s' % (wan_uplink_iface, wan_ip_uplink))
        self.expect(self.prompt)
        # Disable TCP timestamps/SACK to simplify traffic analysis.
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps')
        self.expect(self.prompt)
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack')
        self.expect(self.prompt)
        self.sendline('ifconfig %s' % self.iface_dut)
        self.expect(self.prompt)
        self.turn_off_pppoe()
    def setup_as_lan_device(self):
        """Prepare this host as a LAN-side client of the DUT: flush
        firewall state, enable forwarding, and NAT local port 222 to
        the gateway's ssh so the router is reachable through us."""
        # potential cleanup so this wan device works
        self.sendline('killall iperf ab hping3')
        self.expect(self.prompt)
        self.sendline('\niptables -t nat -X')
        self.expect('iptables -t')
        self.expect(self.prompt)
        self.sendline('sysctl net.ipv6.conf.all.disable_ipv6=0')
        self.expect(self.prompt)
        self.sendline('sysctl net.ipv4.ip_forward=1')
        self.expect(self.prompt)
        self.sendline('iptables -t nat -F; iptables -t nat -X')
        self.expect(self.prompt)
        self.sendline('iptables -F; iptables -X')
        self.expect(self.prompt)
        # Forward local port 222 to the gateway's ssh (port 22).
        self.sendline('iptables -t nat -A PREROUTING -p tcp --dport 222 -j DNAT --to-destination %s:22' % self.lan_gateway)
        self.expect(self.prompt)
        self.sendline('iptables -t nat -A POSTROUTING -o %s -p tcp --dport 22 -j MASQUERADE' % self.iface_dut)
        self.expect(self.prompt)
        # Disable TCP timestamps/SACK to simplify traffic analysis.
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps')
        self.expect(self.prompt)
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack')
        self.expect(self.prompt)
        # Kill any stale dhclient bound to the DUT-facing interface.
        self.sendline('pkill --signal 9 -f dhclient.*%s' % self.iface_dut)
        self.expect(self.prompt)
def start_lan_client(self, wan_gw=None):
self.sendline('\nifconfig %s up' % self.iface_dut)
self.expect('ifconfig %s up' % self.iface_dut)
self.expect(self.prompt)
self.sendline("dhclient -r %s" % self.iface_dut)
self.expect(self.prompt)
self.sendline('\nifconfig %s 0.0.0.0' % self.iface_dut)
self.expect(self.prompt)
self.sendline('rm /var/lib/dhcp/dhclient.leases')
self.expect(self.prompt)
self.sendline("sed -e 's/mv -f $new_resolv_conf $resolv_conf/cat $new_resolv_conf > $resolv_conf/g' -i /sbin/dhclient-script")
self.expect(self.prompt)
self.sendline('ip route del default dev eth0')
self.expect(self.prompt)
for attempt in range(3):
try:
self.sendline('dhclient -v %s' % self.iface_dut)
self.expect('DHCPOFFER', timeout=30)
self.expect(self.prompt)
break
except:
self.sendcontrol('c')
else:
raise Exception("Error: Device on LAN couldn't obtain address via DHCP.")
self.sendline('ifconfig %s' % self.iface_dut)
self.expect(self.prompt)
self.sendline('ip route')
# TODO: we should verify this so other way, because the they could be the same subnets
# in theory
i = self.expect(['default via %s dev %s' % (self.lan_gateway, self.iface_dut), pexpect.TIMEOUT], timeout=5)
if i == 1:
# bridged mode
self.is_bridged = True
# update gw
self.sendline("ip route list 0/0 | awk '{print $3}'")
self.expect_exact("ip route list 0/0 | awk '{print $3}'")
self.expect(self.prompt)
self.lan_gateway = ipaddress.IPv4Address(self.before.strip().decode())
ip_addr = self.get_interface_ipaddr(self.iface_dut)
self.sendline("ip route | grep %s | awk '{print $1}'" % ip_addr)
self.expect_exact("ip route | grep %s | awk '{print $1}'" % ip_addr)
self.expect(self.prompt)
self.lan_network = ipaddress.IPv4Network(self.before.strip().decode())
# Setup HTTP proxy, so board webserver is accessible via this device
self.sendline('curl --version')
self.expect(self.prompt)
self.sendline('ab -V')
self.expect(self.prompt)
self.sendline('nmap --version')
self.expect(self.prompt)
self.sendline("sed -i 's/^Port 8888/Port 8080/' /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf")
self.expect(self.prompt)
self.sendline("sed 's/#Allow/Allow/g' -i /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf")
self.expect(self.prompt)
self.sendline('/etc/init.d/tinyproxy restart')
self.expect('Restarting')
self.expect(self.prompt)
# Write a useful ssh config for routers
self.sendline('mkdir -p ~/.ssh')
self.sendline('cat > ~/.ssh/config << EOF')
self.sendline('Host %s' % self.lan_gateway)
self.sendline('StrictHostKeyChecking no')
self.sendline('UserKnownHostsFile=/dev/null')
self.sendline('')
self.sendline('Host krouter')
self.sendline('Hostname %s' % self.lan_gateway)
self.sendline('StrictHostKeyChecking no')
self.sendline('UserKnownHostsFile=/dev/null')
self.sendline('EOF')
self.expect(self.prompt)
# Copy an id to the router so people don't have to type a password to ssh or scp
self.sendline('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway)
self.expect_exact('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway)
if 0 == self.expect(['SSH'] + self.prompt, timeout=5) and not self.is_bridged:
self.sendcontrol('c')
self.expect(self.prompt)
self.sendline('[ -e /root/.ssh/id_rsa ] || ssh-keygen -N "" -f /root/.ssh/id_rsa')
if 0 != self.expect(['Protocol mismatch.'] + self.prompt):
self.sendline('scp ~/.ssh/id_rsa.pub %s:/etc/dropbear/authorized_keys' % self.lan_gateway)
if 0 == self.expect(['assword:'] + self.prompt):
self.sendline('password')
self.expect(self.prompt)
else:
self.sendcontrol('c')
self.expect(self.prompt)
if self.install_pkgs_after_dhcp:
self.install_pkgs()
if wan_gw is not None and 'options' in self.kwargs and \
'lan-fixed-route-to-wan' in self.kwargs['options']:
self.sendline('ip route add %s via %s' % (wan_gw, self.lan_gateway))
self.expect(self.prompt)
def add_new_user(self, id, pwd):
'''Create new login ID. But check if already exists'''
self.sendline('\nadduser %s' % id)
try:
self.expect_exact("Enter new UNIX password", timeout=5)
self.sendline('%s' % pwd)
self.expect_exact("Retype new UNIX password")
self.sendline('%s' % pwd)
self.expect_exact("Full Name []")
self.sendline('%s' % id)
self.expect_exact("Room Number []")
self.sendline('1')
self.expect_exact("Work Phone []")
self.sendline('4081234567')
self.expect_exact("Home Phone []")
self.sendline('4081234567')
self.expect_exact("Other []")
self.sendline('4081234567')
self.expect_exact("Is the information correct?")
self.sendline('y')
self.expect(self.prompt)
self.sendline('usermod -aG sudo %s' % id)
self.expect(self.prompt)
# Remove "$" in the login prompt and replace it with "#"
self.sendline('sed -i \'s/\\w\\\$ /\\\w# /g\' //home/%s/.bashrc' % id)
self.expect(self.prompt, timeout=30)
except:
self.expect(self.prompt, timeout=30)
def tftp_server_ip_int(self):
'''Returns the DUT facing side tftp server ip'''
return self.gw
if __name__ == '__main__':
    # Example use: connect to a Debian host and optionally run one of the
    # canned setup flows.  argv[1] = "ipaddr:port", argv[2] = action.
    try:
        ipaddr, port = sys.argv[1].split(':')
    except:
        raise Exception("First argument should be in form of ipaddr:port")
    dev = DebianBox(ipaddr=ipaddr,
                    color='blue',
                    username="root",
                    password="<PASSWORD>",
                    port=port)
    # Sanity check: echo through the console and wait for a prompt.
    dev.sendline('echo Hello')
    dev.expect('Hello', timeout=4)
    dev.expect(dev.prompt)
    if sys.argv[2] == "setup_as_lan_device":
        dev.configure("lan_device")
    if sys.argv[2] == "setup_as_wan_gateway":
        dev.configure("wan_device")
    if sys.argv[2] == "test_voip":
        # Make the in-tree test helpers importable, then install asterisk.
        sys.path.insert(0, os.getcwd())
        sys.path.insert(0, os.getcwd() + '/tests')
        from lib import installers
        installers.install_asterisk(dev)
    # Python 2 bare print: emits a final newline.
    print
| # Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import sys
import time
import pexpect
import base
import atexit
import ipaddress
import os
import binascii
import glob
from termcolor import colored, cprint
class DebianBox(base.BaseDevice):
    '''
    A linux machine running an ssh server.
    '''
    # Device model tag used by the framework (note: parens, not a tuple).
    model = ('debian')
    # Shell prompt patterns recognised on this device (regexes).
    prompt = ['root\\@.*:.*#', '/ # ', ".*:~ #" ]
    # Optional static route ("dest via gw") parsed from the options string.
    static_route = None
    # True when a wan-static-ip option overrides the computed gateway.
    static_ip = False
    # True when this WAN box obtains its address via a DHCP client.
    wan_dhcp = False
    # True when the DUT-facing iface is also the uplink (no eth0).
    wan_no_eth0 = False
    # True when this WAN box acts as the CMTS provisioner.
    wan_cmts_provisioner = False
    # Guard so install_pkgs() only runs once per instance.
    pkgs_installed = False
    # Set when networking isn't up yet and installs must wait for DHCP.
    install_pkgs_after_dhcp = False
    # Detected at runtime in start_lan_client().
    is_bridged = False
    # Interface facing the device under test.
    iface_dut = "eth1"
    def __init__(self,
                 *args,
                 **kwargs):
        """Connect to (or spawn) a Debian host console.

        If 'ipaddr' is given, an ssh session is opened to it; otherwise
        'cmd' is spawned locally via bash (optionally preceded by
        'pre_cmd_host' and followed by 'post_cmd_host'/'post_cmd').
        Configuration is keyword-only; positional args are stored but unused.
        """
        self.args = args
        self.kwargs = kwargs
        name = kwargs.pop('name', None)
        ipaddr = kwargs.pop('ipaddr', None)
        color = kwargs.pop('color', 'black')
        username = kwargs.pop('username', 'root')
        password = kwargs.pop('password', '<PASSWORD>')
        port = kwargs.pop('port', '22')
        output = kwargs.pop('output', sys.stdout)
        reboot = kwargs.pop('reboot', False)
        location = kwargs.pop('location', None)
        pre_cmd_host = kwargs.pop('pre_cmd_host', None)
        cmd = kwargs.pop('cmd', None)
        post_cmd_host = kwargs.pop('post_cmd_host', None)
        post_cmd = kwargs.pop('post_cmd', None)
        cleanup_cmd = kwargs.pop('cleanup_cmd', None)
        env = kwargs.pop('env', None)
        lan_network = kwargs.pop('lan_network', ipaddress.IPv4Network(u"192.168.1.0/24"))
        lan_gateway = kwargs.pop('lan_gateway', ipaddress.IPv4Address(u"192.168.1.1"))
        self.name = name
        self.http_proxy = kwargs.pop('http_proxy', None)
        if ipaddr is not None:
            # Remote host: attach this pexpect session to an ssh process.
            pexpect.spawn.__init__(self,
                                   command="ssh",
                                   args=['%s@%s' % (username, ipaddr),
                                         '-p', port,
                                         '-o', 'StrictHostKeyChecking=no',
                                         '-o', 'UserKnownHostsFile=/dev/null',
                                         '-o', 'ServerAliveInterval=60',
                                         '-o', 'ServerAliveCountMax=5'])
            self.ipaddr = ipaddr
        else:
            # Local spawn path: optionally run a host-side setup command first.
            if pre_cmd_host is not None:
                sys.stdout.write("\tRunning pre_cmd_host.... ")
                sys.stdout.flush()
                phc = pexpect.spawn(command='bash', args=['-c', pre_cmd_host], env=env)
                phc.expect(pexpect.EOF, timeout=120)
                print("\tpre_cmd_host done")
            if cleanup_cmd is not None:
                # Ensure cleanup_cmd runs when the python process exits.
                self.cleanup_cmd = cleanup_cmd
                atexit.register(self.run_cleanup_cmd)
            pexpect.spawn.__init__(self, command="bash", args=['-c', cmd], env=env)
            self.ipaddr = None
        self.color = color
        self.output = output
        self.username = username
        if username != "root":
            # NOTE(review): this appends to the *class* attribute 'prompt', so
            # the extra pattern leaks to every instance -- confirm intended.
            self.prompt.append('%s\\@.*:.*$' % username)
        self.password = password
        self.port = port
        self.location = location
        self.env=env
        self.lan_network = lan_network
        self.lan_gateway = lan_gateway
        # we need to pick a non-conflicting private network here
        # also we want it to be consistant and not random for a particular
        # board
        if (lan_gateway - lan_network.num_addresses).is_private:
            self.gw = lan_gateway - lan_network.num_addresses
        else:
            self.gw = lan_gateway + lan_network.num_addresses
        # Python 2 idiom (str.decode); the WAN-side network, same size as LAN.
        self.nw = ipaddress.IPv4Network(str(self.gw).decode('utf-8') + '/' + str(lan_network.netmask), strict=False)
        # override above values if set in wan options
        if 'options' in kwargs:
            options = [x.strip() for x in kwargs['options'].split(',')]
            for opt in options:
                if opt.startswith('wan-static-ip:'):
                    self.gw = opt.replace('wan-static-ip:', '')
                    self.static_ip = True
                if opt.startswith('wan-static-route:'):
                    self.static_route = opt.replace('wan-static-route:', '').replace('-', ' via ')
                # TODO: remove wan-static-route at some point above
                if opt.startswith('static-route:'):
                    self.static_route = opt.replace('static-route:', '').replace('-', ' via ')
                if opt.startswith('wan-dhcp-client'):
                    self.wan_dhcp = True
                if opt.startswith('wan-cmts-provisioner'):
                    self.wan_cmts_provisioner = True
                if opt.startswith('wan-no-eth0'):
                    self.wan_no_eth0 = True
        try:
            # First contact: may be an ssh host-key question, a password
            # prompt, a login banner, or an already-live shell prompt.
            i = self.expect(["yes/no", "assword:", "Last login"] + self.prompt, timeout=30)
        except pexpect.TIMEOUT as e:
            raise Exception("Unable to connect to %s." % name)
        except pexpect.EOF as e:
            if hasattr(self, "before"):
                print(self.before)
            raise Exception("Unable to connect to %s." % name)
        if i == 0:
            # Accept the unknown host key, then look again for login/password.
            self.sendline("yes")
            i = self.expect(["Last login", "assword:"])
        if i == 1:
            self.sendline(password)
        else:
            pass
        # if we did initially get a prompt wait for one here
        if i < 3:
            self.expect(self.prompt)
        if ipaddr is None:
            # Locally spawned session: discover the address via 'hostname'.
            self.sendline('hostname')
            self.expect('hostname')
            self.expect(self.prompt)
            ipaddr = self.ipaddr = self.before.strip()
        # NOTE(review): port defaults to the string '22', so this int compare
        # is usually True and the "port" banner variant is printed -- verify.
        if self.port != 22:
            cprint("%s port %s device console = %s" % (ipaddr, port, colored(color, color)), None, attrs=['bold'])
        else:
            cprint("%s device console = %s" % (ipaddr, colored(color, color)), None, attrs=['bold'])
        if post_cmd_host is not None:
            sys.stdout.write("\tRunning post_cmd_host.... ")
            sys.stdout.flush()
            phc = pexpect.spawn(command='bash', args=['-c', post_cmd_host], env=env)
            i = phc.expect([pexpect.EOF, pexpect.TIMEOUT, 'password'])
            if i > 0:
                print("\tpost_cmd_host did not complete, it likely failed\n")
            else:
                print("\tpost_cmd_host done")
        if post_cmd is not None:
            # Run post_cmd on the device itself, exporting env first (py2 iteritems).
            env_prefix=""
            for k, v in env.iteritems():
                env_prefix += "export %s=%s; " % (k, v)
            self.sendline(env_prefix + post_cmd)
            self.expect(self.prompt)
        if reboot:
            self.reset()
        self.logfile_read = output
def run_cleanup_cmd(self):
sys.stdout.write("Running cleanup_cmd on %s..." % self.name)
sys.stdout.flush()
cc = pexpect.spawn(command='bash', args=['-c', self.cleanup_cmd], env=self.env)
cc.expect(pexpect.EOF, timeout=120)
print("cleanup_cmd done.")
def sudo_sendline(self, s):
if self.username != "root":
s = "sudo " + s
return super(type(self), self).sendline(s)
def reset(self):
self.sendline('reboot')
self.expect(['going down','disconnected'])
try:
self.expect(self.prompt, timeout=10)
except:
pass
time.sleep(15) # Wait for the network to go down.
for i in range(0, 20):
try:
pexpect.spawn('ping -w 1 -c 1 ' + self.name).expect('64 bytes', timeout=1)
except:
print(self.name + " not up yet, after %s seconds." % (i + 15))
else:
print("%s is back after %s seconds, waiting for network daemons to spawn." % (self.name, i + 14))
time.sleep(15)
break
self.__init__(self.name, self.color,
self.output, self.username,
self.password, self.port,
reboot=False)
def get_interface_ipaddr(self, interface):
self.sendline("\nifconfig %s" % interface)
regex = ['addr:(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*(Bcast|P-t-P):',
'inet (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*(broadcast|P-t-P)']
self.expect(regex, timeout=5)
ipaddr = self.match.group(1)
self.expect(self.prompt)
return ipaddr
    def install_pkgs(self):
        """Install the package set this device needs (dhcp, tftp, proxy, ...).

        Tries to reach the mirror directly first; if DNS fails it temporarily
        borrows eth0's default gateway, and if networking is not up at all it
        defers installation until after DHCP (install_pkgs_after_dhcp).
        Idempotent via self.pkgs_installed.
        """
        if self.pkgs_installed == True:
            return
        # Take the DUT-facing iface down first unless this box needs it up.
        if not self.wan_no_eth0 and not self.wan_dhcp and not self.install_pkgs_after_dhcp:
            self.sendline('ifconfig %s down' % self.iface_dut)
            self.expect(self.prompt)
        pkgs = "isc-dhcp-server xinetd tinyproxy curl apache2-utils nmap psmisc vim-common tftpd-hpa pppoe isc-dhcp-server procps iptables lighttpd psmisc dnsmasq"
        def _install_pkgs():
            # Helper: one apt-get run; interrupt (ctrl-c) if downloads hang.
            self.sendline('apt-get update && apt-get -o DPkg::Options::="--force-confnew" -qy install %s' % pkgs)
            if 0 == self.expect(['Reading package', pexpect.TIMEOUT], timeout=60):
                self.expect(self.prompt, timeout=300)
            else:
                print("Failed to download packages, things might not work")
                self.sendcontrol('c')
                self.expect(self.prompt)
            self.pkgs_installed = True
        # TODO: use netns for all this?
        undo_default_route = None
        self.sendline('ping -c1 deb.debian.org')
        i = self.expect(['ping: unknown host', 'connect: Network is unreachable', pexpect.TIMEOUT] + self.prompt, timeout=10)
        if 0 == i:
            # DNS failed: try routing via whatever gateway eth0 already has.
            # TODO: don't reference eth0, but the uplink iface
            self.sendline("echo SYNC; ip route list | grep 'via.*dev eth0' | awk '{print $3}'")
            self.expect_exact("SYNC\r\n")
            if 0 == self.expect(['(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\r\n'] + self.prompt, timeout=5):
                possible_default_gw = self.match.group(1)
                self.sendline("ip route add default via %s" % possible_default_gw)
                self.expect(self.prompt)
                self.sendline('ping -c1 deb.debian.org')
                self.expect(self.prompt)
                # Remember the borrowed route so we can remove it afterwards.
                undo_default_route = possible_default_gw
                self.sendline('apt-get update && apt-get -o DPkg::Options::="--force-confnew" -qy install %s' % pkgs)
                if 0 == self.expect(['Reading package', pexpect.TIMEOUT], timeout=60):
                    self.expect(self.prompt, timeout=300)
                else:
                    print("Failed to download packages, things might not work")
                    self.sendcontrol('c')
                    self.expect(self.prompt)
        elif 1 == i:
            # No network at all yet: retry after the DHCP client has run.
            if self.install_pkgs_after_dhcp:
                _install_pkgs()
            else:
                self.install_pkgs_after_dhcp = True
            return
        elif 2 == i:
            # Ping still hanging -- interrupt it and fall through.
            self.sendcontrol('c')
            self.expect(self.prompt)
        else:
            _install_pkgs()
        if undo_default_route is not None:
            # Remove the temporary default route added above.
            self.sendline("ip route del default via %s" % undo_default_route)
            self.expect(self.prompt)
def ip_neigh_flush(self):
self.sendline('\nip -s neigh flush all')
self.expect('flush all')
self.expect(self.prompt)
def turn_on_pppoe(self):
self.sendline('cat > /etc/ppp/pppoe-server-options << EOF')
self.sendline('noauth')
self.sendline('ms-dns 8.8.8.8')
self.sendline('ms-dns 8.8.4.4')
self.sendline('EOF')
self.expect(self.prompt)
self.sendline('pppoe-server -k -I %s -L 192.168.2.1 -R 192.168.2.10 -N 4' % self.iface_dut)
self.expect(self.prompt)
def turn_off_pppoe(self):
self.sendline("\nkillall pppoe-server pppoe pppd")
self.expect("pppd")
self.expect(self.prompt)
    def start_tftp_server(self):
        """Provision and (re)start tftpd-hpa rooted at /srv/tftp, with
        /tftpboot symlinked to it and world-writable tmp/ and crashdump/."""
        # we can call this first, before configure so we need to do this here
        # as well
        self.install_pkgs()
        # the entire reason to start tftp is to copy files to devices
        # which we do via ssh so let's start that as well
        self.start_sshd_server()
        try:
            eth1_addr = self.get_interface_ipaddr(self.iface_dut)
        except:
            eth1_addr = None
        # set WAN ip address, for now this will always be this address for the device side
        # TODO: fix gateway for non-WAN tftp_server
        if self.gw != eth1_addr:
            self.sendline('ifconfig %s %s' % (self.iface_dut, getattr(self, 'gw', '192.168.0.1')))
            self.expect(self.prompt)
        self.sendline('ifconfig %s up' % self.iface_dut)
        self.expect(self.prompt)
        #configure tftp server
        self.sendline('/etc/init.d/tftpd-hpa stop')
        self.expect('Stopping')
        self.expect(self.prompt)
        # Rebuild the serving tree from scratch each time.
        self.sendline('rm -rf /tftpboot')
        self.expect(self.prompt)
        self.sendline('rm -rf /srv/tftp')
        self.expect(self.prompt)
        self.sendline('mkdir -p /srv/tftp')
        self.expect(self.prompt)
        self.sendline('ln -sf /srv/tftp/ /tftpboot')
        self.expect(self.prompt)
        self.sendline('mkdir -p /tftpboot/tmp')
        self.expect(self.prompt)
        self.sendline('chmod a+w /tftpboot/tmp')
        self.expect(self.prompt)
        self.sendline('mkdir -p /tftpboot/crashdump')
        self.expect(self.prompt)
        self.sendline('chmod a+w /tftpboot/crashdump')
        self.expect(self.prompt)
        # Rewrite the tftpd-hpa defaults: delete each setting then append the
        # desired value (secure chroot, allow uploads, :69, /srv/tftp root).
        self.sendline('sed /TFTP_OPTIONS/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_OPTIONS=\\"--secure --create\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('sed /TFTP_ADDRESS/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_ADDRESS=\\":69\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('sed /TFTP_DIRECTORY/d -i /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('echo TFTP_DIRECTORY=\\"/srv/tftp\\" >> /etc/default/tftpd-hpa')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/tftpd-hpa restart')
        self.expect(self.prompt)
def restart_tftp_server(self):
self.sendline('\n/etc/init.d/tftpd-hpa restart')
self.expect('Restarting')
self.expect(self.prompt)
def start_sshd_server(self):
self.sendline('/etc/init.d/rsyslog start')
self.expect(self.prompt)
self.sendline('/etc/init.d/ssh start')
self.expect(self.prompt)
self.sendline('sed "s/.*PermitRootLogin.*/PermitRootLogin yes/g" -i /etc/ssh/sshd_config')
self.expect(self.prompt)
self.sendline('/etc/init.d/ssh reload')
self.expect(self.prompt)
    def copy_file_to_server(self, src, dst=None):
        """Copy local file *src* onto the device (default /tftpboot/<name>).

        The file is gzipped and hex-encoded locally, then reassembled on the
        device with xxd/gunzip via a heredoc -- avoids needing scp.  Raises
        Exception if the destination file does not exist afterwards.
        """
        def gzip_str(string_):
            # Gzip-compress a byte string in memory.
            import gzip
            import io
            out = io.BytesIO()
            with gzip.GzipFile(fileobj=out, mode='w') as fo:
                fo.write(string_)
            return out.getvalue()
        with open(src, mode='rb') as file:
            bin_file = binascii.hexlify(gzip_str(file.read()))
        if dst is None:
            dst = '/tftpboot/' + os.path.basename(src)
        print ("Copying %s to %s" % (src, dst))
        # Suppress console logging while the (potentially huge) hex blob is
        # echoed across the session; restore it afterwards.
        saved_logfile_read = self.logfile_read
        self.logfile_read = None
        self.sendline('''cat << EOFEOFEOFEOF | xxd -r -p | gunzip > %s
%s
EOFEOFEOFEOF''' % (dst, bin_file))
        self.expect(self.prompt)
        self.sendline('ls %s' % dst)
        self.expect_exact('ls %s' % dst)
        i = self.expect(['ls: cannot access %s: No such file or directory' % dst] + self.prompt)
        if i == 0:
            raise Exception("Failed to copy file")
        self.logfile_read = saved_logfile_read
def configure(self, kind, config=[]):
self.install_pkgs()
self.start_sshd_server()
if kind == "wan_device":
self.setup_as_wan_gateway()
elif kind == "lan_device":
self.setup_as_lan_device()
if self.static_route is not None:
# TODO: add some ppint handle this more robustly
self.send('ip route del %s; ' % self.static_route.split(' via ')[0])
self.sendline('ip route add %s' % self.static_route)
self.expect(self.prompt)
    def update_cmts_isc_dhcp_config(self, board_config):
        """(Re)write /etc/dhcp/dhcpd.conf and dhcpd6.conf for CM provisioning,
        then merge any per-board 'extra_provisioning' host stanzas in."""
        self.sendline('''cat > /etc/dhcp/dhcpd.conf << EOF
log-facility local7;
option log-servers 192.168.3.1;
option time-servers 192.168.3.1;
next-server 192.168.3.1;
default-lease-time 604800;
max-lease-time 604800;
allow leasequery;
option space docsis-mta;
option docsis-mta.dhcp-server-1 code 1 = ip-address;
option docsis-mta.dhcp-server-1 192.168.3.1;
option docsis-mta.dhcp-server-2 code 2 = ip-address;
option docsis-mta.dhcp-server-2 192.168.3.1;
option docsis-mta.provision-server code 3 = { integer 8, string };
option docsis-mta.provision-server 0 08:54:43:4F:4D:4C:41:42:53:03:43:4F:4D:00 ;
option docsis-mta-encap code 122 = encapsulate docsis-mta;
option docsis-mta.kerberos-realm code 6 = string;
option docsis-mta.kerberos-realm 05:42:41:53:49:43:01:31:00 ;
subnet 192.168.3.0 netmask 255.255.255.0 {
interface %s;
}
subnet 192.168.200.0 netmask 255.255.255.0
{
interface %s;
range 192.168.200.10 192.168.200.250;
option routers 192.168.200.1;
option broadcast-address 192.168.200.255;
option dhcp-parameter-request-list 43;
option domain-name "local";
option time-offset 1;
option tftp-server-name "192.168.3.1";
filename "UNLIMITCASA.cfg";
allow unknown-clients;
}
subnet 192.168.201.0 netmask 255.255.255.0
{
interface %s;
range 192.168.201.10 192.168.201.250;
option routers 192.168.201.1;
option broadcast-address 192.168.201.255;
option time-offset 1;
option domain-name-servers %s;
allow unknown-clients;
}
EOF''' % (self.iface_dut, self.iface_dut, self.iface_dut, self.gw))
        self.expect(self.prompt)
        # The board will ignore this unless the docsis-mac is set to ipv6
        # That needs to be done manually as well as copying any CM cfg files
        # to the provisioner (e.g. still not fully automated)
        self.sendline('''cat > /etc/dhcp/dhcpd6.conf << EOF
preferred-lifetime 7500;
option dhcp-renewal-time 3600;
option dhcp-rebinding-time 5400;
allow leasequery;
option dhcp6.name-servers fdf8:f53e:61e4::18;
option dhcp6.domain-search "test.example.com","example.com";
option dhcp6.info-refresh-time 21600;
option dhcp6.ia_pd code 25 = { integer 32, integer 32, integer 32, integer 16, integer 16, integer 32, integer 32, integer 8, ip6-address};
option dhcp6.gateway code 32003 = ip6-address;
option space docsis code width 2 length width 2 hash size 100;
option docsis.tftp-servers code 32 = array of ip6-address;
option docsis.configuration-file code 33 = text;
option docsis.syslog-servers code 34 = array of ip6-address;
#option docsis.device-id code 36 = string;
option docsis.time-servers code 37 = array of ip6-address;
option docsis.time-offset code 38 = signed integer 32;
option vsio.docsis code 4491 = encapsulate docsis;
subnet6 2001:ed8:77b5:3::/64 {
range6 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b fc00:e968:6179::de52:7100;
interface %s;
option docsis.tftp-servers fc00:db20:35b:7399::5;
option docsis.time-servers fc00:db20:35b:7399::5;
option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg";
option docsis.syslog-servers fc00:db20:35b:7399::5 ;
option docsis.time-offset 5000;
}
subnet6 2001:ed8:77b5:2000::/64 {
range6 fc00:db20:35b:7399::5 fdf8:f53e:61e4::18;
interface %s;
option docsis.tftp-servers fc00:db20:35b:7399::5;
option docsis.time-servers fc00:db20:35b:7399::5;
option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg";
option docsis.syslog-servers fc00:db20:35b:7399::5;
option docsis.time-offset 5000;
}
subnet6 2001:ed8:77b5:2001::/64 {
range6 fc00:e968:6179::de52:7100 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b;
interface %s;
option dhcp6.ia_pd 1234 20000 40000 26 25 30000 60000 64 2001:ed8:77b5:4::;
option dhcp6.solmax-rt 240;
option dhcp6.inf-max-rt 360;
}
EOF''' % (self.iface_dut, self.iface_dut, self.iface_dut))
        self.expect(self.prompt)
        # Drop any stale per-station fragment before regenerating it.
        self.sendline('rm /etc/dhcp/dhcpd.conf.''' + board_config['station'])
        self.expect(self.prompt)
        if 'extra_provisioning' in board_config:
            cfg_file = "/etc/dhcp/dhcpd.conf." + board_config['station']
            # zero out old config
            self.sendline('cp /dev/null %s' % cfg_file)
            self.expect(self.prompt)
            # there is probably a better way to construct this file...
            # (py2 iteritems): one "host <dev>-<station> { ... }" stanza per device.
            for dev, cfg_sec in board_config['extra_provisioning'].iteritems():
                self.sendline("echo 'host %s-%s {' >> %s" % (dev, board_config['station'], cfg_file))
                for key, value in cfg_sec.iteritems():
                    if key == "options":
                        for k2, v2 in value.iteritems():
                            self.sendline("echo ' option %s %s;' >> %s" % (k2, v2, cfg_file))
                            self.expect(self.prompt)
                    else:
                        self.sendline("echo ' %s %s;' >> %s" % (key, value, cfg_file))
                        self.expect(self.prompt)
                self.sendline("echo '}' >> %s" % cfg_file)
            # TODO: extra per board dhcp6 provisioning
        # combine all configs into one
        self.sendline("cat /etc/dhcp/dhcpd.conf.* >> /etc/dhcp/dhcpd.conf")
        self.expect(self.prompt)
    def copy_cmts_provisioning_files(self, board_config):
        """Find the CM config files for this board, compile them with the
        'docsis' tool, and copy the resulting binaries to the tftp server."""
        # Look in all overlays as well, and PATH as a workaround for standalone
        paths = os.environ['PATH'].split(os.pathsep)
        paths += os.environ['BFT_OVERLAY'].split(' ')
        cfg_list = []
        if 'tftp_cfg_files' in board_config:
            for path in paths:
                for cfg in board_config['tftp_cfg_files']:
                    cfg_list += glob.glob(path + '/devices/cm-cfg/%s' % cfg)
        else:
            # No explicit list: fall back to the default CM config.
            for path in paths:
                cfg_list += glob.glob(path + '/devices/cm-cfg/UNLIMITCASA.cfg')
        # De-duplicate hits found in multiple overlays.
        cfg_set = set(cfg_list)
        # Copy binary files to tftp server
        for cfg in cfg_set:
            # TODO: use common cmd_exists
            cmd_exists = lambda x: any(os.access(os.path.join(path, x), os.X_OK) for path in os.environ["PATH"].split(os.pathsep))
            assert cmd_exists('docsis')
            # TODO: much better error checking
            # NOTE(review): cfg comes from config/glob and is interpolated into
            # a shell command -- paths with spaces/metacharacters will break.
            os.system("docsis -e %s /dev/null %s" % (cfg, cfg.replace('.txt', '.bin')))
            self.copy_file_to_server(cfg.replace('.txt', '.bin'))
            os.remove(cfg.replace('.txt', '.bin'))
    def provision_board(self, board_config):
        ''' Setup DHCP and time server etc for CM provisioning'''
        self.sendline('/etc/init.d/isc-dhcp-server stop')
        self.expect(self.prompt)
        # Bind the DHCP server to the DUT-facing interface only (covers the
        # old INTERFACES= and newer INTERFACESv4/v6= config styles).
        self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        # we are bypass this for now (see http://patchwork.ozlabs.org/patch/117949/)
        self.sendline('sysctl -w net.ipv6.conf.%s.accept_dad=0' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw))
        self.expect(self.prompt)
        self.sendline('ifconfig %s inet6 add fc00:db20:35b:7399::5/64' % self.iface_dut)
        self.expect(self.prompt)
        # TODO: specify these via config
        self.sendline('ip route add 192.168.201.0/24 via 192.168.3.222')
        self.expect(self.prompt)
        self.sendline('ip route add 192.168.200.0/24 via 192.168.3.222')
        self.expect(self.prompt)
        self.sendline('ip -6 route add 2001:ed8:77b5:2000::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('ip -6 route add 2001:ed8:77b5:2001::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut)
        self.expect(self.prompt)
        self.update_cmts_isc_dhcp_config(board_config)
        self.sendline('/etc/init.d/isc-dhcp-server start')
        # We expect both, so we need debian 9 or greater for this device
        self.expect('Starting ISC DHCPv4 server.*dhcpd.')
        self.expect('Starting ISC DHCPv6 server.*dhcpd.')
        self.expect(self.prompt)
        # this might be redundant, but since might not have a tftpd server running
        # here we have to start one for the CM configs
        self.start_tftp_server()
        self.copy_cmts_provisioning_files(board_config)
        # Enable the xinetd time service for the CMs, on IPv4 and IPv6.
        self.sendline("sed 's/disable\\t\\t= yes/disable\\t\\t= no/g' -i /etc/xinetd.d/time")
        self.expect(self.prompt)
        self.sendline("grep -q flags.*=.*IPv6 /etc/xinetd.d/time || sed '/wait.*=/a\\\\tflags\\t\\t= IPv6' -i /etc/xinetd.d/time")
        self.expect(self.prompt)
        self.sendline('/etc/init.d/xinetd restart')
        self.expect('Starting internet superserver: xinetd.')
        self.expect(self.prompt)
    def reprovision_board(self, board_config):
        '''New DHCP, cfg files etc for board after it's been provisioned once'''
        self.copy_cmts_provisioning_files(board_config)
        self.update_cmts_isc_dhcp_config(board_config)
        self.sendline('/etc/init.d/isc-dhcp-server restart')
        # Startup message differs across Debian releases; accept either form.
        self.expect(['Starting ISC DHCP(v4)? server.*dhcpd.', 'Starting isc-dhcp-server.*'])
        self.expect(self.prompt)
    def setup_dhcp_server(self):
        """Configure and start isc-dhcp-server handing out addresses from
        self.nw on the DUT-facing interface."""
        # configure DHCP server
        self.sendline('/etc/init.d/isc-dhcp-server stop')
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut)
        self.expect(self.prompt)
        # Write dhcpd.conf line by line through a heredoc.
        self.sendline('cat > /etc/dhcp/dhcpd.conf << EOF')
        self.sendline('ddns-update-style none;')
        self.sendline('option domain-name "bigfoot-test";')
        self.sendline('option domain-name-servers %s;' % self.gw)
        self.sendline('default-lease-time 600;')
        self.sendline('max-lease-time 7200;')
        # use the same netmask as the lan device
        self.sendline('subnet %s netmask %s {' % (self.nw.network_address, self.nw.netmask))
        self.sendline(' range %s %s;' % (self.nw.network_address + 10, self.nw.network_address + 100))
        self.sendline(' option routers %s;' % self.gw)
        self.sendline('}')
        self.sendline('EOF')
        self.expect(self.prompt)
        self.sendline('/etc/init.d/isc-dhcp-server start')
        # Startup message differs across Debian releases; accept either form.
        self.expect(['Starting ISC DHCP(v4)? server.*dhcpd.', 'Starting isc-dhcp-server.*'])
        self.expect(self.prompt)
def setup_dnsmasq(self):
self.sendline('cat > /etc/dnsmasq.conf << EOF')
self.sendline('server=8.8.4.4')
self.sendline('listen-address=127.0.0.1')
self.sendline('listen-address=%s' % self.gw)
self.sendline('EOF')
self.sendline('/etc/init.d/dnsmasq restart')
self.expect(self.prompt)
    def setup_as_wan_gateway(self):
        """Turn this host into the WAN-side gateway: DNS (dnsmasq), DHCP,
        IPv4 forwarding and SNAT towards the uplink interface."""
        self.setup_dnsmasq()
        # Stop any leftover traffic generators from previous runs.
        self.sendline('killall iperf ab hping3')
        self.expect(self.prompt)
        self.sendline('\nsysctl net.ipv6.conf.all.disable_ipv6=0')
        self.expect('sysctl ')
        self.expect(self.prompt)
        # potential cleanup so this wan device works
        self.sendline('iptables -t nat -X')
        self.expect(self.prompt)
        self.sendline('iptables -t nat -F')
        self.expect(self.prompt)
        # set WAN ip address
        if self.wan_dhcp:
            # DHCP-client mode: learn our address instead of using self.gw.
            self.sendline('/etc/init.d/isc-dhcp-server stop')
            self.expect(self.prompt)
            self.sendline('dhclient -r %s; dhclient %s' % (self.iface_dut, self.iface_dut))
            self.expect(self.prompt)
            self.gw = self.get_interface_ipaddr(self.iface_dut)
        else:
            self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw))
            self.expect(self.prompt)
            self.sendline('ifconfig %s up' % self.iface_dut)
            self.expect(self.prompt)
            if not self.wan_cmts_provisioner:
                self.setup_dhcp_server()
        # configure routing
        self.sendline('sysctl net.ipv4.ip_forward=1')
        self.expect(self.prompt)
        if self.wan_no_eth0 or self.wan_dhcp:
            wan_uplink_iface = self.iface_dut
        else:
            wan_uplink_iface = "eth0"
        wan_ip_uplink = self.get_interface_ipaddr(wan_uplink_iface)
        # Masquerade all outbound traffic as the uplink address.
        self.sendline('iptables -t nat -A POSTROUTING -o %s -j SNAT --to-source %s' % (wan_uplink_iface, wan_ip_uplink))
        self.expect(self.prompt)
        # Disable TCP timestamps/SACK -- keeps test captures predictable.
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps')
        self.expect(self.prompt)
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack')
        self.expect(self.prompt)
        self.sendline('ifconfig %s' % self.iface_dut)
        self.expect(self.prompt)
        self.turn_off_pppoe()
    def setup_as_lan_device(self):
        """Prepare this host as the LAN client: flush firewall rules, enable
        forwarding, and DNAT this host's port 222 to the router's ssh."""
        # potential cleanup so this wan device works
        self.sendline('killall iperf ab hping3')
        self.expect(self.prompt)
        self.sendline('\niptables -t nat -X')
        self.expect('iptables -t')
        self.expect(self.prompt)
        self.sendline('sysctl net.ipv6.conf.all.disable_ipv6=0')
        self.expect(self.prompt)
        self.sendline('sysctl net.ipv4.ip_forward=1')
        self.expect(self.prompt)
        self.sendline('iptables -t nat -F; iptables -t nat -X')
        self.expect(self.prompt)
        self.sendline('iptables -F; iptables -X')
        self.expect(self.prompt)
        # Forward local port 222 to the router's ssh (port 22).
        self.sendline('iptables -t nat -A PREROUTING -p tcp --dport 222 -j DNAT --to-destination %s:22' % self.lan_gateway)
        self.expect(self.prompt)
        self.sendline('iptables -t nat -A POSTROUTING -o %s -p tcp --dport 22 -j MASQUERADE' % self.iface_dut)
        self.expect(self.prompt)
        # Disable TCP timestamps/SACK -- keeps test captures predictable.
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps')
        self.expect(self.prompt)
        self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack')
        self.expect(self.prompt)
        # Kill any stale dhclient for the DUT-facing interface.
        self.sendline('pkill --signal 9 -f dhclient.*%s' % self.iface_dut)
        self.expect(self.prompt)
    def start_lan_client(self, wan_gw=None):
        """Acquire a DHCP lease on the DUT-facing interface and finish LAN
        client setup (HTTP proxy, ssh config and key for the router).

        Detects bridged mode (no default route via lan_gateway) and updates
        self.lan_gateway / self.lan_network from the live routing table.
        Raises Exception if DHCP fails three times in a row.
        """
        self.sendline('\nifconfig %s up' % self.iface_dut)
        self.expect('ifconfig %s up' % self.iface_dut)
        self.expect(self.prompt)
        # Release any old lease and clear the interface/lease state.
        self.sendline("dhclient -r %s" % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('\nifconfig %s 0.0.0.0' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('rm /var/lib/dhcp/dhclient.leases')
        self.expect(self.prompt)
        # Keep /etc/resolv.conf a regular file (overwrite instead of replace)
        # so bind-mounted setups survive dhclient updates.
        self.sendline("sed -e 's/mv -f $new_resolv_conf $resolv_conf/cat $new_resolv_conf > $resolv_conf/g' -i /sbin/dhclient-script")
        self.expect(self.prompt)
        self.sendline('ip route del default dev eth0')
        self.expect(self.prompt)
        # DHCP can be flaky right after the board boots; try a few times.
        for attempt in range(3):
            try:
                self.sendline('dhclient -v %s' % self.iface_dut)
                self.expect('DHCPOFFER', timeout=30)
                self.expect(self.prompt)
                break
            except:
                self.sendcontrol('c')
        else:
            raise Exception("Error: Device on LAN couldn't obtain address via DHCP.")
        self.sendline('ifconfig %s' % self.iface_dut)
        self.expect(self.prompt)
        self.sendline('ip route')
        # TODO: we should verify this so other way, because the they could be the same subnets
        # in theory
        i = self.expect(['default via %s dev %s' % (self.lan_gateway, self.iface_dut), pexpect.TIMEOUT], timeout=5)
        if i == 1:
            # bridged mode
            self.is_bridged = True
            # update gw
            self.sendline("ip route list 0/0 | awk '{print $3}'")
            self.expect_exact("ip route list 0/0 | awk '{print $3}'")
            self.expect(self.prompt)
            self.lan_gateway = ipaddress.IPv4Address(self.before.strip().decode())
            # Derive the LAN network from the route covering our own address.
            ip_addr = self.get_interface_ipaddr(self.iface_dut)
            self.sendline("ip route | grep %s | awk '{print $1}'" % ip_addr)
            self.expect_exact("ip route | grep %s | awk '{print $1}'" % ip_addr)
            self.expect(self.prompt)
            self.lan_network = ipaddress.IPv4Network(self.before.strip().decode())
        # Setup HTTP proxy, so board webserver is accessible via this device
        self.sendline('curl --version')
        self.expect(self.prompt)
        self.sendline('ab -V')
        self.expect(self.prompt)
        self.sendline('nmap --version')
        self.expect(self.prompt)
        # tinyproxy's conf lives in either location depending on the release.
        self.sendline("sed -i 's/^Port 8888/Port 8080/' /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf")
        self.expect(self.prompt)
        self.sendline("sed 's/#Allow/Allow/g' -i /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf")
        self.expect(self.prompt)
        self.sendline('/etc/init.d/tinyproxy restart')
        self.expect('Restarting')
        self.expect(self.prompt)
        # Write a useful ssh config for routers
        self.sendline('mkdir -p ~/.ssh')
        self.sendline('cat > ~/.ssh/config << EOF')
        self.sendline('Host %s' % self.lan_gateway)
        self.sendline('StrictHostKeyChecking no')
        self.sendline('UserKnownHostsFile=/dev/null')
        self.sendline('')
        self.sendline('Host krouter')
        self.sendline('Hostname %s' % self.lan_gateway)
        self.sendline('StrictHostKeyChecking no')
        self.sendline('UserKnownHostsFile=/dev/null')
        self.sendline('EOF')
        self.expect(self.prompt)
        # Copy an id to the router so people don't have to type a password to ssh or scp
        self.sendline('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway)
        self.expect_exact('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway)
        if 0 == self.expect(['SSH'] + self.prompt, timeout=5) and not self.is_bridged:
            self.sendcontrol('c')
            self.expect(self.prompt)
            self.sendline('[ -e /root/.ssh/id_rsa ] || ssh-keygen -N "" -f /root/.ssh/id_rsa')
            if 0 != self.expect(['Protocol mismatch.'] + self.prompt):
                self.sendline('scp ~/.ssh/id_rsa.pub %s:/etc/dropbear/authorized_keys' % self.lan_gateway)
                if 0 == self.expect(['assword:'] + self.prompt):
                    self.sendline('password')
                    self.expect(self.prompt)
        else:
            self.sendcontrol('c')
            self.expect(self.prompt)
        if self.install_pkgs_after_dhcp:
            self.install_pkgs()
        if wan_gw is not None and 'options' in self.kwargs and \
           'lan-fixed-route-to-wan' in self.kwargs['options']:
            self.sendline('ip route add %s via %s' % (wan_gw, self.lan_gateway))
            self.expect(self.prompt)
def add_new_user(self, id, pwd):
'''Create new login ID. But check if already exists'''
self.sendline('\nadduser %s' % id)
try:
self.expect_exact("Enter new UNIX password", timeout=5)
self.sendline('%s' % pwd)
self.expect_exact("Retype new UNIX password")
self.sendline('%s' % pwd)
self.expect_exact("Full Name []")
self.sendline('%s' % id)
self.expect_exact("Room Number []")
self.sendline('1')
self.expect_exact("Work Phone []")
self.sendline('4081234567')
self.expect_exact("Home Phone []")
self.sendline('4081234567')
self.expect_exact("Other []")
self.sendline('4081234567')
self.expect_exact("Is the information correct?")
self.sendline('y')
self.expect(self.prompt)
self.sendline('usermod -aG sudo %s' % id)
self.expect(self.prompt)
# Remove "$" in the login prompt and replace it with "#"
self.sendline('sed -i \'s/\\w\\\$ /\\\w# /g\' //home/%s/.bashrc' % id)
self.expect(self.prompt, timeout=30)
except:
self.expect(self.prompt, timeout=30)
def tftp_server_ip_int(self):
    """Return the IP address of the TFTP server on the DUT-facing side.

    This is simply the gateway address this device holds on the
    interface that faces the DUT.
    """
    server_ip = self.gw
    return server_ip
if __name__ == '__main__':
    # Example use:
    #   python <script> <ipaddr>:<port> <action>
    # where <action> is one of: setup_as_lan_device, setup_as_wan_gateway,
    # test_voip.
    try:
        ipaddr, port = sys.argv[1].split(':')
    except (IndexError, ValueError):
        # IndexError: no argument supplied; ValueError: wrong number of
        # ':'-separated fields.  The previous bare "except:" also hid
        # SystemExit/KeyboardInterrupt, which was never intended.
        raise Exception("First argument should be in form of ipaddr:port")
    dev = DebianBox(ipaddr=ipaddr,
                    color='blue',
                    username="root",
                    password="<PASSWORD>",
                    port=port)
    # Sanity check: make sure the shell echoes back and we can find the prompt.
    dev.sendline('echo Hello')
    dev.expect('Hello', timeout=4)
    dev.expect(dev.prompt)
    # The actions are mutually exclusive, so dispatch with an elif chain.
    if sys.argv[2] == "setup_as_lan_device":
        dev.configure("lan_device")
    elif sys.argv[2] == "setup_as_wan_gateway":
        dev.configure("wan_device")
    elif sys.argv[2] == "test_voip":
        sys.path.insert(0, os.getcwd())
        sys.path.insert(0, os.getcwd() + '/tests')
        from lib import installers
        installers.install_asterisk(dev)
print | en | 0.2635 | # Copyright (c) 2015 # # All rights reserved. # # This file is distributed under the Clear BSD license. # The full text can be found in LICENSE in the root directory. A linux machine running an ssh server. #', '/ # ', ".*:~ #" ] # we need to pick a non-conflicting private network here # also we want it to be consistant and not random for a particular # board # override above values if set in wan options # TODO: remove wan-static-route at some point above # if we did initially get a prompt wait for one here # Wait for the network to go down. # TODO: use netns for all this? # TODO: don't reference eth0, but the uplink iface # we can call this first, before configure so we need to do this here # as well # the entire reason to start tftp is to copy files to devices # which we do via ssh so let's start that as well # set WAN ip address, for now this will always be this address for the device side # TODO: fix gateway for non-WAN tftp_server #configure tftp server cat << EOFEOFEOFEOF | xxd -r -p | gunzip > %s %s EOFEOFEOFEOF # TODO: add some ppint handle this more robustly cat > /etc/dhcp/dhcpd.conf << EOF log-facility local7; option log-servers 192.168.3.1; option time-servers 192.168.3.1; next-server 192.168.3.1; default-lease-time 604800; max-lease-time 604800; allow leasequery; option space docsis-mta; option docsis-mta.dhcp-server-1 code 1 = ip-address; option docsis-mta.dhcp-server-1 192.168.3.1; option docsis-mta.dhcp-server-2 code 2 = ip-address; option docsis-mta.dhcp-server-2 192.168.3.1; option docsis-mta.provision-server code 3 = { integer 8, string }; option docsis-mta.provision-server 0 08:54:43:4F:4D:4C:41:42:53:03:43:4F:4D:00 ; option docsis-mta-encap code 122 = encapsulate docsis-mta; option docsis-mta.kerberos-realm code 6 = string; option docsis-mta.kerberos-realm 05:42:41:53:49:43:01:31:00 ; subnet 192.168.3.0 netmask 255.255.255.0 { interface %s; } subnet 192.168.200.0 netmask 255.255.255.0 { interface %s; range 192.168.200.10 
192.168.200.250; option routers 192.168.200.1; option broadcast-address 192.168.200.255; option dhcp-parameter-request-list 43; option domain-name "local"; option time-offset 1; option tftp-server-name "192.168.3.1"; filename "UNLIMITCASA.cfg"; allow unknown-clients; } subnet 192.168.201.0 netmask 255.255.255.0 { interface %s; range 192.168.201.10 192.168.201.250; option routers 192.168.201.1; option broadcast-address 192.168.201.255; option time-offset 1; option domain-name-servers %s; allow unknown-clients; } EOF # The board will ignore this unless the docsis-mac is set to ipv6 # That needs to be done manually as well as copying any CM cfg files # to the provisioner (e.g. still not fully automated) cat > /etc/dhcp/dhcpd6.conf << EOF preferred-lifetime 7500; option dhcp-renewal-time 3600; option dhcp-rebinding-time 5400; allow leasequery; option dhcp6.name-servers fdf8:f53e:61e4::18; option dhcp6.domain-search "test.example.com","example.com"; option dhcp6.info-refresh-time 21600; option dhcp6.ia_pd code 25 = { integer 32, integer 32, integer 32, integer 16, integer 16, integer 32, integer 32, integer 8, ip6-address}; option dhcp6.gateway code 32003 = ip6-address; option space docsis code width 2 length width 2 hash size 100; option docsis.tftp-servers code 32 = array of ip6-address; option docsis.configuration-file code 33 = text; option docsis.syslog-servers code 34 = array of ip6-address; #option docsis.device-id code 36 = string; option docsis.time-servers code 37 = array of ip6-address; option docsis.time-offset code 38 = signed integer 32; option vsio.docsis code 4491 = encapsulate docsis; subnet6 2001:ed8:77b5:3::/64 { range6 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b fc00:e968:6179::de52:7100; interface %s; option docsis.tftp-servers fc00:db20:35b:7399::5; option docsis.time-servers fc00:db20:35b:7399::5; option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg"; option docsis.syslog-servers fc00:db20:35b:7399::5 ; option docsis.time-offset 5000; } subnet6 
2001:ed8:77b5:2000::/64 { range6 fc00:db20:35b:7399::5 fdf8:f53e:61e4::18; interface %s; option docsis.tftp-servers fc00:db20:35b:7399::5; option docsis.time-servers fc00:db20:35b:7399::5; option docsis.configuration-file "9_EU_CBN_IPv6_LG.cfg"; option docsis.syslog-servers fc00:db20:35b:7399::5; option docsis.time-offset 5000; } subnet6 2001:ed8:77b5:2001::/64 { range6 fc00:e968:6179::de52:7100 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b; interface %s; option dhcp6.ia_pd 1234 20000 40000 26 25 30000 60000 64 2001:ed8:77b5:4::; option dhcp6.solmax-rt 240; option dhcp6.inf-max-rt 360; } EOF + board_config['station']) self.expect(self.prompt) if 'extra_provisioning' in board_config: cfg_file = "/etc/dhcp/dhcpd.conf." + board_config['station'] # zero out old config self.sendline('cp /dev/null %s' % cfg_file) self.expect(self.prompt) # there is probably a better way to construct this file... for dev, cfg_sec in board_config['extra_provisioning'].iteritems(): self.sendline("echo 'host %s-%s {' >> %s" % (dev, board_config['station'], cfg_file)) for key, value in cfg_sec.iteritems(): if key == "options": for k2, v2 in value.iteritems(): self.sendline("echo ' option %s %s;' >> %s" % (k2, v2, cfg_file)) self.expect(self.prompt) else: self.sendline("echo ' %s %s;' >> %s" % (key, value, cfg_file)) self.expect(self.prompt) self.sendline("echo '}' >> %s" % cfg_file) # TODO: extra per board dhcp6 provisioning # combine all configs into one self.sendline("cat /etc/dhcp/dhcpd.conf.* >> /etc/dhcp/dhcpd.conf") self.expect(self.prompt) def copy_cmts_provisioning_files(self, board_config): # Look in all overlays as well, and PATH as a workaround for standalone paths = os.environ['PATH'].split(os.pathsep) paths += os.environ['BFT_OVERLAY'].split(' ') cfg_list = [] if 'tftp_cfg_files' in board_config: for path in paths: for cfg in board_config['tftp_cfg_files']: cfg_list += glob.glob(path + '/devices/cm-cfg/%s' % cfg) else: for path in paths: cfg_list += glob.glob(path + 
'/devices/cm-cfg/UNLIMITCASA.cfg') cfg_set = set(cfg_list) # Copy binary files to tftp server for cfg in cfg_set: # TODO: use common cmd_exists cmd_exists = lambda x: any(os.access(os.path.join(path, x), os.X_OK) for path in os.environ["PATH"].split(os.pathsep)) assert cmd_exists('docsis') # TODO: much better error checking os.system("docsis -e %s /dev/null %s" % (cfg, cfg.replace('.txt', '.bin'))) self.copy_file_to_server(cfg.replace('.txt', '.bin')) os.remove(cfg.replace('.txt', '.bin')) def provision_board(self, board_config): self.sendline('/etc/init.d/isc-dhcp-server stop') self.expect(self.prompt) self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) # we are bypass this for now (see http://patchwork.ozlabs.org/patch/117949/) self.sendline('sysctl -w net.ipv6.conf.%s.accept_dad=0' % self.iface_dut) self.expect(self.prompt) self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw)) self.expect(self.prompt) self.sendline('ifconfig %s inet6 add fc00:db20:35b:7399::5/64' % self.iface_dut) self.expect(self.prompt) # TODO: specify these via config self.sendline('ip route add 192.168.201.0/24 via 192.168.3.222') self.expect(self.prompt) self.sendline('ip route add 192.168.200.0/24 via 192.168.3.222') self.expect(self.prompt) self.sendline('ip -6 route add 2001:ed8:77b5:2000::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut) self.expect(self.prompt) self.sendline('ip -6 route add 2001:ed8:77b5:2001::/64 via fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b dev %s metric 1024' % self.iface_dut) self.expect(self.prompt) self.update_cmts_isc_dhcp_config(board_config) 
self.sendline('/etc/init.d/isc-dhcp-server start') # We expect both, so we need debian 9 or greater for this device self.expect('Starting ISC DHCPv4 server.*dhcpd.') self.expect('Starting ISC DHCPv6 server.*dhcpd.') self.expect(self.prompt) # this might be redundant, but since might not have a tftpd server running # here we have to start one for the CM configs self.start_tftp_server() self.copy_cmts_provisioning_files(board_config) self.sendline("sed 's/disable\\t\\t= yes/disable\\t\\t= no/g' -i /etc/xinetd.d/time") self.expect(self.prompt) self.sendline("grep -q flags.*=.*IPv6 /etc/xinetd.d/time || sed '/wait.*=/a\\\\tflags\\t\\t= IPv6' -i /etc/xinetd.d/time") self.expect(self.prompt) self.sendline('/etc/init.d/xinetd restart') self.expect('Starting internet superserver: xinetd.') self.expect(self.prompt) def reprovision_board(self, board_config): self.copy_cmts_provisioning_files(board_config) self.update_cmts_isc_dhcp_config(board_config) self.sendline('/etc/init.d/isc-dhcp-server restart') self.expect(['Starting ISC DHCP(v4)? 
server.*dhcpd.', 'Starting isc-dhcp-server.*']) self.expect(self.prompt) def setup_dhcp_server(self): # configure DHCP server self.sendline('/etc/init.d/isc-dhcp-server stop') self.expect(self.prompt) self.sendline('sed s/INTERFACES=.*/INTERFACES=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) self.sendline('sed s/INTERFACESv4=.*/INTERFACESv4=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) self.sendline('sed s/INTERFACESv6=.*/INTERFACESv6=\\"%s\\"/g -i /etc/default/isc-dhcp-server' % self.iface_dut) self.expect(self.prompt) self.sendline('cat > /etc/dhcp/dhcpd.conf << EOF') self.sendline('ddns-update-style none;') self.sendline('option domain-name "bigfoot-test";') self.sendline('option domain-name-servers %s;' % self.gw) self.sendline('default-lease-time 600;') self.sendline('max-lease-time 7200;') # use the same netmask as the lan device self.sendline('subnet %s netmask %s {' % (self.nw.network_address, self.nw.netmask)) self.sendline(' range %s %s;' % (self.nw.network_address + 10, self.nw.network_address + 100)) self.sendline(' option routers %s;' % self.gw) self.sendline('}') self.sendline('EOF') self.expect(self.prompt) self.sendline('/etc/init.d/isc-dhcp-server start') self.expect(['Starting ISC DHCP(v4)? 
server.*dhcpd.', 'Starting isc-dhcp-server.*']) self.expect(self.prompt) def setup_dnsmasq(self): self.sendline('cat > /etc/dnsmasq.conf << EOF') self.sendline('server=8.8.4.4') self.sendline('listen-address=127.0.0.1') self.sendline('listen-address=%s' % self.gw) self.sendline('EOF') self.sendline('/etc/init.d/dnsmasq restart') self.expect(self.prompt) def setup_as_wan_gateway(self): self.setup_dnsmasq() self.sendline('killall iperf ab hping3') self.expect(self.prompt) self.sendline('\nsysctl net.ipv6.conf.all.disable_ipv6=0') self.expect('sysctl ') self.expect(self.prompt) # potential cleanup so this wan device works self.sendline('iptables -t nat -X') self.expect(self.prompt) self.sendline('iptables -t nat -F') self.expect(self.prompt) # set WAN ip address if self.wan_dhcp: self.sendline('/etc/init.d/isc-dhcp-server stop') self.expect(self.prompt) self.sendline('dhclient -r %s; dhclient %s' % (self.iface_dut, self.iface_dut)) self.expect(self.prompt) self.gw = self.get_interface_ipaddr(self.iface_dut) else: self.sendline('ifconfig %s %s' % (self.iface_dut, self.gw)) self.expect(self.prompt) self.sendline('ifconfig %s up' % self.iface_dut) self.expect(self.prompt) if not self.wan_cmts_provisioner: self.setup_dhcp_server() # configure routing self.sendline('sysctl net.ipv4.ip_forward=1') self.expect(self.prompt) if self.wan_no_eth0 or self.wan_dhcp: wan_uplink_iface = self.iface_dut else: wan_uplink_iface = "eth0" wan_ip_uplink = self.get_interface_ipaddr(wan_uplink_iface) self.sendline('iptables -t nat -A POSTROUTING -o %s -j SNAT --to-source %s' % (wan_uplink_iface, wan_ip_uplink)) self.expect(self.prompt) self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps') self.expect(self.prompt) self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack') self.expect(self.prompt) self.sendline('ifconfig %s' % self.iface_dut) self.expect(self.prompt) self.turn_off_pppoe() def setup_as_lan_device(self): # potential cleanup so this wan device works self.sendline('killall iperf 
ab hping3') self.expect(self.prompt) self.sendline('\niptables -t nat -X') self.expect('iptables -t') self.expect(self.prompt) self.sendline('sysctl net.ipv6.conf.all.disable_ipv6=0') self.expect(self.prompt) self.sendline('sysctl net.ipv4.ip_forward=1') self.expect(self.prompt) self.sendline('iptables -t nat -F; iptables -t nat -X') self.expect(self.prompt) self.sendline('iptables -F; iptables -X') self.expect(self.prompt) self.sendline('iptables -t nat -A PREROUTING -p tcp --dport 222 -j DNAT --to-destination %s:22' % self.lan_gateway) self.expect(self.prompt) self.sendline('iptables -t nat -A POSTROUTING -o %s -p tcp --dport 22 -j MASQUERADE' % self.iface_dut) self.expect(self.prompt) self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_timestamps') self.expect(self.prompt) self.sendline('echo 0 > /proc/sys/net/ipv4/tcp_sack') self.expect(self.prompt) self.sendline('pkill --signal 9 -f dhclient.*%s' % self.iface_dut) self.expect(self.prompt) def start_lan_client(self, wan_gw=None): self.sendline('\nifconfig %s up' % self.iface_dut) self.expect('ifconfig %s up' % self.iface_dut) self.expect(self.prompt) self.sendline("dhclient -r %s" % self.iface_dut) self.expect(self.prompt) self.sendline('\nifconfig %s 0.0.0.0' % self.iface_dut) self.expect(self.prompt) self.sendline('rm /var/lib/dhcp/dhclient.leases') self.expect(self.prompt) self.sendline("sed -e 's/mv -f $new_resolv_conf $resolv_conf/cat $new_resolv_conf > $resolv_conf/g' -i /sbin/dhclient-script") self.expect(self.prompt) self.sendline('ip route del default dev eth0') self.expect(self.prompt) for attempt in range(3): try: self.sendline('dhclient -v %s' % self.iface_dut) self.expect('DHCPOFFER', timeout=30) self.expect(self.prompt) break except: self.sendcontrol('c') else: raise Exception("Error: Device on LAN couldn't obtain address via DHCP.") self.sendline('ifconfig %s' % self.iface_dut) self.expect(self.prompt) self.sendline('ip route') # TODO: we should verify this so other way, because the they could be the 
same subnets # in theory i = self.expect(['default via %s dev %s' % (self.lan_gateway, self.iface_dut), pexpect.TIMEOUT], timeout=5) if i == 1: # bridged mode self.is_bridged = True # update gw self.sendline("ip route list 0/0 | awk '{print $3}'") self.expect_exact("ip route list 0/0 | awk '{print $3}'") self.expect(self.prompt) self.lan_gateway = ipaddress.IPv4Address(self.before.strip().decode()) ip_addr = self.get_interface_ipaddr(self.iface_dut) self.sendline("ip route | grep %s | awk '{print $1}'" % ip_addr) self.expect_exact("ip route | grep %s | awk '{print $1}'" % ip_addr) self.expect(self.prompt) self.lan_network = ipaddress.IPv4Network(self.before.strip().decode()) # Setup HTTP proxy, so board webserver is accessible via this device self.sendline('curl --version') self.expect(self.prompt) self.sendline('ab -V') self.expect(self.prompt) self.sendline('nmap --version') self.expect(self.prompt) self.sendline("sed -i 's/^Port 8888/Port 8080/' /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf") self.expect(self.prompt) self.sendline("sed 's/#Allow/Allow/g' -i /etc/tinyproxy.conf /etc/tinyproxy/tinyproxy.conf") self.expect(self.prompt) self.sendline('/etc/init.d/tinyproxy restart') self.expect('Restarting') self.expect(self.prompt) # Write a useful ssh config for routers self.sendline('mkdir -p ~/.ssh') self.sendline('cat > ~/.ssh/config << EOF') self.sendline('Host %s' % self.lan_gateway) self.sendline('StrictHostKeyChecking no') self.sendline('UserKnownHostsFile=/dev/null') self.sendline('') self.sendline('Host krouter') self.sendline('Hostname %s' % self.lan_gateway) self.sendline('StrictHostKeyChecking no') self.sendline('UserKnownHostsFile=/dev/null') self.sendline('EOF') self.expect(self.prompt) # Copy an id to the router so people don't have to type a password to ssh or scp self.sendline('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway) self.expect_exact('nc %s 22 -w 1 | cut -c1-3' % self.lan_gateway) if 0 == self.expect(['SSH'] + self.prompt, timeout=5) 
and not self.is_bridged: self.sendcontrol('c') self.expect(self.prompt) self.sendline('[ -e /root/.ssh/id_rsa ] || ssh-keygen -N "" -f /root/.ssh/id_rsa') if 0 != self.expect(['Protocol mismatch.'] + self.prompt): self.sendline('scp ~/.ssh/id_rsa.pub %s:/etc/dropbear/authorized_keys' % self.lan_gateway) if 0 == self.expect(['assword:'] + self.prompt): self.sendline('password') self.expect(self.prompt) else: self.sendcontrol('c') self.expect(self.prompt) if self.install_pkgs_after_dhcp: self.install_pkgs() if wan_gw is not None and 'options' in self.kwargs and \ 'lan-fixed-route-to-wan' in self.kwargs['options']: self.sendline('ip route add %s via %s' % (wan_gw, self.lan_gateway)) self.expect(self.prompt) def add_new_user(self, id, pwd): self.sendline('\nadduser %s' % id) try: self.expect_exact("Enter new UNIX password", timeout=5) self.sendline('%s' % pwd) self.expect_exact("Retype new UNIX password") self.sendline('%s' % pwd) self.expect_exact("Full Name []") self.sendline('%s' % id) self.expect_exact("Room Number []") self.sendline('1') self.expect_exact("Work Phone []") self.sendline('4081234567') self.expect_exact("Home Phone []") self.sendline('4081234567') self.expect_exact("Other []") self.sendline('4081234567') self.expect_exact("Is the information correct?") self.sendline('y') self.expect(self.prompt) self.sendline('usermod -aG sudo %s' % id) self.expect(self.prompt) # Remove "$" in the login prompt and replace it with "#" self.sendline('sed -i \'s/\\w\\\$ /\\\w# /g\' //home/%s/.bashrc' % id) self.expect(self.prompt, timeout=30) except: self.expect(self.prompt, timeout=30) def tftp_server_ip_int(self): # Example use | 2.200315 | 2 |
river/utils/random.py | fox-ds/river | 2,184 | 6624896 | <reponame>fox-ds/river<gh_stars>1000+
import math
import random
__all__ = ["poisson"]
def poisson(rate: float, rng=random) -> int:
    """Sample a random value from a Poisson distribution.

    Uses Knuth's multiplication algorithm, which runs in O(rate) time and
    is only suitable for small rates: for large rates ``exp(-rate)``
    underflows to 0 and the sampled values degrade.

    Parameters
    ----------
    rate
        The rate (lambda) of the distribution; must be non-negative.
    rng
        A random number generator exposing ``random()``; defaults to the
        ``random`` module.

    Raises
    ------
    ValueError
        If ``rate`` is negative.

    References
    ----------
    [^1] [Wikipedia article](https://www.wikiwand.com/en/Poisson_distribution#/Generating_Poisson-distributed_random_variables)

    """
    if rate < 0:
        raise ValueError(f"rate must be non-negative, got {rate}")
    if rate == 0:
        # Degenerate distribution: always 0.  Without this guard,
        # L == exp(0) == 1, the while condition (1 > 1) is immediately
        # false and the loop below would return -1, which is not a
        # valid Poisson sample.
        return 0
    L = math.exp(-rate)
    k = 0
    p = 1.0
    while p > L:
        k += 1
        p *= rng.random()
    return k - 1
| import math
import random
__all__ = ["poisson"]
def poisson(rate: float, rng=random) -> int:
"""Sample a random value from a Poisson distribution.
Parameters
----------
rate
rng
References
----------
[^1] [Wikipedia article](https://www.wikiwand.com/en/Poisson_distribution#/Generating_Poisson-distributed_random_variables)
"""
L = math.exp(-rate)
k = 0
p = 1
while p > L:
k += 1
p *= rng.random()
return k - 1 | en | 0.460012 | Sample a random value from a Poisson distribution. Parameters ---------- rate rng References ---------- [^1] [Wikipedia article](https://www.wikiwand.com/en/Poisson_distribution#/Generating_Poisson-distributed_random_variables) | 3.793205 | 4 |
config_manager/eucalyptus/topology/__init__.py | tbeckham/DeploymentManager | 0 | 6624897 | <filename>config_manager/eucalyptus/topology/__init__.py
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from config_manager.baseconfig import BaseConfig
from config_manager.eucalyptus.topology.cluster import Cluster
from config_manager.eucalyptus.topology.cloud_controller import CloudController
from config_manager.eucalyptus.topology.walrus import Walrus
from config_manager.eucalyptus.topology.ufs import UserFacingServices
class Topology(BaseConfig):
    """Eucalyptus cloud topology configuration block.

    Holds the cloud controllers, walrus, user-facing services and the
    set of clusters that make up a Eucalyptus deployment.
    """

    def __init__(self,
                 name=None,
                 description=None,
                 read_file_path=None,
                 write_file_path=None,
                 version=None):
        description = description or "Eucalyptus Cloud Topology Configuration Block"
        self.cloud_controllers = self.create_property('cloud_controller')
        self.walrus = self.create_property('walrus')
        self.user_facing_services = self.create_property('user_facing')
        # Maps cluster name -> Cluster object.
        self.clusters_property = self.create_property('clusters', value={})
        super(Topology, self).__init__(name=name,
                                       description=description,
                                       write_file_path=write_file_path,
                                       read_file_path=read_file_path,
                                       version=version)

    def add_clusters(self, clusters):
        """Add one Cluster (or a list of Clusters) to the topology.

        Raises ValueError on an empty argument or a duplicate cluster
        name, and AssertionError when a non-Cluster is supplied.
        """
        if not clusters:
            raise ValueError('add_clusters provided empty value: "{0}"'
                             .format(clusters))
        if not isinstance(clusters, list):
            clusters = [clusters]
        for cluster in clusters:
            assert isinstance(cluster, Cluster), 'add clusters passed non ' \
                                                 'cluster type, cluster:"{0}"' \
                                                 .format(cluster)
            if self.get_cluster(cluster.name.value):
                raise ValueError('Cluster with name:"{0}" already exists'
                                 .format(cluster.name.value))
            self.clusters_property.value[cluster.name.value] = cluster

    def create_cluster(self, name, hypervisor, read_file_path=None, write_file_path=None):
        """Create a Cluster, register it on this topology and return it."""
        cluster = Cluster(name=name, hypervisor=hypervisor, read_file_path=read_file_path,
                          write_file_path=write_file_path)
        self.add_clusters(cluster)
        return cluster

    def get_cluster(self, clustername):
        """Return the cluster registered under `clustername`, or None."""
        if clustername in self.clusters_property.value:
            return self.clusters_property.value[clustername]
        return None

    def delete_cluster(self, clustername):
        """Remove the named cluster; warn (do not raise) if absent."""
        if clustername in self.clusters_property.value:
            self.clusters_property.value.pop(clustername)
        else:
            # Parenthesised so this parses as a statement on Python 2
            # and a function call on Python 3.
            print('clustername:"{0}" not in cluster list'.format(clustername))

    def add_cloud_controllers(self, clcs):
        """Append one CLC (or a list of CLCs) to this topology.

        The original implementation referenced the never-assigned
        attribute ``self.cloud_controllers_property`` and so always
        raised AttributeError; it now uses the ``cloud_controllers``
        property created in __init__.
        """
        if clcs is None:
            raise ValueError('add_cloud_controllers provided empty '
                             'value: "{0}"'.format(clcs))
        if not isinstance(clcs, list):
            clcs = [clcs]
        # Lazily start the list on first use, mirroring how
        # clusters_property keeps its dict in `.value`.
        # NOTE(review): assumes create_property() defaults `.value` to
        # None -- confirm against BaseConfig.
        if self.cloud_controllers.value is None:
            self.cloud_controllers.value = []
        for clc in clcs:
            self.cloud_controllers.value.append(clc)

    def add_walrus(self, walrus):
        # NOTE(review): this replaces the property object created in
        # __init__ rather than setting its `.value` -- confirm intended.
        self.walrus = walrus

    def add_user_facing_services(self, user_facing_services):
        # NOTE(review): same caveat as add_walrus -- overwrites the
        # property object itself.
        self.user_facing_services = user_facing_services

    def _aggregate_eucalyptus_properties(self, show_all=False):
        """Merge per-cluster eucalyptus properties with this block's own."""
        eucaproperties = {}
        for key in self.clusters_property.value:
            cluster = self.clusters_property.value[key]
            eucaproperties.update(cluster._aggregate_eucalyptus_properties(show_all=show_all))
        agg_dict = super(Topology, self)._aggregate_eucalyptus_properties(show_all=show_all)
        eucaproperties.update(agg_dict)
        return eucaproperties
| <filename>config_manager/eucalyptus/topology/__init__.py
#!/usr/bin/env python
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from config_manager.baseconfig import BaseConfig
from config_manager.eucalyptus.topology.cluster import Cluster
from config_manager.eucalyptus.topology.cloud_controller import CloudController
from config_manager.eucalyptus.topology.walrus import Walrus
from config_manager.eucalyptus.topology.ufs import UserFacingServices
class Topology(BaseConfig):
    """Eucalyptus cloud topology configuration block.

    Aggregates the cloud controllers, walrus, user-facing services and
    named clusters of a Eucalyptus deployment.
    """

    def __init__(self,
                 name=None,
                 description=None,
                 read_file_path=None,
                 write_file_path=None,
                 version=None):
        description = description or "Eucalyptus Cloud Topology Configuration Block"
        self.cloud_controllers = self.create_property('cloud_controller')
        self.walrus = self.create_property('walrus')
        self.user_facing_services = self.create_property('user_facing')
        # Cluster registry keyed by cluster name.
        self.clusters_property = self.create_property('clusters', value={})
        super(Topology, self).__init__(name=name,
                                       description=description,
                                       write_file_path=write_file_path,
                                       read_file_path=read_file_path,
                                       version=version)

    def add_clusters(self, clusters):
        """Register one Cluster or a list of Clusters.

        Raises ValueError for an empty argument or a duplicate cluster
        name, and AssertionError for a non-Cluster element.
        """
        if not clusters:
            raise ValueError('add_clusters provided empty value: "{0}"'
                             .format(clusters))
        if not isinstance(clusters, list):
            clusters = [clusters]
        for cluster in clusters:
            assert isinstance(cluster, Cluster), 'add clusters passed non ' \
                                                 'cluster type, cluster:"{0}"' \
                                                 .format(cluster)
            if self.get_cluster(cluster.name.value):
                raise ValueError('Cluster with name:"{0}" already exists'
                                 .format(cluster.name.value))
            self.clusters_property.value[cluster.name.value] = cluster

    def create_cluster(self, name, hypervisor, read_file_path=None, write_file_path=None):
        """Create a Cluster, add it to the topology and return it."""
        cluster = Cluster(name=name, hypervisor=hypervisor, read_file_path=read_file_path,
                          write_file_path=write_file_path)
        self.add_clusters(cluster)
        return cluster

    def get_cluster(self, clustername):
        """Return the cluster named `clustername`, or None if unknown."""
        if clustername in self.clusters_property.value:
            return self.clusters_property.value[clustername]
        return None

    def delete_cluster(self, clustername):
        """Remove the named cluster; warn (do not raise) when absent."""
        if clustername in self.clusters_property.value:
            self.clusters_property.value.pop(clustername)
        else:
            # Parenthesised so it is valid on both Python 2 and 3.
            print('clustername:"{0}" not in cluster list'.format(clustername))

    def add_cloud_controllers(self, clcs):
        """Append one CLC (or a list of CLCs) to this topology.

        Fixed: the previous code referenced the never-assigned attribute
        ``self.cloud_controllers_property`` (always AttributeError); it
        now uses the ``cloud_controllers`` property from __init__.
        """
        if clcs is None:
            raise ValueError('add_cloud_controllers provided empty '
                             'value: "{0}"'.format(clcs))
        if not isinstance(clcs, list):
            clcs = [clcs]
        # Start the list lazily, mirroring clusters_property's use of
        # `.value`.  NOTE(review): assumes create_property() defaults
        # `.value` to None -- confirm against BaseConfig.
        if self.cloud_controllers.value is None:
            self.cloud_controllers.value = []
        for clc in clcs:
            self.cloud_controllers.value.append(clc)

    def add_walrus(self, walrus):
        # NOTE(review): replaces the property object made in __init__
        # instead of assigning its `.value` -- confirm intended.
        self.walrus = walrus

    def add_user_facing_services(self, user_facing_services):
        # NOTE(review): same caveat as add_walrus.
        self.user_facing_services = user_facing_services

    def _aggregate_eucalyptus_properties(self, show_all=False):
        """Combine every cluster's eucalyptus properties with this block's."""
        eucaproperties = {}
        for key in self.clusters_property.value:
            cluster = self.clusters_property.value[key]
            eucaproperties.update(cluster._aggregate_eucalyptus_properties(show_all=show_all))
        agg_dict = super(Topology, self)._aggregate_eucalyptus_properties(show_all=show_all)
        eucaproperties.update(agg_dict)
        return eucaproperties
| en | 0.818407 | #!/usr/bin/env python # Copyright 2009-2014 Eucalyptus Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.354354 | 2 |
tests/test_storage.py | jfinkels/sbclassifier | 3 | 6624898 | # test_storage.py - unit tests for the sbclassifier.storage module
#
# Copyright (C) 2002-2013 Python Software Foundation; All Rights Reserved
# Copyright 2014 <NAME>.
#
# This file is part of sbclassifier, which is licensed under the Python
# Software Foundation License; for more information, see LICENSE.txt.
import glob
import os
import tempfile
import unittest
from sbclassifier.classifiers.storage import CDBClassifier
from sbclassifier.classifiers.storage import ShelveClassifier
from sbclassifier.classifiers.storage import PickleClassifier
#from sbclassifier.classifiers.storage import ZODBClassifier
try:
import cdb
cdb_is_available = True
except ImportError:
cdb_is_available = False
# try:
# import ZODB
# zodb_is_available = True
# except ImportError:
# zodb_is_available = False
class _StorageTestBase(unittest.TestCase):
# Subclass must define a concrete StorageClass.
StorageClass = None
def setUp(self):
self.db_name = tempfile.mktemp("spambayestest")
self.classifier = self.StorageClass(self.db_name)
def tearDown(self):
self.classifier.close()
self.classifier = None
for name in glob.glob(self.db_name + "*"):
if os.path.isfile(name):
os.remove(name)
def testLoadAndStore(self):
# Simple test to verify that putting data in the db, storing and
# then loading gives back the same data.
c = self.classifier
c.learn(["some", "simple", "tokens"], True)
c.learn(["some", "other"], False)
c.learn(["ones"], False)
c.store()
c.close()
del self.classifier
self.classifier = self.StorageClass(self.db_name)
self._checkAllWordCounts((("some", 1, 1),
("simple", 0, 1),
("tokens", 0, 1),
("other", 1, 0),
("ones", 1, 0)), False)
self.assertEqual(self.classifier.nham, 2)
self.assertEqual(self.classifier.nspam, 1)
def testCounts(self):
# Check that nham and nspam are correctedly adjusted.
c = self.classifier
count = 30
for i in range(count):
c.learn(["tony"], True)
self.assertEqual(c.nspam, i + 1)
self.assertEqual(c.nham, 0)
for i in range(count):
c.learn(["tony"], False)
self.assertEqual(c.nham, i + 1)
self.assertEqual(c.nspam, count)
for i in range(count):
c.unlearn(["tony"], True)
self.assertEqual(c.nham, count)
self.assertEqual(c.nspam, count - i - 1)
for i in range(count):
c.unlearn(["tony"], False)
self.assertEqual(c.nham, count - i - 1)
self.assertEqual(c.nspam, 0)
def _checkWordCounts(self, word, expected_ham, expected_spam):
assert word
info = self.classifier._wordinfoget(word)
if info is None:
if expected_ham == expected_spam == 0:
return
self.fail("_CheckWordCounts for '%s' got None!" % word)
if info.hamcount != expected_ham:
self.fail("Hamcount '%s' wrong - got %d, but expected %d"
% (word, info.hamcount, expected_ham))
if info.spamcount != expected_spam:
self.fail("Spamcount '%s' wrong - got %d, but expected %d"
% (word, info.spamcount, expected_spam))
def _checkAllWordCounts(self, counts, do_persist):
for info in counts:
self._checkWordCounts(*info)
if do_persist:
self.classifier.store()
self.classifier.load()
self._checkAllWordCounts(counts, False)
def testHapax(self):
self._dotestHapax(False)
self._dotestHapax(True)
def _dotestHapax(self, do_persist):
c = self.classifier
c.learn(["common", "nearly_hapax", "hapax", ], False)
c.learn(["common", "nearly_hapax"], False)
c.learn(["common"], False)
# All the words should be there.
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Unlearn the complete set.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Re-learn that set, so deleted hapax is reloaded
c.learn(["common", "nearly_hapax", "hapax", ], False)
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Back to where we started - start unlearning all down to zero.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Unlearn the next set.
c.unlearn(["common", "nearly_hapax"], False)
self._checkAllWordCounts((("common", 1, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
c.unlearn(["common"], False)
self._checkAllWordCounts((("common", 0, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
def test_bug777026(self):
c = self.classifier
word = "tim"
c.learn([word], False)
c.learn([word], False)
self._checkAllWordCounts([(word, 2, 0)], False)
# Clone word's WordInfo record.
record = self.classifier.wordinfo[word]
newrecord = type(record)()
newrecord.__setstate__(record.__getstate__())
self.assertEqual(newrecord.hamcount, 2)
self.assertEqual(newrecord.spamcount, 0)
# Reduce the hamcount -- this tickled an excruciatingly subtle
# bug in a ShelveClassifier's _wordinfoset, which, at the time
# this test was written, couldn't actually be provoked by the
# way _wordinfoset got called by way of learn() and unlearn()
# methods. The code implicitly relied on that the record passed
# to _wordinfoset was always the same object as was already
# in wordinfo[word].
newrecord.hamcount -= 1
c._wordinfoset(word, newrecord)
# If the bug is present, the ShelveClassifier still believes
# the hamcount is 2.
self._checkAllWordCounts([(word, 1, 0)], False)
c.unlearn([word], False)
self._checkAllWordCounts([(word, 0, 0)], False)
# Test classes for each classifier.
class PickleStorageTestCase(_StorageTestBase):
StorageClass = PickleClassifier
class DBStorageTestCase(_StorageTestBase):
StorageClass = ShelveClassifier
def _fail_open_best(self, *args):
raise Exception("No dbm modules available!")
@unittest.skip('This is unnecessary')
def testNoDBMAvailable(self):
from sbclassifier.storage import open_storage
db_name = tempfile.mktemp("nodbmtest")
ShelveClassifier_load = ShelveClassifier.load
ShelveClassifier.load = self._fail_open_best
print("This test will print out an error, which can be ignored.")
try:
self.assertRaises(Exception, open_storage, (db_name, "dbm"))
finally:
ShelveClassifier.load = ShelveClassifier_load
for name in glob.glob(db_name+"*"):
if os.path.isfile(name):
os.remove(name)
@unittest.skipUnless(cdb_is_available, 'requires cdb')
class CDBStorageTestCase(_StorageTestBase):
StorageClass = CDBClassifier
# @unittest.skipUnless(zodb_is_available, 'requires ZODB')
# class ZODBStorageTestCase(_StorageTestBase):
# StorageClass = ZODBClassifier
| # test_storage.py - unit tests for the sbclassifier.storage module
#
# Copyright (C) 2002-2013 Python Software Foundation; All Rights Reserved
# Copyright 2014 <NAME>.
#
# This file is part of sbclassifier, which is licensed under the Python
# Software Foundation License; for more information, see LICENSE.txt.
import glob
import os
import tempfile
import unittest
from sbclassifier.classifiers.storage import CDBClassifier
from sbclassifier.classifiers.storage import ShelveClassifier
from sbclassifier.classifiers.storage import PickleClassifier
#from sbclassifier.classifiers.storage import ZODBClassifier
try:
import cdb
cdb_is_available = True
except ImportError:
cdb_is_available = False
# try:
# import ZODB
# zodb_is_available = True
# except ImportError:
# zodb_is_available = False
class _StorageTestBase(unittest.TestCase):
# Subclass must define a concrete StorageClass.
StorageClass = None
def setUp(self):
self.db_name = tempfile.mktemp("spambayestest")
self.classifier = self.StorageClass(self.db_name)
def tearDown(self):
self.classifier.close()
self.classifier = None
for name in glob.glob(self.db_name + "*"):
if os.path.isfile(name):
os.remove(name)
def testLoadAndStore(self):
# Simple test to verify that putting data in the db, storing and
# then loading gives back the same data.
c = self.classifier
c.learn(["some", "simple", "tokens"], True)
c.learn(["some", "other"], False)
c.learn(["ones"], False)
c.store()
c.close()
del self.classifier
self.classifier = self.StorageClass(self.db_name)
self._checkAllWordCounts((("some", 1, 1),
("simple", 0, 1),
("tokens", 0, 1),
("other", 1, 0),
("ones", 1, 0)), False)
self.assertEqual(self.classifier.nham, 2)
self.assertEqual(self.classifier.nspam, 1)
def testCounts(self):
# Check that nham and nspam are correctedly adjusted.
c = self.classifier
count = 30
for i in range(count):
c.learn(["tony"], True)
self.assertEqual(c.nspam, i + 1)
self.assertEqual(c.nham, 0)
for i in range(count):
c.learn(["tony"], False)
self.assertEqual(c.nham, i + 1)
self.assertEqual(c.nspam, count)
for i in range(count):
c.unlearn(["tony"], True)
self.assertEqual(c.nham, count)
self.assertEqual(c.nspam, count - i - 1)
for i in range(count):
c.unlearn(["tony"], False)
self.assertEqual(c.nham, count - i - 1)
self.assertEqual(c.nspam, 0)
def _checkWordCounts(self, word, expected_ham, expected_spam):
assert word
info = self.classifier._wordinfoget(word)
if info is None:
if expected_ham == expected_spam == 0:
return
self.fail("_CheckWordCounts for '%s' got None!" % word)
if info.hamcount != expected_ham:
self.fail("Hamcount '%s' wrong - got %d, but expected %d"
% (word, info.hamcount, expected_ham))
if info.spamcount != expected_spam:
self.fail("Spamcount '%s' wrong - got %d, but expected %d"
% (word, info.spamcount, expected_spam))
def _checkAllWordCounts(self, counts, do_persist):
for info in counts:
self._checkWordCounts(*info)
if do_persist:
self.classifier.store()
self.classifier.load()
self._checkAllWordCounts(counts, False)
def testHapax(self):
self._dotestHapax(False)
self._dotestHapax(True)
def _dotestHapax(self, do_persist):
c = self.classifier
c.learn(["common", "nearly_hapax", "hapax", ], False)
c.learn(["common", "nearly_hapax"], False)
c.learn(["common"], False)
# All the words should be there.
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Unlearn the complete set.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Re-learn that set, so deleted hapax is reloaded
c.learn(["common", "nearly_hapax", "hapax", ], False)
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Back to where we started - start unlearning all down to zero.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Unlearn the next set.
c.unlearn(["common", "nearly_hapax"], False)
self._checkAllWordCounts((("common", 1, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
c.unlearn(["common"], False)
self._checkAllWordCounts((("common", 0, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
def test_bug777026(self):
c = self.classifier
word = "tim"
c.learn([word], False)
c.learn([word], False)
self._checkAllWordCounts([(word, 2, 0)], False)
# Clone word's WordInfo record.
record = self.classifier.wordinfo[word]
newrecord = type(record)()
newrecord.__setstate__(record.__getstate__())
self.assertEqual(newrecord.hamcount, 2)
self.assertEqual(newrecord.spamcount, 0)
# Reduce the hamcount -- this tickled an excruciatingly subtle
# bug in a ShelveClassifier's _wordinfoset, which, at the time
# this test was written, couldn't actually be provoked by the
# way _wordinfoset got called by way of learn() and unlearn()
# methods. The code implicitly relied on that the record passed
# to _wordinfoset was always the same object as was already
# in wordinfo[word].
newrecord.hamcount -= 1
c._wordinfoset(word, newrecord)
# If the bug is present, the ShelveClassifier still believes
# the hamcount is 2.
self._checkAllWordCounts([(word, 1, 0)], False)
c.unlearn([word], False)
self._checkAllWordCounts([(word, 0, 0)], False)
# Test classes for each classifier.
class PickleStorageTestCase(_StorageTestBase):
StorageClass = PickleClassifier
class DBStorageTestCase(_StorageTestBase):
StorageClass = ShelveClassifier
def _fail_open_best(self, *args):
raise Exception("No dbm modules available!")
@unittest.skip('This is unnecessary')
def testNoDBMAvailable(self):
from sbclassifier.storage import open_storage
db_name = tempfile.mktemp("nodbmtest")
ShelveClassifier_load = ShelveClassifier.load
ShelveClassifier.load = self._fail_open_best
print("This test will print out an error, which can be ignored.")
try:
self.assertRaises(Exception, open_storage, (db_name, "dbm"))
finally:
ShelveClassifier.load = ShelveClassifier_load
for name in glob.glob(db_name+"*"):
if os.path.isfile(name):
os.remove(name)
@unittest.skipUnless(cdb_is_available, 'requires cdb')
class CDBStorageTestCase(_StorageTestBase):
StorageClass = CDBClassifier
# @unittest.skipUnless(zodb_is_available, 'requires ZODB')
# class ZODBStorageTestCase(_StorageTestBase):
# StorageClass = ZODBClassifier
| en | 0.798716 | # test_storage.py - unit tests for the sbclassifier.storage module # # Copyright (C) 2002-2013 Python Software Foundation; All Rights Reserved # Copyright 2014 <NAME>. # # This file is part of sbclassifier, which is licensed under the Python # Software Foundation License; for more information, see LICENSE.txt. #from sbclassifier.classifiers.storage import ZODBClassifier # try: # import ZODB # zodb_is_available = True # except ImportError: # zodb_is_available = False # Subclass must define a concrete StorageClass. # Simple test to verify that putting data in the db, storing and # then loading gives back the same data. # Check that nham and nspam are correctedly adjusted. # All the words should be there. # Unlearn the complete set. # 'hapax' removed, rest still there # Re-learn that set, so deleted hapax is reloaded # Back to where we started - start unlearning all down to zero. # 'hapax' removed, rest still there # Unlearn the next set. # Clone word's WordInfo record. # Reduce the hamcount -- this tickled an excruciatingly subtle # bug in a ShelveClassifier's _wordinfoset, which, at the time # this test was written, couldn't actually be provoked by the # way _wordinfoset got called by way of learn() and unlearn() # methods. The code implicitly relied on that the record passed # to _wordinfoset was always the same object as was already # in wordinfo[word]. # If the bug is present, the ShelveClassifier still believes # the hamcount is 2. # Test classes for each classifier. # @unittest.skipUnless(zodb_is_available, 'requires ZODB') # class ZODBStorageTestCase(_StorageTestBase): # StorageClass = ZODBClassifier | 2.555397 | 3 |
pokemongo_bot/plugin_loader.py | timgates42/PokemonGo-Bot | 5,362 | 6624899 | <reponame>timgates42/PokemonGo-Bot
from __future__ import print_function
import os
import sys
import importlib
import re
import requests
import zipfile
import shutil
class PluginLoader(object):
folder_cache = []
def _get_correct_path(self, path):
extension = os.path.splitext(path)[1]
if extension == '.zip':
correct_path = path
else:
correct_path = os.path.dirname(path)
return correct_path
def load_plugin(self, plugin):
github_plugin = GithubPlugin(plugin)
if github_plugin.is_valid_plugin():
if not github_plugin.is_already_installed():
github_plugin.install()
correct_path = github_plugin.get_plugin_folder()
else:
correct_path = self._get_correct_path(plugin)
if correct_path not in self.folder_cache:
self.folder_cache.append(correct_path)
sys.path.append(correct_path)
def remove_path(self, path):
correct_path = self._get_correct_path(path)
sys.path.remove(correct_path)
self.folder_cache.remove(correct_path)
def get_class(self, namespace_class):
[namespace, class_name] = namespace_class.split('.')
my_module = importlib.import_module(namespace)
return getattr(my_module, class_name)
class GithubPlugin(object):
PLUGINS_FOLDER = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'plugins')
def __init__(self, plugin_name):
self.plugin_name = plugin_name
self.plugin_parts = self.get_github_parts()
def is_valid_plugin(self):
return self.plugin_parts is not None
def get_github_parts(self):
groups = re.match('(.*)\/(.*)#(.*)', self.plugin_name)
if groups is None:
return None
parts = {}
parts['user'] = groups.group(1)
parts['repo'] = groups.group(2)
parts['sha'] = groups.group(3)
return parts
def get_installed_version(self):
if not self.is_already_installed():
return None
filename = os.path.join(self.get_plugin_folder(), '.sha')
print(filename)
with open(filename) as file:
return file.read().strip()
def get_local_destination(self):
parts = self.plugin_parts
if parts is None:
raise Exception('Not a valid github plugin')
file_name = '{}_{}_{}.zip'.format(parts['user'], parts['repo'], parts['sha'])
full_path = os.path.join(self.PLUGINS_FOLDER, file_name)
return full_path
def is_already_installed(self):
file_path = self.get_plugin_folder()
if not os.path.isdir(file_path):
return False
sha_file = os.path.join(file_path, '.sha')
if not os.path.isfile(sha_file):
return False
with open(sha_file) as file:
content = file.read().strip()
if content != self.plugin_parts['sha']:
return False
return True
def get_plugin_folder(self):
folder_name = '{}_{}'.format(self.plugin_parts['user'], self.plugin_parts['repo'])
return os.path.join(self.PLUGINS_FOLDER, folder_name)
def get_github_download_url(self):
parts = self.plugin_parts
if parts is None:
raise Exception('Not a valid github plugin')
github_url = 'https://github.com/{}/{}/archive/{}.zip'.format(parts['user'], parts['repo'], parts['sha'])
return github_url
def install(self):
self.download()
self.extract()
def extract(self):
dest = self.get_plugin_folder()
with zipfile.ZipFile(self.get_local_destination(), "r") as z:
z.extractall(dest)
github_folder = os.path.join(dest, '{}-{}'.format(self.plugin_parts['repo'], self.plugin_parts['sha']))
new_folder = os.path.join(dest, '{}'.format(self.plugin_parts['repo']))
shutil.move(github_folder, new_folder)
with open(os.path.join(dest, '.sha'), 'w') as file:
file.write(self.plugin_parts['sha'])
os.remove(self.get_local_destination())
def download(self):
url = self.get_github_download_url()
dest = self.get_local_destination()
r = requests.get(url, stream=True)
r.raise_for_status()
with open(dest, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
r.close()
return dest
| from __future__ import print_function
import os
import sys
import importlib
import re
import requests
import zipfile
import shutil
class PluginLoader(object):
folder_cache = []
def _get_correct_path(self, path):
extension = os.path.splitext(path)[1]
if extension == '.zip':
correct_path = path
else:
correct_path = os.path.dirname(path)
return correct_path
def load_plugin(self, plugin):
github_plugin = GithubPlugin(plugin)
if github_plugin.is_valid_plugin():
if not github_plugin.is_already_installed():
github_plugin.install()
correct_path = github_plugin.get_plugin_folder()
else:
correct_path = self._get_correct_path(plugin)
if correct_path not in self.folder_cache:
self.folder_cache.append(correct_path)
sys.path.append(correct_path)
def remove_path(self, path):
correct_path = self._get_correct_path(path)
sys.path.remove(correct_path)
self.folder_cache.remove(correct_path)
def get_class(self, namespace_class):
[namespace, class_name] = namespace_class.split('.')
my_module = importlib.import_module(namespace)
return getattr(my_module, class_name)
class GithubPlugin(object):
PLUGINS_FOLDER = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'plugins')
def __init__(self, plugin_name):
self.plugin_name = plugin_name
self.plugin_parts = self.get_github_parts()
def is_valid_plugin(self):
return self.plugin_parts is not None
def get_github_parts(self):
groups = re.match('(.*)\/(.*)#(.*)', self.plugin_name)
if groups is None:
return None
parts = {}
parts['user'] = groups.group(1)
parts['repo'] = groups.group(2)
parts['sha'] = groups.group(3)
return parts
def get_installed_version(self):
if not self.is_already_installed():
return None
filename = os.path.join(self.get_plugin_folder(), '.sha')
print(filename)
with open(filename) as file:
return file.read().strip()
def get_local_destination(self):
parts = self.plugin_parts
if parts is None:
raise Exception('Not a valid github plugin')
file_name = '{}_{}_{}.zip'.format(parts['user'], parts['repo'], parts['sha'])
full_path = os.path.join(self.PLUGINS_FOLDER, file_name)
return full_path
def is_already_installed(self):
file_path = self.get_plugin_folder()
if not os.path.isdir(file_path):
return False
sha_file = os.path.join(file_path, '.sha')
if not os.path.isfile(sha_file):
return False
with open(sha_file) as file:
content = file.read().strip()
if content != self.plugin_parts['sha']:
return False
return True
def get_plugin_folder(self):
folder_name = '{}_{}'.format(self.plugin_parts['user'], self.plugin_parts['repo'])
return os.path.join(self.PLUGINS_FOLDER, folder_name)
def get_github_download_url(self):
parts = self.plugin_parts
if parts is None:
raise Exception('Not a valid github plugin')
github_url = 'https://github.com/{}/{}/archive/{}.zip'.format(parts['user'], parts['repo'], parts['sha'])
return github_url
def install(self):
self.download()
self.extract()
def extract(self):
dest = self.get_plugin_folder()
with zipfile.ZipFile(self.get_local_destination(), "r") as z:
z.extractall(dest)
github_folder = os.path.join(dest, '{}-{}'.format(self.plugin_parts['repo'], self.plugin_parts['sha']))
new_folder = os.path.join(dest, '{}'.format(self.plugin_parts['repo']))
shutil.move(github_folder, new_folder)
with open(os.path.join(dest, '.sha'), 'w') as file:
file.write(self.plugin_parts['sha'])
os.remove(self.get_local_destination())
def download(self):
url = self.get_github_download_url()
dest = self.get_local_destination()
r = requests.get(url, stream=True)
r.raise_for_status()
with open(dest, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
r.close()
return dest | en | 0.16932 | #(.*)', self.plugin_name) | 2.469787 | 2 |
source/chapter_4/file_4_2.py | lintongtong123/JackokiePapers | 0 | 6624900 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/11 15:40
# @Author : <NAME>
# @Site : www.jackokie.com
# @File : file_4_2.py
# @Software: PyCharm
# @contact: <EMAIL> | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/11 15:40
# @Author : <NAME>
# @Site : www.jackokie.com
# @File : file_4_2.py
# @Software: PyCharm
# @contact: <EMAIL> | en | 0.183174 | #!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/3/11 15:40 # @Author : <NAME> # @Site : www.jackokie.com # @File : file_4_2.py # @Software: PyCharm # @contact: <EMAIL> | 0.990586 | 1 |
src/sage/modular/modsym/boundary.py | vbraun/sage | 3 | 6624901 | # -*- coding: utf-8 -*-
r"""
Space of boundary modular symbols
Used mainly for computing the cuspidal subspace of modular symbols. The space
of boundary symbols of sign 0 is isomorphic as a Hecke module to the dual of
the space of Eisenstein series, but this does not give a useful method of
computing Eisenstein series, since there is no easy way to extract the constant
terms.
We represent boundary modular symbols as a sum of Manin symbols of the form
`[P, u/v]`, where `u/v` is a cusp for our group `G`. The group of boundary
modular symbols naturally embeds into a vector space `B_k(G)` (see Stein,
section 8.4, or Merel, section 1.4, where this space is called `\CC[\Gamma
\backslash \QQ]_k`, for a definition), which is a finite dimensional `\QQ`
vector space of dimension equal to the number of cusps for `G`. The embedding
takes `[P, u/v]` to `P(u,v)\cdot [(u,v)]`. We represent the basis vectors by
pairs `[(u,v)]` with u, v coprime. On `B_k(G)`, we have the relations
.. MATH::
[\gamma \cdot (u,v)] = [(u,v)]
for all `\gamma \in G` and
.. MATH::
[(\lambda u, \lambda v)] = \operatorname{sign}(\lambda)^k [(u,v)]
for all `\lambda \in \QQ^\times`.
It's possible for these relations to kill a class, i.e., for a pair `[(u,v)]`
to be 0. For example, when `N=4` and `k=3` then `(-1,-2)` is equivalent mod
`\Gamma_1(4)` to `(1,2)` since `2=-2 \bmod 4` and `1=-1 \bmod 2`. But since `k`
is odd, `[(-1,-2)]` is also equivalent to `-[(1,2)]`. Thus this symbol is
equivalent to its negative, hence 0 (notice that this wouldn't be the case in
characteristic 2). This happens for any irregular cusp when the weight is odd;
there are no irregular cusps on `\Gamma_1(N)` except when `N = 4`, but there
can be more on `\Gamma_H` groups. See also prop 2.30 of Stein's Ph.D. thesis.
In addition, in the case that our space is of sign `\sigma = 1` or `-1`, we
also have the relation `[(-u,v)] = \sigma \cdot [(u,v)]`. This relation can
also combine with the above to kill a cusp class - for instance, take (u,v) =
(1,3) for `\Gamma_1(5)`. Then since the cusp `\tfrac{1}{3}` is
`\Gamma_1(5)`-equivalent to the cusp `-\tfrac{1}{3}`, we have that `[(1,3)] =
[(-1,3)]`. Now, on the minus subspace, we also have that `[(-1,3)] = -[(1,3)]`,
which means this class must vanish. Notice that this cannot be used to show
that `[(1,0)]` or `[(0,1)]` is 0.
.. note::
Special care must be taken when working with the images of the cusps 0 and
`\infty` in `B_k(G)`. For all cusps *except* 0 and `\infty`, multiplying the
cusp by -1 corresponds to taking `[(u,v)]` to `[(-u,v)]` in `B_k(G)`. This
means that `[(u,v)]` is equivalent to `[(-u,v)]` whenever `\tfrac{u}{v}` is
equivalent to `-\tfrac{u}{v}`, except in the case of 0 and `\infty`. We
have the following conditions for `[(1,0)]` and `[(0,1)]`:
- `[(0,1)] = \sigma \cdot [(0,1)]`, so `[(0,1)]` is 0 exactly when `\sigma =
-1`.
- `[(1,0)] = \sigma \cdot [(-1,0)]` and `[(1,0)] = (-1)^k [(-1,0)]`, so
`[(1,0)] = 0` whenever `\sigma \ne (-1)^k`.
.. note::
For all the spaces of boundary symbols below, no work is done to determine
the cusps for G at creation time. Instead, cusps are added as they are
discovered in the course of computation. As a result, the rank of a space
can change as a computation proceeds.
REFERENCES:
- Merel, "Universal Fourier expansions of modular
forms." Springer LNM 1585 (1994), pg. 59-95.
- Stein, "Modular Forms, a computational approach." AMS (2007).
"""
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import
from six.moves import range
from sage.misc.misc import repr_lincomb
from sage.structure.richcmp import richcmp_method, richcmp
import sage.modules.free_module as free_module
from sage.modules.free_module_element import is_FreeModuleElement
import sage.modular.arithgroup.all as arithgroup
import sage.modular.cusps as cusps
import sage.modular.dirichlet as dirichlet
import sage.modular.hecke.all as hecke
from sage.modular.modsym.manin_symbol import ManinSymbol
import sage.rings.all as rings
import sage.arith.all as arith
from . import element
class BoundarySpaceElement(hecke.HeckeModuleElement):
    def __init__(self, parent, x):
        """
        Construct a boundary modular symbol.

        INPUT:

        - ``parent`` - BoundarySpace; a space of boundary
          modular symbols

        - ``x`` - a dict with integer keys and values in the
          base field of parent.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(32), sign=-1).boundary_space()
            sage: B(Cusp(1,8))
            [1/8]
            sage: B.0
            [1/8]
            sage: type(B.0)
            <class 'sage.modular.modsym.boundary.BoundarySpaceElement'>
        """
        # Keep the sparse coefficient dict alongside its image in the
        # parent's free module; the latter is what HeckeModuleElement uses.
        self.__x = x
        self.__vec = parent.free_module()(x)
        hecke.HeckeModuleElement.__init__(self, parent, self.__vec)

    def coordinate_vector(self):
        r"""
        Express ``self`` as a vector on the QQ-vector space whose basis
        is ``self.parent()._known_cusps()``.

        EXAMPLES::

            sage: B = ModularSymbols(18,4,sign=1).boundary_space()
            sage: x = B(Cusp(1/2)) ; x
            [1/2]
            sage: x.coordinate_vector()
            (1)
            sage: ((18/5)*x).coordinate_vector()
            (18/5)
            sage: B(Cusp(0))
            [0]
            sage: x.coordinate_vector()
            (1)
            sage: x = B(Cusp(1/2)) ; x
            [1/2]
            sage: x.coordinate_vector()
            (1, 0)
        """
        return self.__vec

    def _repr_(self):
        """
        Return a string describing ``self`` as a formal linear
        combination of the cusps known to the parent space.

        EXAMPLES::

            sage: ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0))._repr_()
            '[0]'
            sage: (-6*ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0)))._repr_()
            '-6*[0]'
        """
        gens_repr = self.parent()._known_gens_repr
        terms = [(gens_repr[i], coeff) for i, coeff in self.__x.items()]
        return repr_lincomb(terms)

    # Arithmetic is implemented directly on the coefficient dicts rather
    # than inherited from the Hecke module machinery, because the basis
    # dimension of the parent can grow as new cusps are discovered.

    def _add_(self, other):
        """
        Return self + other. Assumes that other is a BoundarySpaceElement.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16))
            sage: x + y # indirect doctest
            [2/7] + [13/16]
            sage: x + x # indirect doctest
            2*[2/7]
        """
        # Start from a copy of other's coefficients and fold ours in.
        total = dict(other.__x)
        for idx, coeff in self.__x.items():
            total[idx] = (total[idx] + coeff) if idx in total else coeff
        return BoundarySpaceElement(self.parent(), total)

    def _sub_(self, other):
        """
        Return self - other. Assumes that other is a BoundarySpaceElement.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16))
            sage: x - y # indirect doctest
            [2/7] - [13/16]
            sage: x - x # indirect doctest
            0
        """
        # Start from a copy of our coefficients and subtract other's.
        total = dict(self.__x)
        for idx, coeff in other.__x.items():
            total[idx] = (total[idx] - coeff) if idx in total else -coeff
        return BoundarySpaceElement(self.parent(), total)

    def _rmul_(self, other):
        """
        Return self \* other. Assumes that other can be coerced into
        self.parent().base_ring().

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7))
            sage: x*5 # indirect doctest
            5*[2/7]
            sage: x*-3/5 # indirect doctest
            -3/5*[2/7]
        """
        scaled = {idx: coeff * other for idx, coeff in self.__x.items()}
        return BoundarySpaceElement(self.parent(), scaled)

    def _lmul_(self, other):
        """
        Return other \* self. Assumes that other can be coerced into
        self.parent().base_ring().

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(13/16))
            sage: 11*x # indirect doctest
            11*[13/16]
            sage: 1/3*x # indirect doctest
            1/3*[13/16]
        """
        scaled = {idx: other * coeff for idx, coeff in self.__x.items()}
        return BoundarySpaceElement(self.parent(), scaled)

    def __neg__(self):
        """
        Return -self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7))
            sage: -x # indirect doctest
            -[2/7]
            sage: -x + x # indirect doctest
            0
        """
        return self * (-1)
@richcmp_method
class BoundarySpace(hecke.HeckeModule_generic):
def __init__(self,
group = arithgroup.Gamma0(1),
weight = 2,
sign = 0,
base_ring = rings.QQ,
character = None):
"""
Space of boundary symbols for a congruence subgroup of SL_2(Z).
This class is an abstract base class, so only derived classes
should be instantiated.
INPUT:
- ``weight`` - int, the weight
- ``group`` - arithgroup.congroup_generic.CongruenceSubgroup, a congruence
subgroup.
- ``sign`` - int, either -1, 0, or 1
- ``base_ring`` - rings.Ring (defaults to the
rational numbers)
EXAMPLES::
sage: B = ModularSymbols(Gamma0(11),2).boundary_space()
sage: isinstance(B, sage.modular.modsym.boundary.BoundarySpace)
True
sage: B == loads(dumps(B))
True
"""
weight = int(weight)
if weight <= 1:
raise ArithmeticError("weight must be at least 2")
if not arithgroup.is_CongruenceSubgroup(group):
raise TypeError("group must be a congruence subgroup")
sign = int(sign)
if not isinstance(base_ring, rings.Ring) and rings.is_CommutativeRing(base_ring):
raise TypeError("base_ring must be a commutative ring")
if character is None and arithgroup.is_Gamma0(group):
character = dirichlet.TrivialCharacter(group.level(), base_ring)
(self.__group, self.__weight, self.__character,
self.__sign, self.__base_ring) = (group, weight,
character, sign, base_ring)
self._known_gens = []
self._known_gens_repr = []
self._is_zero = []
hecke.HeckeModule_generic.__init__(self, base_ring, group.level())
def __richcmp__(self, other, op):
"""
EXAMPLES::
sage: B2 = ModularSymbols(11, 2).boundary_space()
sage: B4 = ModularSymbols(11, 4).boundary_space()
sage: B2 == B4
False
sage: B2 == ModularSymbols(17, 2).boundary_space()
False
"""
if type(self) is not type(other):
return NotImplemented
return richcmp((self.group(), self.weight(), self.character()),
(other.group(), other.weight(), other.character()),
op)
def _known_cusps(self):
"""
Return the list of cusps found so far.
EXAMPLES::
sage: B = ModularSymbols(Gamma1(12), 4).boundary_space()
sage: B._known_cusps()
[]
sage: ls = [ B(Cusp(i,10)) for i in range(10) ]
sage: B._known_cusps()
[0, 1/10, 1/5]
"""
return list(self._known_gens)
def is_ambient(self):
"""
Return True if self is a space of boundary symbols associated to an
ambient space of modular symbols.
EXAMPLES::
sage: M = ModularSymbols(Gamma1(6), 4)
sage: M.is_ambient()
True
sage: M.boundary_space().is_ambient()
True
"""
return True
def group(self):
"""
Return the congruence subgroup associated to this space of boundary
modular symbols.
EXAMPLES::
sage: ModularSymbols(GammaH(14,[9]), 2).boundary_space().group()
Congruence Subgroup Gamma_H(14) with H generated by [9]
"""
return self.__group
def weight(self):
"""
Return the weight of this space of boundary modular symbols.
EXAMPLES::
sage: ModularSymbols(Gamma1(9), 5).boundary_space().weight()
5
"""
return self.__weight
def character(self):
"""
Return the Dirichlet character associated to this space of boundary
modular symbols.
EXAMPLES::
sage: ModularSymbols(DirichletGroup(7).0, 6).boundary_space().character()
Dirichlet character modulo 7 of conductor 7 mapping 3 |--> zeta6
"""
return self.__character
def sign(self):
"""
Return the sign of the complex conjugation involution on this space
of boundary modular symbols.
EXAMPLES::
sage: ModularSymbols(13,2,sign=-1).boundary_space().sign()
-1
"""
return self.__sign
def gen(self, i=0):
"""
Return the i-th generator of this space.
EXAMPLES::
sage: B = ModularSymbols(Gamma0(24), 4).boundary_space()
sage: B.gen(0)
Traceback (most recent call last):
...
ValueError: only 0 generators known for Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(24) of weight 4 and over Rational Field
sage: B(Cusp(1/3))
[1/3]
sage: B.gen(0)
[1/3]
"""
if i >= len(self._known_gens) or i < 0:
raise ValueError("only %s generators known for %s"%(len(self._known_gens), self))
return BoundarySpaceElement(self, {i:1})
def __len__(self):
"""
Return the length of self, i.e. the dimension of the underlying
vector space.
EXAMPLES::
sage: B = ModularSymbols(Gamma0(36),4,sign=1).boundary_space()
sage: B.__len__()
0
sage: len(B)
0
sage: x = B(Cusp(0)) ; y = B(Cusp(oo)) ; len(B)
2
"""
return len(self._known_gens)
def free_module(self):
"""
Return the underlying free module for self.
EXAMPLES::
sage: B = ModularSymbols(Gamma1(7), 5, sign=-1).boundary_space()
sage: B.free_module()
Sparse vector space of dimension 0 over Rational Field
sage: x = B(Cusp(0)) ; y = B(Cusp(1/7)) ; B.free_module()
Sparse vector space of dimension 2 over Rational Field
"""
return free_module.FreeModule(self.__base_ring, len(self._known_gens), sparse=True)
def rank(self):
"""
The rank of the space generated by boundary symbols that have been
found so far in the course of computing the boundary map.
.. warning::
This number may change as more elements are coerced into
this space!! (This is an implementation detail that will
likely change.)
EXAMPLES::
sage: M = ModularSymbols(Gamma0(72), 2) ; B = M.boundary_space()
sage: B.rank()
0
sage: _ = [ B(x) for x in M.basis() ]
sage: B.rank()
16
"""
return len(self._known_gens)
#####################################################################
# Coercion
#####################################################################
def _coerce_in_manin_symbol(self, x):
"""
Coerce the Manin symbol x into self. (That is, return the image of
x under the boundary map.)
Assumes that x is associated to the same space of modular symbols
as self.
EXAMPLES::
sage: M = ModularSymbols(Gamma1(5), 4) ; B = M.boundary_space()
sage: [ B(x) for x in M.basis() ]
[-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]]
sage: [ B._coerce_in_manin_symbol(x) for x in M.manin_symbols_basis() ]
[-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]]
"""
i = x.i
alpha, beta = x.endpoints(self.level())
if self.weight() == 2:
return self(alpha) - self(beta)
if i == 0:
return self(alpha)
elif i == self.weight() - 2:
return -self(beta)
else:
return self(0)
def __call__(self, x):
    """
    Coerce x into a boundary symbol space.

    If x is a modular symbol (with the same group, weight, character,
    sign, and base field), this returns the image of that modular
    symbol under the boundary map.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma0(15), 2) ; B = M.boundary_space()
        sage: B(M.0)
        [Infinity] - [0]
        sage: B(Cusp(1))
        [0]
        sage: B(Cusp(oo))
        [Infinity]
        sage: B(7)
        Traceback (most recent call last):
        ...
        TypeError: Coercion of 7 (of type <type 'sage.rings.integer.Integer'>) into Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(15) of weight 2 and over Rational Field not (yet) defined.
    """
    from .ambient import ModularSymbolsAmbient
    # The zero element, used internally when a cusp class vanishes.
    if isinstance(x, int) and x == 0:
        return BoundarySpaceElement(self, {})
    elif isinstance(x, cusps.Cusp):
        return self._coerce_cusp(x)
    elif isinstance(x, ManinSymbol):
        return self._coerce_in_manin_symbol(x)
    elif element.is_ModularSymbolsElement(x):
        M = x.parent()
        if not isinstance(M, ModularSymbolsAmbient):
            raise TypeError("x (=%s) must be an element of a space of modular symbols of type ModularSymbolsAmbient"%x)
        if M.level() != self.level():
            raise TypeError("x (=%s) must have level %s but has level %s"%(
                x, self.level(), M.level()))
        S = x.manin_symbol_rep()
        if len(S) == 0:
            return self(0)
        # Push each Manin symbol through the boundary map and add up.
        return sum(c * self._coerce_in_manin_symbol(v) for c, v in S)
    elif is_FreeModuleElement(x):
        # Interpret a free module element as coefficients on the known
        # generators, indexed by position.
        y = {i: xi for i, xi in enumerate(x)}
        return BoundarySpaceElement(self, y)
    raise TypeError("Coercion of %s (of type %s) into %s not (yet) defined."%(x, type(x), self))
def _repr_(self):
    """
    Return the string representation of self.

    EXAMPLES::

        sage: sage.modular.modsym.boundary.BoundarySpace(Gamma0(3), 2)._repr_()
        'Space of Boundary Modular Symbols of weight 2 for Congruence Subgroup Gamma0(3) with sign 0 and character [1] over Rational Field'
    """
    template = ("Space of Boundary Modular Symbols of weight %s for"
                " %s with sign %s and character %s over %s")
    return template % (self.weight(), self.group(), self.sign(),
                       self.character()._repr_short_(), self.base_ring())
def _cusp_index(self, cusp):
    """
    Return the index of the first cusp in self._known_cusps()
    equivalent to cusp, or -1 if cusp is not equivalent to any cusp
    found so far.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma0(21), 4).boundary_space()
        sage: B._cusp_index(Cusp(0))
        -1
        sage: _ = B(Cusp(oo))
        sage: _ = B(Cusp(0))
        sage: B._cusp_index(Cusp(0))
        1
    """
    # Linear scan over the cusps discovered so far.  (A previously
    # computed local ``N = self.level()`` was unused and has been
    # removed.)
    for i, g in enumerate(self._known_gens):
        if self._is_equiv(cusp, g):
            return i
    return -1
class BoundarySpace_wtk_g0(BoundarySpace):
    def __init__(self, level, weight, sign, F):
        """
        Initialize a space of boundary symbols of weight k for Gamma_0(N)
        over base field F.

        INPUT:

        - ``level`` - int, the level

        - ``weight`` - integer weight = 2.

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - field

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(2), 5).boundary_space()
            sage: type(B)
            <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_g0_with_category'>
            sage: B == loads(dumps(B))
            True
        """
        level = int(level)
        sign = int(sign)
        weight = int(weight)
        if sign not in [-1, 0, 1]:
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        if level <= 0:
            raise ArithmeticError("level must be positive")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=arithgroup.Gamma0(level),
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(97), 3).boundary_space()
            sage: B._repr_()
            'Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(97) of weight 3 and over Rational Field'
        """
        return ("Space of Boundary Modular Symbols for %s of weight %s " + \
                "and over %s")%(self.group(), self.weight(), self.base_ring())

    def _coerce_cusp(self, c):
        """
        Coerce the cusp c into this boundary symbol space.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(17), 6).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            [0]
            sage: B = ModularSymbols(Gamma0(17), 6, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B = ModularSymbols(Gamma0(16), 4).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [[0], [1/4], [1/2], [3/4]]
            sage: B = ModularSymbols(Gamma0(16), 4, sign=1).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [[0], [1/4], [1/2], [1/4]]
            sage: B = ModularSymbols(Gamma0(16), 4, sign=-1).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [0, [1/4], 0, -[1/4]]
        """
        # Odd weight spaces for Gamma0 are zero, so every cusp maps to 0.
        if self.weight() % 2 != 0:
            return self(0)
        # see if we've already found this cusp
        i = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: 1})
        # see if we've already found -c
        sign = self.sign()
        if sign != 0:
            i2 = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # See if the new cusp is killed by sign relations. The
        # relevant relations (for cusps other than 0 and Infinity)
        # are:
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = [gamma * (u,v)]
        #    [(-u,v)] = sign * [(u,v)]
        #
        # So since k is always even on Gamma0, we have that [(u,v)] =
        # 0 from the above relations exactly when (u,v) = gamma*(-u,v)
        # and the sign is -1.
        if sign == -1:
            # NOTE: this code looks wrong. One should do the
            # following:
            #
            #  - if c is 0, if the sign is -1, append & return 0
            #  - if c is Infinity, then if the sign
            #    is not equal to (-1)**self.weight(), then
            #    append & return 0
            #  - otherwise, if the sign is -1, and c is
            #    equivalent to -c, append & return 0.
            #
            # Interestingly, the code below does precisely that.
            # (It's important to recall that for Gamma0, odd weight
            # spaces are 0.)
            if self._is_equiv(c, -c):
                self._is_zero.append(len(g) - 1)
                return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})

    def _is_equiv(self, c1, c2):
        """
        Determine whether or not c1 and c2 are equivalent for self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(24), 6).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(oo))
            False
            sage: B._is_equiv(Cusp(0), Cusp(1))
            True
        """
        return c1.is_gamma0_equiv(c2, self.level())
class BoundarySpace_wtk_g1(BoundarySpace):
    def __init__(self, level, weight, sign, F):
        """
        Initialize a space of boundary modular symbols for Gamma1(N).

        INPUT:

        - ``level`` - int, the level

        - ``weight`` - int, the weight = 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - base ring

        EXAMPLES::

            sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_g1
            sage: B = BoundarySpace_wtk_g1(17, 2, 0, QQ) ; B
            Boundary Modular Symbols space for Gamma_1(17) of weight 2 over Rational Field
            sage: B == loads(dumps(B))
            True
        """
        level = int(level)
        sign = int(sign)
        if sign not in [-1, 0, 1]:
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        if level <= 0:
            raise ArithmeticError("level must be positive")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=arithgroup.Gamma1(level),
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(Gamma1(5), 3, sign=1).boundary_space()._repr_()
            'Boundary Modular Symbols space for Gamma_1(5) of weight 3 over Rational Field'
        """
        return ("Boundary Modular Symbols space for Gamma_1(%s) of weight %s " + \
                "over %s")%(self.level(), self.weight(), self.base_ring())

    def _is_equiv(self, c1, c2):
        """
        Return True if c1 and c2 are equivalent cusps for self, and False
        otherwise.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(10), 4).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(1/5))
            (False, 0)
            sage: B._is_equiv(Cusp(4/5), Cusp(1/5))
            (True, -1)
            sage: B._is_equiv(Cusp(-4/5), Cusp(1/5))
            (True, 1)
        """
        return c1.is_gamma1_equiv(c2, self.level())

    def _cusp_index(self, cusp):
        """
        Returns a pair (i, t), where i is the index of the first cusp in
        self._known_cusps() which is equivalent to cusp, and t is 1 or -1
        as cusp is Gamma1-equivalent to plus or minus
        self._known_cusps()[i]. If cusp is not equivalent to any known
        cusp, return (-1, 0).

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(11),2).boundary_space()
            sage: B._cusp_index(Cusp(1/11))
            (-1, 0)
            sage: B._cusp_index(Cusp(10/11))
            (-1, 0)
            sage: B._coerce_cusp(Cusp(1/11))
            [1/11]
            sage: B._cusp_index(Cusp(1/11))
            (0, 1)
            sage: B._cusp_index(Cusp(10/11))
            (0, -1)
        """
        # Linear scan; the previously computed ``N = self.level()`` was
        # unused and has been removed.
        for i, g in enumerate(self._known_gens):
            t, eps = self._is_equiv(cusp, g)
            if t:
                return i, eps
        return -1, 0

    def _coerce_cusp(self, c):
        """
        Coerce a cusp into this boundary symbol space.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(4), 4).boundary_space()
            sage: B._coerce_cusp(Cusp(1/2))
            [1/2]
            sage: B._coerce_cusp(Cusp(1/4))
            [1/4]
            sage: B._coerce_cusp(Cusp(3/4))
            [1/4]
            sage: B = ModularSymbols(Gamma1(5), 3, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B._coerce_cusp(Cusp(oo))
            [Infinity]
            sage: B = ModularSymbols(Gamma1(2), 3, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B._coerce_cusp(Cusp(oo))
            0
            sage: B = ModularSymbols(Gamma1(7), 3).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [[0], [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]]
            sage: B._is_equiv(Cusp(1,6), Cusp(5,6))
            (True, 1)
            sage: B._is_equiv(Cusp(1,6), Cusp(0))
            (True, -1)
            sage: B(Cusp(0))
            [0]
            sage: B = ModularSymbols(Gamma1(7), 3, sign=1).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [[0], 0, 0, 0, 0, 0, 0]
            sage: B = ModularSymbols(Gamma1(7), 3, sign=-1).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [0, [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]]
        """
        N = self.level()
        k = self.weight()
        sign = self.sign()
        # Have we seen this cusp (up to sign eps) already?
        i, eps = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: eps**k})
        # Have we seen -c already?
        if sign != 0:
            i2, eps = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign * (eps**k)})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # Does cusp class vanish because of - relations? (See note at top
        # of file.)
        if k % 2 != 0:
            (u, v) = (c.numerator(), c.denominator())
            if (2*v) % N == 0:
                if (2*u) % v.gcd(N) == 0:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        # Does class vanish because of sign relations? The relevant
        # relations are
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = sign * [(-u,v)]
        #    [(u,v)] = eps * (-1)^k [(-u,v)]
        #
        # where, in the last line, (u,v) is Gamma1-equivalent to
        # (-u,v) or (u,-v) as eps is 1 or -1.
        #
        # Thus (other than for 0 and Infinity), we have that [(u,v)]
        # can only be killed by sign relations when:
        #
        #  - (u,v) is Gamma1-equivalent to (-u,v) or (u,-v), and
        #  - eps is 1 and sign is -1, or eps is -1 and sign is not
        #    (-1)^k.
        #
        if sign:
            if c.is_infinity():
                if sign != (-1)**self.weight():
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            elif c.is_zero():
                if (sign == -1):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            else:
                t, eps = self._is_equiv(c, -c)
                if t and ((eps == 1 and sign == -1) or
                          (eps == -1 and sign != (-1)**self.weight())):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})
class BoundarySpace_wtk_gamma_h(BoundarySpace):
    def __init__(self, group, weight, sign, F):
        """
        Initialize a space of boundary modular symbols for GammaH(N).

        INPUT:

        - ``group`` - congruence subgroup Gamma_H(N).

        - ``weight`` - int, the weight = 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - base ring

        EXAMPLES::

            sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_gamma_h
            sage: B = BoundarySpace_wtk_gamma_h(GammaH(13,[3]), 2, 0, QQ) ; B
            Boundary Modular Symbols space for Congruence Subgroup Gamma_H(13) with H generated by [3] of weight 2 over Rational Field
            sage: B == loads(dumps(B))
            True
        """
        sign = int(sign)
        if sign not in [-1, 0, 1]:
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=group,
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(GammaH(7,[2]), 4).boundary_space()._repr_()
            'Boundary Modular Symbols space for Congruence Subgroup Gamma_H(7) with H generated by [2] of weight 4 over Rational Field'
        """
        return ("Boundary Modular Symbols space for %s of weight %s " + \
                "over %s")%(self.group(), self.weight(), self.base_ring())

    def _is_equiv(self, c1, c2):
        """
        Return a pair of the form (b, t), where b is True if c1 and c2 are
        equivalent cusps for self, and False otherwise, and t gives extra
        information about the equivalence between c1 and c2.

        EXAMPLES::

            sage: B = ModularSymbols(GammaH(7,[2]), 4).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(1/7))
            (False, 0)
            sage: B._is_equiv(Cusp(2/7), Cusp(1/7))
            (True, 1)
            sage: B._is_equiv(Cusp(3/7), Cusp(1/7))
            (True, -1)
        """
        return c1.is_gamma_h_equiv(c2, self.group())

    def _cusp_index(self, cusp):
        """
        Returns a pair (i, t), where i is the index of the first cusp in
        self._known_cusps() which is equivalent to cusp, and t is 1 or -1
        as cusp is GammaH-equivalent to plus or minus
        self._known_cusps()[i]. If cusp is not equivalent to any known
        cusp, return (-1, 0).

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(9,[4]), 3)
            sage: B = M.boundary_space()
            sage: B._cusp_index(Cusp(0))
            (-1, 0)
            sage: _ = [ B(x) for x in M.basis() ]
            sage: B._cusp_index(Cusp(0))
            (1, -1)
            sage: B._cusp_index(Cusp(5/6))
            (3, 1)
        """
        # Linear scan; the previously computed ``N = self.level()`` was
        # unused and has been removed.
        for i, g in enumerate(self._known_gens):
            t, eps = self._is_equiv(cusp, g)
            if t:
                return i, eps
        return -1, 0

    def _coerce_cusp(self, c):
        """
        Coerce the cusp c into self.

        EXAMPLES::

            sage: B = ModularSymbols(GammaH(10,[9]), 2).boundary_space()
            sage: B(Cusp(0))
            [0]
            sage: B(Cusp(1/3))
            [1/3]
            sage: B(Cusp(1/13))
            [1/3]
            sage: B = ModularSymbols(GammaH(25, [6]), 2).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            [0]

        ::

            sage: B = ModularSymbols(GammaH(11,[3]), 3).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [[0],
             [1/11],
             -[1/11],
             [1/11],
             [1/11],
             [1/11],
             -[1/11],
             -[1/11],
             -[1/11],
             [1/11],
             -[1/11]]
            sage: B._is_equiv(Cusp(0), Cusp(1,11))
            (False, 0)
            sage: B._is_equiv(Cusp(oo), Cusp(1,11))
            (True, 1)
            sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=1).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=-1).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [0,
             [1/11],
             -[1/11],
             [1/11],
             [1/11],
             [1/11],
             -[1/11],
             -[1/11],
             -[1/11],
             [1/11],
             -[1/11]]
        """
        N = self.level()
        k = self.weight()
        sign = self.sign()
        # Have we seen this cusp (up to sign eps) already?
        i, eps = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: eps**k})
        # Have we seen -c already?
        if sign != 0:
            i2, eps = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign * (eps**k)})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # Does cusp class vanish because of - relations? (See note at top
        # of file.)
        if k % 2 != 0:
            (u, v) = (c.numerator(), c.denominator())
            if (2*v) % N == 0:
                if (2*u) % v.gcd(N) == 0:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        # Does class vanish because of sign relations? The relevant
        # relations are
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = sign * [(-u,v)]
        #    [(u,v)] = eps * (-1)^k [(-u,v)]
        #
        # where, in the last line, (u,v) is GammaH-equivalent to
        # (-u,v) or (u,-v) as eps is 1 or -1.
        #
        # Thus (other than for 0 and Infinity), we have that [(u,v)]
        # can only be killed by sign relations when:
        #
        #  - (u,v) is GammaH-equivalent to (-u,v) or (u,-v), and
        #  - eps is 1 and sign is -1, or eps is -1 and sign is not
        #    (-1)^k.
        #
        # (Notice that while this description looks identical to that
        # of Gamma1, it differs in that the condition of being GammaH
        # equivalent is weaker than that of being Gamma1 equivalent
        # when H is larger than {1}.)
        #
        if sign:
            if c.is_infinity():
                if sign != (-1)**self.weight():
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            elif c.is_zero():
                if (sign == -1):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            else:
                t, eps = self._is_equiv(c, -c)
                if t and ((eps == 1 and sign == -1) or
                          (eps == -1 and sign != (-1)**self.weight())):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})
class BoundarySpace_wtk_eps(BoundarySpace):
    def __init__(self, eps, weight, sign=0):
        """
        Space of boundary modular symbols with given weight, character, and
        sign.

        INPUT:

        - ``eps`` - dirichlet.DirichletCharacter, the
          "Nebentypus" character.

        - ``weight`` - int, the weight = 2

        - ``sign`` - int, either -1, 0, or 1

        EXAMPLES::

            sage: B = ModularSymbols(DirichletGroup(6).0, 4).boundary_space() ; B
            Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field
            sage: type(B)
            <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_eps_with_category'>
            sage: B == loads(dumps(B))
            True
        """
        level = eps.modulus()
        sign = int(sign)
        self.__eps = eps
        if sign not in [-1, 0, 1]:
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        if level <= 0:
            raise ArithmeticError("level must be positive")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=arithgroup.Gamma1(level),
                               sign=sign,
                               base_ring=eps.base_ring(),
                               character=eps)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(DirichletGroup(6).0, 4).boundary_space()._repr_()
            'Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field'
        """
        return ("Boundary Modular Symbols space of level %s, weight %s, character %s " + \
                "and dimension %s over %s")%(self.level(), self.weight(),
                    self.character()._repr_short_(), self.rank(), self.base_ring())

    def _is_equiv(self, c1, c2):
        """
        Return a pair (b, t), where b is True if c1 and c2 are equivalent
        cusps for self, and False otherwise, and t gives extra information
        about the equivalence of c1 and c2.

        EXAMPLES::

            sage: B = ModularSymbols(DirichletGroup(12).1, 3).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(1/3))
            (False, None)
            sage: B._is_equiv(Cusp(2/3), Cusp(1/3))
            (True, 5)
            sage: B._is_equiv(Cusp(3/4), Cusp(1/4))
            (True, 7)
        """
        return c1.is_gamma0_equiv(c2, self.level(), transformation=True)

    def _cusp_index(self, cusp):
        """
        Returns a pair (i, s), where i is the index of the first cusp in
        self._known_cusps() which is equivalent to cusp, and such that
        cusp is Gamma0-equivalent to self.character()(s) times
        self._known_cusps()[i]. If cusp is not equivalent to any known
        cusp, return (-1, 0).

        EXAMPLES::

            sage: B = ModularSymbols(DirichletGroup(11).0**3, 5).boundary_space()
            sage: B._cusp_index(Cusp(0))
            (-1, 0)
            sage: B._coerce_cusp(Cusp(0))
            [0]
            sage: B._cusp_index(Cusp(0))
            (0, 1)
            sage: B._coerce_cusp(Cusp(1,11))
            [1/11]
            sage: B._cusp_index(Cusp(2,11))
            (1, -zeta10^2)
        """
        # Linear scan; the previously computed ``N = self.level()`` was
        # unused and has been removed.  The transformation returned by
        # _is_equiv is evaluated through the Nebentypus character.
        for i, g in enumerate(self._known_gens):
            t, s = self._is_equiv(cusp, g)
            if t:
                return i, self.__eps(s)
        return -1, 0

    def _coerce_cusp(self, c):
        """
        Coerce the cusp c into self.

        EXAMPLES::

            sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=0).boundary_space()
            sage: [ B(Cusp(i,13)) for i in range(13) ]
            [[0],
             [1/13],
             -zeta4*[1/13],
             [1/13],
             -[1/13],
             -zeta4*[1/13],
             -zeta4*[1/13],
             zeta4*[1/13],
             zeta4*[1/13],
             [1/13],
             -[1/13],
             zeta4*[1/13],
             -[1/13]]
            sage: B._is_equiv(Cusp(oo), Cusp(1,13))
            (True, 1)
            sage: B._is_equiv(Cusp(0), Cusp(1,13))
            (False, None)
            sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=1).boundary_space()
            sage: [ B(Cusp(i,13)) for i in range(13) ]
            [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            sage: B._coerce_cusp(Cusp(oo))
            0
            sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=-1).boundary_space()
            sage: [ B(Cusp(i,13)) for i in range(13) ]
            [0,
             [1/13],
             -zeta4*[1/13],
             [1/13],
             -[1/13],
             -zeta4*[1/13],
             -zeta4*[1/13],
             zeta4*[1/13],
             zeta4*[1/13],
             [1/13],
             -[1/13],
             zeta4*[1/13],
             -[1/13]]
            sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            [0]
            sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
        """
        N = self.level()
        k = self.weight()
        sign = self.sign()
        # Have we seen this cusp (up to a character value eps) already?
        i, eps = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: eps})
        # Have we seen -c already?
        if sign != 0:
            i2, eps = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign * eps})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        ###############################################################
        # TODO?: This is a very dumb way to check for solutions to an
        # equation (see Prop 2.30 of Stein's Ph.D. thesis for which
        # equation); however, computing the cusp equivalence for the
        # boundary map takes much less time than computing the kernel
        # of the boundary map, so it's not worth optimizing this now.
        ###############################################################
        (u, v) = (c.numerator(), c.denominator())
        gcd = arith.gcd
        d = gcd(v, N)
        x = N // d
        for j in range(d):
            alpha = 1 - j*x
            if gcd(alpha, N) == 1:
                if (v*(1-alpha)) % N == 0 and (u*(1-alpha)) % d == 0:
                    if self.__eps(alpha) != 1:
                        self._is_zero.append(len(g) - 1)
                        return self(0)
        # Does class vanish because of sign relations? The relevant
        # relations are
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = sign * [(-u,v)]
        #    [(u,v)] = eps(d) * [(-u,v)]
        #
        # where, in the last line, eps is the character defining
        # our space, and [a,b;c,d] takes (u,v) to (-u,v).
        #
        # Thus (other than for 0 and Infinity), we have that [(u,v)]
        # can only be killed by sign relations when the sign is not
        # equal to eps(d).
        #
        if sign:
            if c.is_zero():
                if sign == -1:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            elif c.is_infinity():
                if sign != (-1)**self.weight():
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            else:
                t, s = self._is_equiv(c, -c)
                if t:
                    if sign != self.__eps(s):
                        self._is_zero.append(len(g) - 1)
                        return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})
| # -*- coding: utf-8 -*-
r"""
Space of boundary modular symbols
Used mainly for computing the cuspidal subspace of modular symbols. The space
of boundary symbols of sign 0 is isomorphic as a Hecke module to the dual of
the space of Eisenstein series, but this does not give a useful method of
computing Eisenstein series, since there is no easy way to extract the constant
terms.
We represent boundary modular symbols as a sum of Manin symbols of the form
`[P, u/v]`, where `u/v` is a cusp for our group `G`. The group of boundary
modular symbols naturally embeds into a vector space `B_k(G)` (see Stein,
section 8.4, or Merel, section 1.4, where this space is called `\CC[\Gamma
\backslash \QQ]_k`, for a definition), which is a finite dimensional `\QQ`
vector space of dimension equal to the number of cusps for `G`. The embedding
takes `[P, u/v]` to `P(u,v)\cdot [(u,v)]`. We represent the basis vectors by
pairs `[(u,v)]` with u, v coprime. On `B_k(G)`, we have the relations
.. MATH::
[\gamma \cdot (u,v)] = [(u,v)]
for all `\gamma \in G` and
.. MATH::
[(\lambda u, \lambda v)] = \operatorname{sign}(\lambda)^k [(u,v)]
for all `\lambda \in \QQ^\times`.
It's possible for these relations to kill a class, i.e., for a pair `[(u,v)]`
to be 0. For example, when `N=4` and `k=3` then `(-1,-2)` is equivalent mod
`\Gamma_1(4)` to `(1,2)` since `2=-2 \bmod 4` and `1=-1 \bmod 2`. But since `k`
is odd, `[(-1,-2)]` is also equivalent to `-[(1,2)]`. Thus this symbol is
equivalent to its negative, hence 0 (notice that this wouldn't be the case in
characteristic 2). This happens for any irregular cusp when the weight is odd;
there are no irregular cusps on `\Gamma_1(N)` except when `N = 4`, but there
can be more on `\Gamma_H` groups. See also prop 2.30 of Stein's Ph.D. thesis.
In addition, in the case that our space is of sign `\sigma = 1` or `-1`, we
also have the relation `[(-u,v)] = \sigma \cdot [(u,v)]`. This relation can
also combine with the above to kill a cusp class - for instance, take (u,v) =
(1,3) for `\Gamma_1(5)`. Then since the cusp `\tfrac{1}{3}` is
`\Gamma_1(5)`-equivalent to the cusp `-\tfrac{1}{3}`, we have that `[(1,3)] =
[(-1,3)]`. Now, on the minus subspace, we also have that `[(-1,3)] = -[(1,3)]`,
which means this class must vanish. Notice that this cannot be used to show
that `[(1,0)]` or `[(0,1)]` is 0.
.. note::
Special care must be taken when working with the images of the cusps 0 and
`\infty` in `B_k(G)`. For all cusps *except* 0 and `\infty`, multiplying the
cusp by -1 corresponds to taking `[(u,v)]` to `[(-u,v)]` in `B_k(G)`. This
means that `[(u,v)]` is equivalent to `[(-u,v)]` whenever `\tfrac{u}{v}` is
equivalent to `-\tfrac{u}{v}`, except in the case of 0 and `\infty`. We
have the following conditions for `[(1,0)]` and `[(0,1)]`:
- `[(0,1)] = \sigma \cdot [(0,1)]`, so `[(0,1)]` is 0 exactly when `\sigma =
-1`.
- `[(1,0)] = \sigma \cdot [(-1,0)]` and `[(1,0)] = (-1)^k [(-1,0)]`, so
`[(1,0)] = 0` whenever `\sigma \ne (-1)^k`.
.. note::
For all the spaces of boundary symbols below, no work is done to determine
the cusps for G at creation time. Instead, cusps are added as they are
discovered in the course of computation. As a result, the rank of a space
can change as a computation proceeds.
REFERENCES:
- Merel, "Universal Fourier expansions of modular
forms." Springer LNM 1585 (1994), pg. 59-95.
- Stein, "Modular Forms, a computational approach." AMS (2007).
"""
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import
from six.moves import range
from sage.misc.misc import repr_lincomb
from sage.structure.richcmp import richcmp_method, richcmp
import sage.modules.free_module as free_module
from sage.modules.free_module_element import is_FreeModuleElement
import sage.modular.arithgroup.all as arithgroup
import sage.modular.cusps as cusps
import sage.modular.dirichlet as dirichlet
import sage.modular.hecke.all as hecke
from sage.modular.modsym.manin_symbol import ManinSymbol
import sage.rings.all as rings
import sage.arith.all as arith
from . import element
class BoundarySpaceElement(hecke.HeckeModuleElement):
    def __init__(self, parent, x):
        """
        Create a boundary symbol.

        INPUT:

        - ``parent`` - BoundarySpace; a space of boundary
          modular symbols

        - ``x`` - a dict with integer keys and values in the
          base field of parent.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(32), sign=-1).boundary_space()
            sage: B(Cusp(1,8))
            [1/8]
            sage: B.0
            [1/8]
            sage: type(B.0)
            <class 'sage.modular.modsym.boundary.BoundarySpaceElement'>
        """
        # Sparse representation: generator index -> coefficient.
        self.__x = x
        self.__vec = parent.free_module()(x)
        hecke.HeckeModuleElement.__init__(self, parent, self.__vec)

    def coordinate_vector(self):
        r"""
        Return self as a vector on the QQ-vector space with basis
        self.parent()._known_cusps().

        EXAMPLES::

            sage: B = ModularSymbols(18,4,sign=1).boundary_space()
            sage: x = B(Cusp(1/2)) ; x
            [1/2]
            sage: x.coordinate_vector()
            (1)
            sage: ((18/5)*x).coordinate_vector()
            (18/5)
            sage: B(Cusp(0))
            [0]
            sage: x.coordinate_vector()
            (1)
            sage: x = B(Cusp(1/2)) ; x
            [1/2]
            sage: x.coordinate_vector()
            (1, 0)
        """
        # The vector was frozen at construction time, so its length
        # reflects the number of cusps known when self was created.
        return self.__vec

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0))._repr_()
            '[0]'
            sage: (-6*ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0)))._repr_()
            '-6*[0]'
        """
        g = self.parent()._known_gens_repr
        return repr_lincomb([(g[i], c) for i, c in self.__x.items()])

    # can't inherit arithmetic operations from HeckeModule, because basis
    # dimension might change!

    def _add_(self, other):
        """
        Return self + other. Assumes that other is a BoundarySpaceElement.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16))
            sage: x + y # indirect doctest
            [2/7] + [13/16]
            sage: x + x # indirect doctest
            2*[2/7]
        """
        z = dict(other.__x)
        # Add coefficients index-by-index.
        for i, c in self.__x.items():
            if i in z:
                z[i] += c
            else:
                z[i] = c
        return BoundarySpaceElement(self.parent(), z)

    def _sub_(self, other):
        """
        Return self - other. Assumes that other is a BoundarySpaceElement.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16))
            sage: x - y # indirect doctest
            [2/7] - [13/16]
            sage: x - x # indirect doctest
            0
        """
        z = dict(self.__x)
        # Subtract coefficients index-by-index.
        for i, c in other.__x.items():
            if i in z:
                z[i] -= c
            else:
                z[i] = -c
        return BoundarySpaceElement(self.parent(), z)

    def _rmul_(self, other):
        """
        Return self \* other. Assumes that other can be coerced into
        self.parent().base_ring().

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7))
            sage: x*5 # indirect doctest
            5*[2/7]
            sage: x*-3/5 # indirect doctest
            -3/5*[2/7]
        """
        # Scale every coefficient on the right.
        return BoundarySpaceElement(self.parent(),
                                    {i: c * other for i, c in self.__x.items()})

    def _lmul_(self, other):
        """
        Return other \* self. Assumes that other can be coerced into
        self.parent().base_ring().

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(13/16))
            sage: 11*x # indirect doctest
            11*[13/16]
            sage: 1/3*x # indirect doctest
            1/3*[13/16]
        """
        # Scale every coefficient on the left.
        return BoundarySpaceElement(self.parent(),
                                    {i: other * c for i, c in self.__x.items()})

    def __neg__(self):
        """
        Return -self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(16), 4).boundary_space()
            sage: x = B(Cusp(2/7))
            sage: -x # indirect doctest
            -[2/7]
            sage: -x + x # indirect doctest
            0
        """
        return self*(-1)
@richcmp_method
class BoundarySpace(hecke.HeckeModule_generic):
def __init__(self,
             group=arithgroup.Gamma0(1),
             weight=2,
             sign=0,
             base_ring=rings.QQ,
             character=None):
    """
    Space of boundary symbols for a congruence subgroup of SL_2(Z).

    This class is an abstract base class, so only derived classes
    should be instantiated.

    INPUT:

    - ``weight`` - int, the weight

    - ``group`` - arithgroup.congroup_generic.CongruenceSubgroup, a congruence
      subgroup.

    - ``sign`` - int, either -1, 0, or 1

    - ``base_ring`` - rings.Ring (defaults to the
      rational numbers)

    EXAMPLES::

        sage: B = ModularSymbols(Gamma0(11),2).boundary_space()
        sage: isinstance(B, sage.modular.modsym.boundary.BoundarySpace)
        True
        sage: B == loads(dumps(B))
        True
    """
    weight = int(weight)
    if weight <= 1:
        raise ArithmeticError("weight must be at least 2")
    if not arithgroup.is_CongruenceSubgroup(group):
        raise TypeError("group must be a congruence subgroup")
    sign = int(sign)
    # Fixed precedence bug: the original test was
    # ``not isinstance(...) and rings.is_CommutativeRing(...)``, which
    # (by operator precedence) could essentially never raise; the
    # intended check is that base_ring is a commutative ring.
    if not (isinstance(base_ring, rings.Ring)
            and rings.is_CommutativeRing(base_ring)):
        raise TypeError("base_ring must be a commutative ring")
    if character is None and arithgroup.is_Gamma0(group):
        character = dirichlet.TrivialCharacter(group.level(), base_ring)
    (self.__group, self.__weight, self.__character,
     self.__sign, self.__base_ring) = (group, weight,
                                       character, sign, base_ring)
    # Cusps discovered so far, their printed forms, and the indices of
    # the classes known to vanish because of sign/weight relations.
    self._known_gens = []
    self._known_gens_repr = []
    self._is_zero = []
    hecke.HeckeModule_generic.__init__(self, base_ring, group.level())
def __richcmp__(self, other, op):
    """
    EXAMPLES::

        sage: B2 = ModularSymbols(11, 2).boundary_space()
        sage: B4 = ModularSymbols(11, 4).boundary_space()
        sage: B2 == B4
        False
        sage: B2 == ModularSymbols(17, 2).boundary_space()
        False
    """
    # Only spaces of exactly the same type are comparable.
    if type(self) is not type(other):
        return NotImplemented
    lhs = (self.group(), self.weight(), self.character())
    rhs = (other.group(), other.weight(), other.character())
    return richcmp(lhs, rhs, op)
def _known_cusps(self):
    """
    Return the list of cusps found so far.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma1(12), 4).boundary_space()
        sage: B._known_cusps()
        []
        sage: ls = [ B(Cusp(i,10)) for i in range(10) ]
        sage: B._known_cusps()
        [0, 1/10, 1/5]
    """
    # Return a copy so callers cannot mutate our internal state.
    return self._known_gens[:]
def is_ambient(self):
    """
    Return True if self is a space of boundary symbols associated to an
    ambient space of modular symbols.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(6), 4)
        sage: M.is_ambient()
        True
        sage: M.boundary_space().is_ambient()
        True
    """
    # Boundary spaces are always treated as ambient here.
    return True
def group(self):
    """
    Return the congruence subgroup associated to this space of boundary
    modular symbols.

    EXAMPLES::

        sage: ModularSymbols(GammaH(14,[9]), 2).boundary_space().group()
        Congruence Subgroup Gamma_H(14) with H generated by [9]
    """
    return self.__group
def weight(self):
    """
    Return the weight of this space of boundary modular symbols.

    EXAMPLES::

        sage: ModularSymbols(Gamma1(9), 5).boundary_space().weight()
        5
    """
    # Private (name-mangled) attribute set in __init__.
    return self.__weight
def character(self):
    """
    Return the Dirichlet character attached to this space of boundary
    modular symbols (``None`` unless one was supplied or implied).

    EXAMPLES::

        sage: ModularSymbols(DirichletGroup(7).0, 6).boundary_space().character()
        Dirichlet character modulo 7 of conductor 7 mapping 3 |--> zeta6
    """
    # Private (name-mangled) attribute set in __init__.
    return self.__character
def sign(self):
    """
    Return the sign (+1, -1 or 0) of the complex conjugation involution
    acting on this space of boundary modular symbols.

    EXAMPLES::

        sage: ModularSymbols(13,2,sign=-1).boundary_space().sign()
        -1
    """
    # Private (name-mangled) attribute set in __init__.
    return self.__sign
def gen(self, i=0):
    """
    Return the ``i``-th generator of this space.

    Note that generators only come into existence as cusps are coerced
    in, so this may raise even for small ``i``.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma0(24), 4).boundary_space()
        sage: B.gen(0)
        Traceback (most recent call last):
        ...
        ValueError: only 0 generators known for Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(24) of weight 4 and over Rational Field
        sage: B(Cusp(1/3))
        [1/3]
        sage: B.gen(0)
        [1/3]
    """
    if not (0 <= i < len(self._known_gens)):
        raise ValueError("only %s generators known for %s" % (len(self._known_gens), self))
    return BoundarySpaceElement(self, {i: 1})
def __len__(self):
    """
    Return the number of generators found so far, i.e. the current
    dimension of the underlying vector space.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma0(36),4,sign=1).boundary_space()
        sage: B.__len__()
        0
        sage: len(B)
        0
        sage: x = B(Cusp(0)) ; y = B(Cusp(oo)) ; len(B)
        2
    """
    # One generator per known cusp class.
    return len(self._known_gens)
def free_module(self):
    """
    Return the underlying free module of this space.

    Its rank equals the number of cusp classes found so far and may
    grow as more elements are coerced in.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma1(7), 5, sign=-1).boundary_space()
        sage: B.free_module()
        Sparse vector space of dimension 0 over Rational Field
        sage: x = B(Cusp(0)) ; y = B(Cusp(1/7)) ; B.free_module()
        Sparse vector space of dimension 2 over Rational Field
    """
    current_rank = len(self._known_gens)
    return free_module.FreeModule(self.__base_ring, current_rank, sparse=True)
def rank(self):
    """
    Return the rank of the space generated by the boundary symbols
    found so far while computing the boundary map.

    .. warning::

        This number may change as more elements are coerced into
        this space!! (This is an implementation detail that will
        likely change.)

    EXAMPLES::

        sage: M = ModularSymbols(Gamma0(72), 2) ; B = M.boundary_space()
        sage: B.rank()
        0
        sage: _ = [ B(x) for x in M.basis() ]
        sage: B.rank()
        16
    """
    # One basis vector per known cusp class.
    return len(self._known_gens)
#####################################################################
# Coercion
#####################################################################
def _coerce_in_manin_symbol(self, x):
    """
    Coerce the Manin symbol ``x`` into self, i.e. return the image of
    ``x`` under the boundary map.

    Assumes that ``x`` belongs to the space of modular symbols
    associated with self.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(5), 4) ; B = M.boundary_space()
        sage: [ B(x) for x in M.basis() ]
        [-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]]
        sage: [ B._coerce_in_manin_symbol(x) for x in M.manin_symbols_basis() ]
        [-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]]
    """
    alpha, beta = x.endpoints(self.level())
    k = self.weight()
    # Weight 2: both endpoints contribute.
    if k == 2:
        return self(alpha) - self(beta)
    # Higher weight: only the extreme monomials P = X^i Y^(k-2-i)
    # (i = 0 or i = k-2) survive evaluation at the cusps.
    idx = x.i
    if idx == 0:
        return self(alpha)
    if idx == k - 2:
        return -self(beta)
    return self(0)
def __call__(self, x):
    """
    Coerce ``x`` into this boundary symbol space.

    If ``x`` is a modular symbol (with the same group, weight,
    character, sign, and base field), this returns the image of that
    modular symbol under the boundary map.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma0(15), 2) ; B = M.boundary_space()
        sage: B(M.0)
        [Infinity] - [0]
        sage: B(Cusp(1))
        [0]
        sage: B(Cusp(oo))
        [Infinity]
        sage: B(7)
        Traceback (most recent call last):
        ...
        TypeError: Coercion of 7 (of type <type 'sage.rings.integer.Integer'>) into Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(15) of weight 2 and over Rational Field not (yet) defined.
    """
    from .ambient import ModularSymbolsAmbient
    # Python int 0 -> the zero element (note: Sage Integers are rejected).
    if isinstance(x, int) and x == 0:
        return BoundarySpaceElement(self, {})
    if isinstance(x, cusps.Cusp):
        return self._coerce_cusp(x)
    if isinstance(x, ManinSymbol):
        return self._coerce_in_manin_symbol(x)
    if element.is_ModularSymbolsElement(x):
        M = x.parent()
        if not isinstance(M, ModularSymbolsAmbient):
            raise TypeError("x (=%s) must be an element of a space of modular symbols of type ModularSymbolsAmbient" % x)
        if M.level() != self.level():
            raise TypeError("x (=%s) must have level %s but has level %s" % (
                x, self.level(), M.level()))
        rep = x.manin_symbol_rep()
        if len(rep) == 0:
            return self(0)
        return sum(coeff * self._coerce_in_manin_symbol(sym)
                   for coeff, sym in rep)
    if is_FreeModuleElement(x):
        coeffs = {j: x[j] for j in range(len(x))}
        return BoundarySpaceElement(self, coeffs)
    raise TypeError("Coercion of %s (of type %s) into %s not (yet) defined." % (x, type(x), self))
def _repr_(self):
    """
    Return the string representation of self.

    EXAMPLES::

        sage: sage.modular.modsym.boundary.BoundarySpace(Gamma0(3), 2)._repr_()
        'Space of Boundary Modular Symbols of weight 2 for Congruence Subgroup Gamma0(3) with sign 0 and character [1] over Rational Field'
    """
    return ("Space of Boundary Modular Symbols of weight %s for %s "
            "with sign %s and character %s over %s") % (
                self.weight(), self.group(), self.sign(),
                self.character()._repr_short_(), self.base_ring())
def _cusp_index(self, cusp):
    """
    Return the index of the first cusp in ``self._known_cusps()``
    equivalent to ``cusp``, or -1 if ``cusp`` is not equivalent to any
    cusp found so far.

    EXAMPLES::

        sage: B = ModularSymbols(Gamma0(21), 4).boundary_space()
        sage: B._cusp_index(Cusp(0))
        -1
        sage: _ = B(Cusp(oo))
        sage: _ = B(Cusp(0))
        sage: B._cusp_index(Cusp(0))
        1
    """
    # (Removed an unused local ``N = self.level()``.)
    for i, g in enumerate(self._known_gens):
        if self._is_equiv(cusp, g):
            return i
    return -1
class BoundarySpace_wtk_g0(BoundarySpace):
    """
    Space of boundary modular symbols of weight ``k`` for ``Gamma_0(N)``.
    """

    def __init__(self, level, weight, sign, F):
        """
        Initialize a space of boundary symbols of weight k for Gamma_0(N)
        over base field F.

        INPUT:

        - ``level`` - int, the level

        - ``weight`` - integer weight >= 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - field

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(2), 5).boundary_space()
            sage: type(B)
            <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_g0_with_category'>
            sage: B == loads(dumps(B))
            True
        """
        level = int(level)
        sign = int(sign)
        weight = int(weight)
        if sign not in (-1, 0, 1):
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        if level <= 0:
            raise ArithmeticError("level must be positive")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=arithgroup.Gamma0(level),
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(97), 3).boundary_space()
            sage: B._repr_()
            'Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(97) of weight 3 and over Rational Field'
        """
        return ("Space of Boundary Modular Symbols for %s of weight %s "
                "and over %s") % (self.group(), self.weight(), self.base_ring())

    def _coerce_cusp(self, c):
        """
        Coerce the cusp c into this boundary symbol space.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(17), 6).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            [0]
            sage: B = ModularSymbols(Gamma0(17), 6, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B = ModularSymbols(Gamma0(16), 4).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [[0], [1/4], [1/2], [3/4]]
            sage: B = ModularSymbols(Gamma0(16), 4, sign=1).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [[0], [1/4], [1/2], [1/4]]
            sage: B = ModularSymbols(Gamma0(16), 4, sign=-1).boundary_space()
            sage: [ B(Cusp(i,4)) for i in range(4) ]
            [0, [1/4], 0, -[1/4]]
        """
        # Odd-weight spaces for Gamma0 are zero.
        if self.weight() % 2 != 0:
            return self(0)
        # (Removed an unused local ``N = self.level()``.)
        # see if we've already found this cusp
        i = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: 1})
        # see if we've already found -c
        sign = self.sign()
        if sign != 0:
            i2 = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # See if the new cusp is killed by sign relations. The
        # relevant relations (for cusps other than 0 and Infinity)
        # are:
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = [gamma * (u,v)]
        #    [(-u,v)] = sign * [(u,v)]
        #
        # So since k is always even on Gamma0, we have that [(u,v)] =
        # 0 from the above relations exactly when (u,v) = gamma*(-u,v)
        # and the sign is -1.
        if sign == -1:
            # NOTE: this code looks wrong. One should do the
            # following:
            #
            #    - if c is 0, if the sign is -1, append & return 0
            #    - if c is Infinity, then if the sign
            #      is not equal to (-1)**self.weight(), then
            #      append & return 0
            #    - otherwise, if the sign is -1, and c is
            #      equivalent to -c, append & return 0.
            #
            # Interestingly, the code below does precisely that.
            # (It's important to recall that for Gamma0, odd weight
            # spaces are 0.)
            if self._is_equiv(c, -c):
                self._is_zero.append(len(g) - 1)
                return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})

    def _is_equiv(self, c1, c2):
        """
        Determine whether or not c1 and c2 are equivalent for self.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma0(24), 6).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(oo))
            False
            sage: B._is_equiv(Cusp(0), Cusp(1))
            True
        """
        return c1.is_gamma0_equiv(c2, self.level())
class BoundarySpace_wtk_g1(BoundarySpace):
    """
    Space of boundary modular symbols of weight ``k`` for ``Gamma_1(N)``.
    """

    def __init__(self, level, weight, sign, F):
        """
        Initialize a space of boundary modular symbols for Gamma1(N).

        INPUT:

        - ``level`` - int, the level

        - ``weight`` - int, the weight >= 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - base ring

        EXAMPLES::

            sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_g1
            sage: B = BoundarySpace_wtk_g1(17, 2, 0, QQ) ; B
            Boundary Modular Symbols space for Gamma_1(17) of weight 2 over Rational Field
            sage: B == loads(dumps(B))
            True
        """
        level = int(level)
        sign = int(sign)
        if sign not in (-1, 0, 1):
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        if level <= 0:
            raise ArithmeticError("level must be positive")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=arithgroup.Gamma1(level),
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(Gamma1(5), 3, sign=1).boundary_space()._repr_()
            'Boundary Modular Symbols space for Gamma_1(5) of weight 3 over Rational Field'
        """
        return ("Boundary Modular Symbols space for Gamma_1(%s) of weight %s "
                "over %s") % (self.level(), self.weight(), self.base_ring())

    def _is_equiv(self, c1, c2):
        """
        Return True if c1 and c2 are equivalent cusps for self, and False
        otherwise.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(10), 4).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(1/5))
            (False, 0)
            sage: B._is_equiv(Cusp(4/5), Cusp(1/5))
            (True, -1)
            sage: B._is_equiv(Cusp(-4/5), Cusp(1/5))
            (True, 1)
        """
        return c1.is_gamma1_equiv(c2, self.level())

    def _cusp_index(self, cusp):
        """
        Returns a pair (i, t), where i is the index of the first cusp in
        self._known_cusps() which is equivalent to cusp, and t is 1 or -1
        as cusp is Gamma1-equivalent to plus or minus
        self._known_cusps()[i]. If cusp is not equivalent to any known
        cusp, return (-1, 0).

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(11),2).boundary_space()
            sage: B._cusp_index(Cusp(1/11))
            (-1, 0)
            sage: B._cusp_index(Cusp(10/11))
            (-1, 0)
            sage: B._coerce_cusp(Cusp(1/11))
            [1/11]
            sage: B._cusp_index(Cusp(1/11))
            (0, 1)
            sage: B._cusp_index(Cusp(10/11))
            (0, -1)
        """
        # (Removed an unused local ``N = self.level()``.)
        for i, g in enumerate(self._known_gens):
            t, eps = self._is_equiv(cusp, g)
            if t:
                return i, eps
        return -1, 0

    def _coerce_cusp(self, c):
        """
        Coerce a cusp into this boundary symbol space.

        EXAMPLES::

            sage: B = ModularSymbols(Gamma1(4), 4).boundary_space()
            sage: B._coerce_cusp(Cusp(1/2))
            [1/2]
            sage: B._coerce_cusp(Cusp(1/4))
            [1/4]
            sage: B._coerce_cusp(Cusp(3/4))
            [1/4]
            sage: B = ModularSymbols(Gamma1(5), 3, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B._coerce_cusp(Cusp(oo))
            [Infinity]
            sage: B = ModularSymbols(Gamma1(2), 3, sign=-1).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            0
            sage: B._coerce_cusp(Cusp(oo))
            0
            sage: B = ModularSymbols(Gamma1(7), 3).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [[0], [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]]
            sage: B._is_equiv(Cusp(1,6), Cusp(5,6))
            (True, 1)
            sage: B._is_equiv(Cusp(1,6), Cusp(0))
            (True, -1)
            sage: B(Cusp(0))
            [0]
            sage: B = ModularSymbols(Gamma1(7), 3, sign=1).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [[0], 0, 0, 0, 0, 0, 0]
            sage: B = ModularSymbols(Gamma1(7), 3, sign=-1).boundary_space()
            sage: [ B(Cusp(i,7)) for i in range(7) ]
            [0, [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]]
        """
        N = self.level()
        k = self.weight()
        sign = self.sign()
        # Already-known cusp class?
        i, eps = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: eps**k})
        # Already-known class of -c (only relevant on a sign subspace)?
        if sign != 0:
            i2, eps = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign * (eps**k)})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # Does cusp class vanish because of - relations? (See note at top
        # of file.)
        if k % 2 != 0:
            (u, v) = (c.numerator(), c.denominator())
            if (2 * v) % N == 0:
                if (2 * u) % v.gcd(N) == 0:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        # Does class vanish because of sign relations? The relevant
        # relations are
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = sign * [(-u,v)]
        #    [(u,v)] = eps * (-1)^k [(-u,v)]
        #
        # where, in the last line, (u,v) is Gamma1-equivalent to
        # (-u,v) or (u,-v) as eps is 1 or -1.
        #
        # Thus (other than for 0 and Infinity), we have that [(u,v)]
        # can only be killed by sign relations when:
        #
        #  - (u,v) is Gamma1-equivalent to (-u,v) or (u,-v), and
        #  - eps is 1 and sign is -1, or eps is -1 and sign is not
        #    (-1)^k.
        #
        if sign:
            if c.is_infinity():
                if sign != (-1)**self.weight():
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            elif c.is_zero():
                if sign == -1:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            else:
                t, eps = self._is_equiv(c, -c)
                if t and ((eps == 1 and sign == -1) or
                          (eps == -1 and sign != (-1)**self.weight())):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})
class BoundarySpace_wtk_gamma_h(BoundarySpace):
    """
    Space of boundary modular symbols of weight ``k`` for ``Gamma_H(N)``.
    """

    def __init__(self, group, weight, sign, F):
        """
        Initialize a space of boundary modular symbols for GammaH(N).

        INPUT:

        - ``group`` - congruence subgroup Gamma_H(N).

        - ``weight`` - int, the weight >= 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - base ring

        EXAMPLES::

            sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_gamma_h
            sage: B = BoundarySpace_wtk_gamma_h(GammaH(13,[3]), 2, 0, QQ) ; B
            Boundary Modular Symbols space for Congruence Subgroup Gamma_H(13) with H generated by [3] of weight 2 over Rational Field
            sage: B == loads(dumps(B))
            True
        """
        sign = int(sign)
        if sign not in (-1, 0, 1):
            raise ArithmeticError("sign must be an int in [-1,0,1]")
        BoundarySpace.__init__(self,
                               weight=weight,
                               group=group,
                               sign=sign,
                               base_ring=F)

    def _repr_(self):
        """
        Return the string representation of self.

        EXAMPLES::

            sage: ModularSymbols(GammaH(7,[2]), 4).boundary_space()._repr_()
            'Boundary Modular Symbols space for Congruence Subgroup Gamma_H(7) with H generated by [2] of weight 4 over Rational Field'
        """
        return ("Boundary Modular Symbols space for %s of weight %s "
                "over %s") % (self.group(), self.weight(), self.base_ring())

    def _is_equiv(self, c1, c2):
        """
        Return a pair of the form (b, t), where b is True if c1 and c2 are
        equivalent cusps for self, and False otherwise, and t gives extra
        information about the equivalence between c1 and c2.

        EXAMPLES::

            sage: B = ModularSymbols(GammaH(7,[2]), 4).boundary_space()
            sage: B._is_equiv(Cusp(0), Cusp(1/7))
            (False, 0)
            sage: B._is_equiv(Cusp(2/7), Cusp(1/7))
            (True, 1)
            sage: B._is_equiv(Cusp(3/7), Cusp(1/7))
            (True, -1)
        """
        return c1.is_gamma_h_equiv(c2, self.group())

    def _cusp_index(self, cusp):
        """
        Returns a pair (i, t), where i is the index of the first cusp in
        self._known_cusps() which is equivalent to cusp, and t is 1 or -1
        as cusp is GammaH-equivalent to plus or minus
        self._known_cusps()[i]. If cusp is not equivalent to any known
        cusp, return (-1, 0).

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(9,[4]), 3)
            sage: B = M.boundary_space()
            sage: B._cusp_index(Cusp(0))
            (-1, 0)
            sage: _ = [ B(x) for x in M.basis() ]
            sage: B._cusp_index(Cusp(0))
            (1, -1)
            sage: B._cusp_index(Cusp(5/6))
            (3, 1)
        """
        # (Removed an unused local ``N = self.level()``.)
        for i, g in enumerate(self._known_gens):
            t, eps = self._is_equiv(cusp, g)
            if t:
                return i, eps
        return -1, 0

    def _coerce_cusp(self, c):
        """
        Coerce the cusp c into self.

        EXAMPLES::

            sage: B = ModularSymbols(GammaH(10,[9]), 2).boundary_space()
            sage: B(Cusp(0))
            [0]
            sage: B(Cusp(1/3))
            [1/3]
            sage: B(Cusp(1/13))
            [1/3]
            sage: B = ModularSymbols(GammaH(25, [6]), 2).boundary_space()
            sage: B._coerce_cusp(Cusp(0))
            [0]

        ::

            sage: B = ModularSymbols(GammaH(11,[3]), 3).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [[0],
            [1/11],
            -[1/11],
            [1/11],
            [1/11],
            [1/11],
            -[1/11],
            -[1/11],
            -[1/11],
            [1/11],
            -[1/11]]
            sage: B._is_equiv(Cusp(0), Cusp(1,11))
            (False, 0)
            sage: B._is_equiv(Cusp(oo), Cusp(1,11))
            (True, 1)
            sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=1).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=-1).boundary_space()
            sage: [ B(Cusp(i,11)) for i in range(11) ]
            [0,
            [1/11],
            -[1/11],
            [1/11],
            [1/11],
            [1/11],
            -[1/11],
            -[1/11],
            -[1/11],
            [1/11],
            -[1/11]]
        """
        N = self.level()
        k = self.weight()
        sign = self.sign()
        # Already-known cusp class?
        i, eps = self._cusp_index(c)
        if i != -1:
            if i in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i: eps**k})
        # Already-known class of -c (only relevant on a sign subspace)?
        if sign != 0:
            i2, eps = self._cusp_index(-c)
            if i2 != -1:
                if i2 in self._is_zero:
                    return self(0)
                return BoundarySpaceElement(self, {i2: sign * (eps**k)})
        # found a new cusp class
        g = self._known_gens
        g.append(c)
        self._known_gens_repr.append("[%s]" % c)
        # Does cusp class vanish because of - relations? (See note at top
        # of file.)
        if k % 2 != 0:
            (u, v) = (c.numerator(), c.denominator())
            if (2 * v) % N == 0:
                if (2 * u) % v.gcd(N) == 0:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        # Does class vanish because of sign relations? The relevant
        # relations are
        #
        #    [(u,v)] = (-1)^k [(-u,-v)]
        #    [(u,v)] = sign * [(-u,v)]
        #    [(u,v)] = eps * (-1)^k [(-u,v)]
        #
        # where, in the last line, (u,v) is GammaH-equivalent to
        # (-u,v) or (u,-v) as eps is 1 or -1.
        #
        # Thus (other than for 0 and Infinity), we have that [(u,v)]
        # can only be killed by sign relations when:
        #
        #  - (u,v) is GammaH-equivalent to (-u,v) or (u,-v), and
        #  - eps is 1 and sign is -1, or eps is -1 and sign is not
        #    (-1)^k.
        #
        # (Notice that while this description looks identical to that
        # of Gamma1, it differs in that the condition of being GammaH
        # equivalent is weaker than that of being Gamma1 equivalent
        # when H is larger than {1}.)
        #
        if sign:
            if c.is_infinity():
                if sign != (-1)**self.weight():
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            elif c.is_zero():
                if sign == -1:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
            else:
                t, eps = self._is_equiv(c, -c)
                if t and ((eps == 1 and sign == -1) or
                          (eps == -1 and sign != (-1)**self.weight())):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
        return BoundarySpaceElement(self, {(len(g) - 1): 1})
class BoundarySpace_wtk_eps(BoundarySpace):
def __init__(self, eps, weight, sign=0):
    """
    Space of boundary modular symbols with given weight, character, and
    sign.

    INPUT:

    - ``eps`` - dirichlet.DirichletCharacter, the
      "Nebentypus" character.

    - ``weight`` - int, the weight >= 2

    - ``sign`` - int, either -1, 0, or 1

    EXAMPLES::

        sage: B = ModularSymbols(DirichletGroup(6).0, 4).boundary_space() ; B
        Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field
        sage: type(B)
        <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_eps_with_category'>
        sage: B == loads(dumps(B))
        True
    """
    # The level is determined by the modulus of the character.
    level = eps.modulus()
    sign = int(sign)
    self.__eps = eps
    if sign not in (-1, 0, 1):
        raise ArithmeticError("sign must be an int in [-1,0,1]")
    if level <= 0:
        raise ArithmeticError("level must be positive")
    BoundarySpace.__init__(self,
                           weight=weight,
                           group=arithgroup.Gamma1(level),
                           sign=sign,
                           base_ring=eps.base_ring(),
                           character=eps)
def _repr_(self):
    """
    Return the string representation of self.

    EXAMPLES::

        sage: ModularSymbols(DirichletGroup(6).0, 4).boundary_space()._repr_()
        'Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field'
    """
    return ("Boundary Modular Symbols space of level %s, weight %s, "
            "character %s and dimension %s over %s") % (
                self.level(), self.weight(),
                self.character()._repr_short_(),
                self.rank(), self.base_ring())
def _is_equiv(self, c1, c2):
    """
    Return a pair (b, t), where b is True if c1 and c2 are equivalent
    cusps for self, and False otherwise, and t gives extra information
    about the equivalence of c1 and c2.

    EXAMPLES::

        sage: B = ModularSymbols(DirichletGroup(12).1, 3).boundary_space()
        sage: B._is_equiv(Cusp(0), Cusp(1/3))
        (False, None)
        sage: B._is_equiv(Cusp(2/3), Cusp(1/3))
        (True, 5)
        sage: B._is_equiv(Cusp(3/4), Cusp(1/4))
        (True, 7)
    """
    # Ask for the transformation so the caller can evaluate the
    # character on it.
    return c1.is_gamma0_equiv(c2, self.level(), transformation=True)
def _cusp_index(self, cusp):
    """
    Returns a pair (i, s), where i is the index of the first cusp in
    self._known_cusps() which is equivalent to cusp, and such that
    cusp is Gamma0-equivalent to self.character()(s) times
    self._known_cusps()[i]. If cusp is not equivalent to any known
    cusp, return (-1, 0).

    EXAMPLES::

        sage: B = ModularSymbols(DirichletGroup(11).0**3, 5).boundary_space()
        sage: B._cusp_index(Cusp(0))
        (-1, 0)
        sage: B._coerce_cusp(Cusp(0))
        [0]
        sage: B._cusp_index(Cusp(0))
        (0, 1)
        sage: B._coerce_cusp(Cusp(1,11))
        [1/11]
        sage: B._cusp_index(Cusp(2,11))
        (1, -zeta10^2)
    """
    # (Removed an unused local ``N = self.level()``.)
    for i, g in enumerate(self._known_gens):
        t, s = self._is_equiv(cusp, g)
        if t:
            # Evaluate the nebentypus character on the transformation.
            return i, self.__eps(s)
    return -1, 0
def _coerce_cusp(self, c):
    """
    Coerce the cusp c into self.

    EXAMPLES::

        sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=0).boundary_space()
        sage: [ B(Cusp(i,13)) for i in range(13) ]
        [[0],
        [1/13],
        -zeta4*[1/13],
        [1/13],
        -[1/13],
        -zeta4*[1/13],
        -zeta4*[1/13],
        zeta4*[1/13],
        zeta4*[1/13],
        [1/13],
        -[1/13],
        zeta4*[1/13],
        -[1/13]]
        sage: B._is_equiv(Cusp(oo), Cusp(1,13))
        (True, 1)
        sage: B._is_equiv(Cusp(0), Cusp(1,13))
        (False, None)
        sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=1).boundary_space()
        sage: [ B(Cusp(i,13)) for i in range(13) ]
        [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        sage: B._coerce_cusp(Cusp(oo))
        0
        sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=-1).boundary_space()
        sage: [ B(Cusp(i,13)) for i in range(13) ]
        [0,
        [1/13],
        -zeta4*[1/13],
        [1/13],
        -[1/13],
        -zeta4*[1/13],
        -zeta4*[1/13],
        zeta4*[1/13],
        zeta4*[1/13],
        [1/13],
        -[1/13],
        zeta4*[1/13],
        -[1/13]]
        sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=1).boundary_space()
        sage: B._coerce_cusp(Cusp(0))
        [0]
        sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=-1).boundary_space()
        sage: B._coerce_cusp(Cusp(0))
        0
    """
    N = self.level()
    sign = self.sign()
    # (Removed an unused local ``k = self.weight()``; the coefficient
    # here is the character value, not eps**k as in the Gamma1 case.)
    # Already-known cusp class?
    i, eps = self._cusp_index(c)
    if i != -1:
        if i in self._is_zero:
            return self(0)
        return BoundarySpaceElement(self, {i: eps})
    # Already-known class of -c (only relevant on a sign subspace)?
    if sign != 0:
        i2, eps = self._cusp_index(-c)
        if i2 != -1:
            if i2 in self._is_zero:
                return self(0)
            return BoundarySpaceElement(self, {i2: sign * eps})
    # found a new cusp class
    g = self._known_gens
    g.append(c)
    self._known_gens_repr.append("[%s]" % c)
    ###############################################################
    # TODO?: This is a very dumb way to check for solutions to an
    # equation (see Prop 2.30 of Stein's Ph.D. thesis for which
    # equation); however, computing the cusp equivalence for the
    # boundary map takes much less time than computing the kernel
    # of the boundary map, so it's not worth optimizing this now.
    ###############################################################
    (u, v) = (c.numerator(), c.denominator())
    gcd = arith.gcd
    d = gcd(v, N)
    x = N // d
    for j in range(d):
        alpha = 1 - j * x
        if gcd(alpha, N) == 1:
            if (v * (1 - alpha)) % N == 0 and (u * (1 - alpha)) % d == 0:
                if self.__eps(alpha) != 1:
                    self._is_zero.append(len(g) - 1)
                    return self(0)
    # Does class vanish because of sign relations? The relevant
    # relations are
    #
    #    [(u,v)] = (-1)^k [(-u,-v)]
    #    [(u,v)] = sign * [(-u,v)]
    #    [(u,v)] = eps(d) * [(-u,v)]
    #
    # where, in the last line, eps is the character defining
    # our space, and [a,b;c,d] takes (u,v) to (-u,v).
    #
    # Thus (other than for 0 and Infinity), we have that [(u,v)]
    # can only be killed by sign relations when the sign is not
    # equal to eps(d).
    #
    if sign:
        if c.is_zero():
            if sign == -1:
                self._is_zero.append(len(g) - 1)
                return self(0)
        elif c.is_infinity():
            if sign != (-1)**self.weight():
                self._is_zero.append(len(g) - 1)
                return self(0)
        else:
            t, s = self._is_equiv(c, -c)
            if t:
                if sign != self.__eps(s):
                    self._is_zero.append(len(g) - 1)
                    return self(0)
    return BoundarySpaceElement(self, {(len(g) - 1): 1})
| en | 0.689867 | # -*- coding: utf-8 -*- Space of boundary modular symbols Used mainly for computing the cuspidal subspace of modular symbols. The space of boundary symbols of sign 0 is isomorphic as a Hecke module to the dual of the space of Eisenstein series, but this does not give a useful method of computing Eisenstein series, since there is no easy way to extract the constant terms. We represent boundary modular symbols as a sum of Manin symbols of the form `[P, u/v]`, where `u/v` is a cusp for our group `G`. The group of boundary modular symbols naturally embeds into a vector space `B_k(G)` (see Stein, section 8.4, or Merel, section 1.4, where this space is called `\CC[\Gamma \backslash \QQ]_k`, for a definition), which is a finite dimensional `\QQ` vector space of dimension equal to the number of cusps for `G`. The embedding takes `[P, u/v]` to `P(u,v)\cdot [(u,v)]`. We represent the basis vectors by pairs `[(u,v)]` with u, v coprime. On `B_k(G)`, we have the relations .. MATH:: [\gamma \cdot (u,v)] = [(u,v)] for all `\gamma \in G` and .. MATH:: [(\lambda u, \lambda v)] = \operatorname{sign}(\lambda)^k [(u,v)] for all `\lambda \in \QQ^\times`. It's possible for these relations to kill a class, i.e., for a pair `[(u,v)]` to be 0. For example, when `N=4` and `k=3` then `(-1,-2)` is equivalent mod `\Gamma_1(4)` to `(1,2)` since `2=-2 \bmod 4` and `1=-1 \bmod 2`. But since `k` is odd, `[(-1,-2)]` is also equivalent to `-[(1,2)]`. Thus this symbol is equivalent to its negative, hence 0 (notice that this wouldn't be the case in characteristic 2). This happens for any irregular cusp when the weight is odd; there are no irregular cusps on `\Gamma_1(N)` except when `N = 4`, but there can be more on `\Gamma_H` groups. See also prop 2.30 of Stein's Ph.D. thesis. In addition, in the case that our space is of sign `\sigma = 1` or `-1`, we also have the relation `[(-u,v)] = \sigma \cdot [(u,v)]`. 
This relation can also combine with the above to kill a cusp class - for instance, take (u,v) = (1,3) for `\Gamma_1(5)`. Then since the cusp `\tfrac{1}{3}` is `\Gamma_1(5)`-equivalent to the cusp `-\tfrac{1}{3}`, we have that `[(1,3)] = [(-1,3)]`. Now, on the minus subspace, we also have that `[(-1,3)] = -[(1,3)]`, which means this class must vanish. Notice that this cannot be used to show that `[(1,0)]` or `[(0,1)]` is 0. .. note:: Special care must be taken when working with the images of the cusps 0 and `\infty` in `B_k(G)`. For all cusps *except* 0 and `\infty`, multiplying the cusp by -1 corresponds to taking `[(u,v)]` to `[(-u,v)]` in `B_k(G)`. This means that `[(u,v)]` is equivalent to `[(-u,v)]` whenever `\tfrac{u}{v}` is equivalent to `-\tfrac{u}{v}`, except in the case of 0 and `\infty`. We have the following conditions for `[(1,0)]` and `[(0,1)]`: - `[(0,1)] = \sigma \cdot [(0,1)]`, so `[(0,1)]` is 0 exactly when `\sigma = -1`. - `[(1,0)] = \sigma \cdot [(-1,0)]` and `[(1,0)] = (-1)^k [(-1,0)]`, so `[(1,0)] = 0` whenever `\sigma \ne (-1)^k`. .. note:: For all the spaces of boundary symbols below, no work is done to determine the cusps for G at creation time. Instead, cusps are added as they are discovered in the course of computation. As a result, the rank of a space can change as a computation proceeds. REFERENCES: - Merel, "Universal Fourier expansions of modular forms." Springer LNM 1585 (1994), pg. 59-95. - Stein, "Modular Forms, a computational approach." AMS (2007). #***************************************************************************** # Copyright (C) 2005 <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. 
# http://www.gnu.org/licenses/ #***************************************************************************** Create a boundary symbol. INPUT: - ``parent`` - BoundarySpace; a space of boundary modular symbols - ``x`` - a dict with integer keys and values in the base field of parent. EXAMPLES:: sage: B = ModularSymbols(Gamma0(32), sign=-1).boundary_space() sage: B(Cusp(1,8)) [1/8] sage: B.0 [1/8] sage: type(B.0) <class 'sage.modular.modsym.boundary.BoundarySpaceElement'> Return self as a vector on the QQ-vector space with basis self.parent()._known_cusps(). EXAMPLES:: sage: B = ModularSymbols(18,4,sign=1).boundary_space() sage: x = B(Cusp(1/2)) ; x [1/2] sage: x.coordinate_vector() (1) sage: ((18/5)*x).coordinate_vector() (18/5) sage: B(Cusp(0)) [0] sage: x.coordinate_vector() (1) sage: x = B(Cusp(1/2)) ; x [1/2] sage: x.coordinate_vector() (1, 0) Return the string representation of self. EXAMPLES:: sage: ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0))._repr_() '[0]' sage: (-6*ModularSymbols(Gamma0(11), 2).boundary_space()(Cusp(0)))._repr_() '-6*[0]' # can't inherit arithmetic operations from HeckeModule, because basis # dimension might change! Return self + other. Assumes that other is a BoundarySpaceElement. EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16)) sage: x + y # indirect doctest [2/7] + [13/16] sage: x + x # indirect doctest 2*[2/7] Return self - other. Assumes that other is a BoundarySpaceElement. EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16)) sage: x - y # indirect doctest [2/7] - [13/16] sage: x - x # indirect doctest 0 Return self \* other. Assumes that other can be coerced into self.parent().base_ring(). EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) sage: x*5 # indirect doctest 5*[2/7] sage: x*-3/5 # indirect doctest -3/5*[2/7] Return other \* self. 
Assumes that other can be coerced into self.parent().base_ring(). EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(13/16)) sage: 11*x # indirect doctest 11*[13/16] sage: 1/3*x # indirect doctest 1/3*[13/16] Return -self. EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) sage: -x # indirect doctest -[2/7] sage: -x + x # indirect doctest 0 Space of boundary symbols for a congruence subgroup of SL_2(Z). This class is an abstract base class, so only derived classes should be instantiated. INPUT: - ``weight`` - int, the weight - ``group`` - arithgroup.congroup_generic.CongruenceSubgroup, a congruence subgroup. - ``sign`` - int, either -1, 0, or 1 - ``base_ring`` - rings.Ring (defaults to the rational numbers) EXAMPLES:: sage: B = ModularSymbols(Gamma0(11),2).boundary_space() sage: isinstance(B, sage.modular.modsym.boundary.BoundarySpace) True sage: B == loads(dumps(B)) True EXAMPLES:: sage: B2 = ModularSymbols(11, 2).boundary_space() sage: B4 = ModularSymbols(11, 4).boundary_space() sage: B2 == B4 False sage: B2 == ModularSymbols(17, 2).boundary_space() False Return the list of cusps found so far. EXAMPLES:: sage: B = ModularSymbols(Gamma1(12), 4).boundary_space() sage: B._known_cusps() [] sage: ls = [ B(Cusp(i,10)) for i in range(10) ] sage: B._known_cusps() [0, 1/10, 1/5] Return True if self is a space of boundary symbols associated to an ambient space of modular symbols. EXAMPLES:: sage: M = ModularSymbols(Gamma1(6), 4) sage: M.is_ambient() True sage: M.boundary_space().is_ambient() True Return the congruence subgroup associated to this space of boundary modular symbols. EXAMPLES:: sage: ModularSymbols(GammaH(14,[9]), 2).boundary_space().group() Congruence Subgroup Gamma_H(14) with H generated by [9] Return the weight of this space of boundary modular symbols. 
EXAMPLES:: sage: ModularSymbols(Gamma1(9), 5).boundary_space().weight() 5 Return the Dirichlet character associated to this space of boundary modular symbols. EXAMPLES:: sage: ModularSymbols(DirichletGroup(7).0, 6).boundary_space().character() Dirichlet character modulo 7 of conductor 7 mapping 3 |--> zeta6 Return the sign of the complex conjugation involution on this space of boundary modular symbols. EXAMPLES:: sage: ModularSymbols(13,2,sign=-1).boundary_space().sign() -1 Return the i-th generator of this space. EXAMPLES:: sage: B = ModularSymbols(Gamma0(24), 4).boundary_space() sage: B.gen(0) Traceback (most recent call last): ... ValueError: only 0 generators known for Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(24) of weight 4 and over Rational Field sage: B(Cusp(1/3)) [1/3] sage: B.gen(0) [1/3] Return the length of self, i.e. the dimension of the underlying vector space. EXAMPLES:: sage: B = ModularSymbols(Gamma0(36),4,sign=1).boundary_space() sage: B.__len__() 0 sage: len(B) 0 sage: x = B(Cusp(0)) ; y = B(Cusp(oo)) ; len(B) 2 Return the underlying free module for self. EXAMPLES:: sage: B = ModularSymbols(Gamma1(7), 5, sign=-1).boundary_space() sage: B.free_module() Sparse vector space of dimension 0 over Rational Field sage: x = B(Cusp(0)) ; y = B(Cusp(1/7)) ; B.free_module() Sparse vector space of dimension 2 over Rational Field The rank of the space generated by boundary symbols that have been found so far in the course of computing the boundary map. .. warning:: This number may change as more elements are coerced into this space!! (This is an implementation detail that will likely change.) EXAMPLES:: sage: M = ModularSymbols(Gamma0(72), 2) ; B = M.boundary_space() sage: B.rank() 0 sage: _ = [ B(x) for x in M.basis() ] sage: B.rank() 16 ##################################################################### # Coercion ##################################################################### Coerce the Manin symbol x into self. 
(That is, return the image of x under the boundary map.) Assumes that x is associated to the same space of modular symbols as self. EXAMPLES:: sage: M = ModularSymbols(Gamma1(5), 4) ; B = M.boundary_space() sage: [ B(x) for x in M.basis() ] [-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]] sage: [ B._coerce_in_manin_symbol(x) for x in M.manin_symbols_basis() ] [-[2/5], -[-1/5], -[1/2], -[1/2], -[1/4], -[1/4]] Coerce x into a boundary symbol space. If x is a modular symbol (with the same group, weight, character, sign, and base field), this returns the image of that modular symbol under the boundary map. EXAMPLES:: sage: M = ModularSymbols(Gamma0(15), 2) ; B = M.boundary_space() sage: B(M.0) [Infinity] - [0] sage: B(Cusp(1)) [0] sage: B(Cusp(oo)) [Infinity] sage: B(7) Traceback (most recent call last): ... TypeError: Coercion of 7 (of type <type 'sage.rings.integer.Integer'>) into Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(15) of weight 2 and over Rational Field not (yet) defined. Return the string representation of self. EXAMPLES:: sage: sage.modular.modsym.boundary.BoundarySpace(Gamma0(3), 2)._repr_() 'Space of Boundary Modular Symbols of weight 2 for Congruence Subgroup Gamma0(3) with sign 0 and character [1] over Rational Field' Return the index of the first cusp in self._known_cusps() equivalent to cusp, or -1 if cusp is not equivalent to any cusp found so far. EXAMPLES:: sage: B = ModularSymbols(Gamma0(21), 4).boundary_space() sage: B._cusp_index(Cusp(0)) -1 sage: _ = B(Cusp(oo)) sage: _ = B(Cusp(0)) sage: B._cusp_index(Cusp(0)) 1 Initialize a space of boundary symbols of weight k for Gamma_0(N) over base field F. INPUT: - ``level`` - int, the level - ``weight`` - integer weight = 2. 
- ``sign`` - int, either -1, 0, or 1 - ``F`` - field EXAMPLES:: sage: B = ModularSymbols(Gamma0(2), 5).boundary_space() sage: type(B) <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_g0_with_category'> sage: B == loads(dumps(B)) True Return the string representation of self. EXAMPLES:: sage: B = ModularSymbols(Gamma0(97), 3).boundary_space() sage: B._repr_() 'Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(97) of weight 3 and over Rational Field' Coerce the cusp c into this boundary symbol space. EXAMPLES:: sage: B = ModularSymbols(Gamma0(17), 6).boundary_space() sage: B._coerce_cusp(Cusp(0)) [0] sage: B = ModularSymbols(Gamma0(17), 6, sign=-1).boundary_space() sage: B._coerce_cusp(Cusp(0)) 0 sage: B = ModularSymbols(Gamma0(16), 4).boundary_space() sage: [ B(Cusp(i,4)) for i in range(4) ] [[0], [1/4], [1/2], [3/4]] sage: B = ModularSymbols(Gamma0(16), 4, sign=1).boundary_space() sage: [ B(Cusp(i,4)) for i in range(4) ] [[0], [1/4], [1/2], [1/4]] sage: B = ModularSymbols(Gamma0(16), 4, sign=-1).boundary_space() sage: [ B(Cusp(i,4)) for i in range(4) ] [0, [1/4], 0, -[1/4]] # see if we've already found this cusp # see if we've already found -c # found a new cusp class # See if the new cusp is killed by sign relations. The # relevant relations (for cusps other than 0 and Infinity) # are: # # [(u,v)] = (-1)^k [(-u,-v)] # [(u,v)] = [gamma * (u,v)] # [(-u,v)] = sign * [(u,v)] # # So since k is always even on Gamma0, we have that [(u,v)] = # 0 from the above relations exactly when (u,v) = gamma*(-u,v) # and the sign is -1. # NOTE: this code looks wrong. One should do the # following: # # - if c is 0, if the sign is -1, append & return 0 # - if c is Infinity, then if the sign # is not equal to (-1)**self.weight(), then # append & return 0 # - otherwise, if the sign is -1, and c is # equivalent to -c, append & return 0. # # Interestingly, the code below does precisely that. # (It's important to recall that for Gamma0, odd weight # spaces are 0.) 
Determine whether or not c1 and c2 are equivalent for self. EXAMPLES:: sage: B = ModularSymbols(Gamma0(24), 6).boundary_space() sage: B._is_equiv(Cusp(0), Cusp(oo)) False sage: B._is_equiv(Cusp(0), Cusp(1)) True Initialize a space of boundary modular symbols for Gamma1(N). INPUT: - ``level`` - int, the level - ``weight`` - int, the weight = 2 - ``sign`` - int, either -1, 0, or 1 - ``F`` - base ring EXAMPLES:: sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_g1 sage: B = BoundarySpace_wtk_g1(17, 2, 0, QQ) ; B Boundary Modular Symbols space for Gamma_1(17) of weight 2 over Rational Field sage: B == loads(dumps(B)) True Return the string representation of self. EXAMPLES:: sage: ModularSymbols(Gamma1(5), 3, sign=1).boundary_space()._repr_() 'Boundary Modular Symbols space for Gamma_1(5) of weight 3 over Rational Field' Return True if c1 and c2 are equivalent cusps for self, and False otherwise. EXAMPLES:: sage: B = ModularSymbols(Gamma1(10), 4).boundary_space() sage: B._is_equiv(Cusp(0), Cusp(1/5)) (False, 0) sage: B._is_equiv(Cusp(4/5), Cusp(1/5)) (True, -1) sage: B._is_equiv(Cusp(-4/5), Cusp(1/5)) (True, 1) Returns a pair (i, t), where i is the index of the first cusp in self._known_cusps() which is equivalent to cusp, and t is 1 or -1 as cusp is Gamma1-equivalent to plus or minus self._known_cusps()[i]. If cusp is not equivalent to any known cusp, return (-1, 0). EXAMPLES:: sage: B = ModularSymbols(Gamma1(11),2).boundary_space() sage: B._cusp_index(Cusp(1/11)) (-1, 0) sage: B._cusp_index(Cusp(10/11)) (-1, 0) sage: B._coerce_cusp(Cusp(1/11)) [1/11] sage: B._cusp_index(Cusp(1/11)) (0, 1) sage: B._cusp_index(Cusp(10/11)) (0, -1) Coerce a cusp into this boundary symbol space. 
EXAMPLES:: sage: B = ModularSymbols(Gamma1(4), 4).boundary_space() sage: B._coerce_cusp(Cusp(1/2)) [1/2] sage: B._coerce_cusp(Cusp(1/4)) [1/4] sage: B._coerce_cusp(Cusp(3/4)) [1/4] sage: B = ModularSymbols(Gamma1(5), 3, sign=-1).boundary_space() sage: B._coerce_cusp(Cusp(0)) 0 sage: B._coerce_cusp(Cusp(oo)) [Infinity] sage: B = ModularSymbols(Gamma1(2), 3, sign=-1).boundary_space() sage: B._coerce_cusp(Cusp(0)) 0 sage: B._coerce_cusp(Cusp(oo)) 0 sage: B = ModularSymbols(Gamma1(7), 3).boundary_space() sage: [ B(Cusp(i,7)) for i in range(7) ] [[0], [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]] sage: B._is_equiv(Cusp(1,6), Cusp(5,6)) (True, 1) sage: B._is_equiv(Cusp(1,6), Cusp(0)) (True, -1) sage: B(Cusp(0)) [0] sage: B = ModularSymbols(Gamma1(7), 3, sign=1).boundary_space() sage: [ B(Cusp(i,7)) for i in range(7) ] [[0], 0, 0, 0, 0, 0, 0] sage: B = ModularSymbols(Gamma1(7), 3, sign=-1).boundary_space() sage: [ B(Cusp(i,7)) for i in range(7) ] [0, [1/7], [2/7], [3/7], -[3/7], -[2/7], -[1/7]] # found a new cusp class # Does cusp class vanish because of - relations? (See note at top # of file.) # Does class vanish because of sign relations? The relevant # relations are # # [(u,v)] = (-1)^k [(-u,-v)] # [(u,v)] = sign * [(-u,v)] # [(u,v)] = eps * (-1)^k [(-u,v)] # # where, in the last line, (u,v) is Gamma1-equivalent to # (-u,v) or (u,-v) as eps is 1 or -1. # # Thus (other than for 0 and Infinity), we have that [(u,v)] # can only be killed by sign relations when: # # - (u,v) is Gamma1-equivalent to (-u,v) or (u,-v), and # - eps is 1 and sign is -1, or eps is -1 and sign is not # (-1)^k. # Initialize a space of boundary modular symbols for GammaH(N). INPUT: - ``group`` - congruence subgroup Gamma_H(N). 
- ``weight`` - int, the weight = 2 - ``sign`` - int, either -1, 0, or 1 - ``F`` - base ring EXAMPLES:: sage: from sage.modular.modsym.boundary import BoundarySpace_wtk_gamma_h sage: B = BoundarySpace_wtk_gamma_h(GammaH(13,[3]), 2, 0, QQ) ; B Boundary Modular Symbols space for Congruence Subgroup Gamma_H(13) with H generated by [3] of weight 2 over Rational Field sage: B == loads(dumps(B)) True Return the string representation of self. EXAMPLES:: sage: ModularSymbols(GammaH(7,[2]), 4).boundary_space()._repr_() 'Boundary Modular Symbols space for Congruence Subgroup Gamma_H(7) with H generated by [2] of weight 4 over Rational Field' Return a pair of the form (b, t), where b is True if c1 and c2 are equivalent cusps for self, and False otherwise, and t gives extra information about the equivalence between c1 and c2. EXAMPLES:: sage: B = ModularSymbols(GammaH(7,[2]), 4).boundary_space() sage: B._is_equiv(Cusp(0), Cusp(1/7)) (False, 0) sage: B._is_equiv(Cusp(2/7), Cusp(1/7)) (True, 1) sage: B._is_equiv(Cusp(3/7), Cusp(1/7)) (True, -1) Returns a pair (i, t), where i is the index of the first cusp in self._known_cusps() which is equivalent to cusp, and t is 1 or -1 as cusp is GammaH-equivalent to plus or minus self._known_cusps()[i]. If cusp is not equivalent to any known cusp, return (-1, 0). EXAMPLES:: sage: M = ModularSymbols(GammaH(9,[4]), 3) sage: B = M.boundary_space() sage: B._cusp_index(Cusp(0)) (-1, 0) sage: _ = [ B(x) for x in M.basis() ] sage: B._cusp_index(Cusp(0)) (1, -1) sage: B._cusp_index(Cusp(5/6)) (3, 1) Coerce the cusp c into self. 
EXAMPLES:: sage: B = ModularSymbols(GammaH(10,[9]), 2).boundary_space() sage: B(Cusp(0)) [0] sage: B(Cusp(1/3)) [1/3] sage: B(Cusp(1/13)) [1/3] sage: B = ModularSymbols(GammaH(25, [6]), 2).boundary_space() sage: B._coerce_cusp(Cusp(0)) [0] :: sage: B = ModularSymbols(GammaH(11,[3]), 3).boundary_space() sage: [ B(Cusp(i,11)) for i in range(11) ] [[0], [1/11], -[1/11], [1/11], [1/11], [1/11], -[1/11], -[1/11], -[1/11], [1/11], -[1/11]] sage: B._is_equiv(Cusp(0), Cusp(1,11)) (False, 0) sage: B._is_equiv(Cusp(oo), Cusp(1,11)) (True, 1) sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=1).boundary_space() sage: [ B(Cusp(i,11)) for i in range(11) ] [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] sage: B = ModularSymbols(GammaH(11,[3]), 3, sign=-1).boundary_space() sage: [ B(Cusp(i,11)) for i in range(11) ] [0, [1/11], -[1/11], [1/11], [1/11], [1/11], -[1/11], -[1/11], -[1/11], [1/11], -[1/11]] # found a new cusp class # Does cusp class vanish because of - relations? (See note at top # of file.) # Does class vanish because of sign relations? The relevant # relations are # # [(u,v)] = (-1)^k [(-u,-v)] # [(u,v)] = sign * [(-u,v)] # [(u,v)] = eps * (-1)^k [(-u,v)] # # where, in the last line, (u,v) is GammaH-equivalent to # (-u,v) or (u,-v) as eps is 1 or -1. # # Thus (other than for 0 and Infinity), we have that [(u,v)] # can only be killed by sign relations when: # # - (u,v) is GammaH-equivalent to (-u,v) or (u,-v), and # - eps is 1 and sign is -1, or eps is -1 and sign is not # (-1)^k. # # (Notice that while this description looks identical to that # of Gamma1, it differs in that the condition of being GammaH # equivalent is weaker than that of being Gamma1 equivalent # when H is larger than {1}.) # Space of boundary modular symbols with given weight, character, and sign. INPUT: - ``eps`` - dirichlet.DirichletCharacter, the "Nebentypus" character. 
- ``weight`` - int, the weight = 2 - ``sign`` - int, either -1, 0, or 1 EXAMPLES:: sage: B = ModularSymbols(DirichletGroup(6).0, 4).boundary_space() ; B Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field sage: type(B) <class 'sage.modular.modsym.boundary.BoundarySpace_wtk_eps_with_category'> sage: B == loads(dumps(B)) True Return the string representation of self. EXAMPLES:: sage: ModularSymbols(DirichletGroup(6).0, 4).boundary_space()._repr_() 'Boundary Modular Symbols space of level 6, weight 4, character [-1] and dimension 0 over Rational Field' Return a pair (b, t), where b is True if c1 and c2 are equivalent cusps for self, and False otherwise, and t gives extra information about the equivalence of c1 and c2. EXAMPLES:: sage: B = ModularSymbols(DirichletGroup(12).1, 3).boundary_space() sage: B._is_equiv(Cusp(0), Cusp(1/3)) (False, None) sage: B._is_equiv(Cusp(2/3), Cusp(1/3)) (True, 5) sage: B._is_equiv(Cusp(3/4), Cusp(1/4)) (True, 7) Returns a pair (i, s), where i is the index of the first cusp in self._known_cusps() which is equivalent to cusp, and such that cusp is Gamma0-equivalent to self.character()(s) times self._known_cusps()[i]. If cusp is not equivalent to any known cusp, return (-1, 0). EXAMPLES:: sage: B = ModularSymbols(DirichletGroup(11).0**3, 5).boundary_space() sage: B._cusp_index(Cusp(0)) (-1, 0) sage: B._coerce_cusp(Cusp(0)) [0] sage: B._cusp_index(Cusp(0)) (0, 1) sage: B._coerce_cusp(Cusp(1,11)) [1/11] sage: B._cusp_index(Cusp(2,11)) (1, -zeta10^2) Coerce the cusp c into self. 
EXAMPLES:: sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=0).boundary_space() sage: [ B(Cusp(i,13)) for i in range(13) ] [[0], [1/13], -zeta4*[1/13], [1/13], -[1/13], -zeta4*[1/13], -zeta4*[1/13], zeta4*[1/13], zeta4*[1/13], [1/13], -[1/13], zeta4*[1/13], -[1/13]] sage: B._is_equiv(Cusp(oo), Cusp(1,13)) (True, 1) sage: B._is_equiv(Cusp(0), Cusp(1,13)) (False, None) sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=1).boundary_space() sage: [ B(Cusp(i,13)) for i in range(13) ] [[0], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] sage: B._coerce_cusp(Cusp(oo)) 0 sage: B = ModularSymbols(DirichletGroup(13).0**3, 5, sign=-1).boundary_space() sage: [ B(Cusp(i,13)) for i in range(13) ] [0, [1/13], -zeta4*[1/13], [1/13], -[1/13], -zeta4*[1/13], -zeta4*[1/13], zeta4*[1/13], zeta4*[1/13], [1/13], -[1/13], zeta4*[1/13], -[1/13]] sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=1).boundary_space() sage: B._coerce_cusp(Cusp(0)) [0] sage: B = ModularSymbols(DirichletGroup(13).0**4, 5, sign=-1).boundary_space() sage: B._coerce_cusp(Cusp(0)) 0 # found a new cusp class ############################################################### # TODO?: This is a very dumb way to check for solutions to an # equation (see Prop 2.30 of Stein's Ph.D. thesis for which # equation); however, computing the cusp equivalence for the # boundary map takes much less time than computing the kernel # of the boundary map, so it's not worth optimizing this now. ############################################################### # Does class vanish because of sign relations? The relevant # relations are # # [(u,v)] = (-1)^k [(-u,-v)] # [(u,v)] = sign * [(-u,v)] # [(u,v)] = eps(d) * [(-u,v)] # # where, in the last line, eps is the character defining # our space, and [a,b;c,d] takes (u,v) to (-u,v). # # Thus (other than for 0 and Infinity), we have that [(u,v)] # can only be killed by sign relations when the sign is not # equal to eps(d). # | 2.990703 | 3 |
udf/buildbib.py | metazool/pegamatites-xDD | 0 | 6624902 | <filename>udf/buildbib.py
#==============================================================================
#BUILD A BASIC BIBLIOGRAPHY
#==============================================================================
import json,psycopg2, yaml
from yaml import Loader
# Connect to Postgres
with open('./credentials', 'r') as credential_yaml:
credentials = yaml.load(credential_yaml, Loader=Loader)
with open('./config', 'r') as config_yaml:
config = yaml.load(config_yaml, Loader=Loader)
# Connect to Postgres
connection = psycopg2.connect(
password=credentials['postgres']['password'],
dbname=credentials['postgres']['database'],
user=credentials['postgres']['user'],
host=credentials['postgres']['host'],
port=credentials['postgres']['port'])
cursor = connection.cursor()
#initialize the table
cursor.execute("""
DELETE FROM bib
""")
connection.commit()
#load in the bibJSON file
with open('./input/bibjson') as fid:
bib=json.load(fid)
#push docid, authors, title, journal name and url to PostGRES
for idx,item in enumerate(bib):
#initialize the variables to push to psql
docid=[]
title=[]
journal=[]
names=[]
url =[]
#as failsafe, always check if each variable exists
if isinstance(item['_gddid'],str):
docid=item['_gddid'].encode('ascii','ignore')
else:
docid=item['_gddid']
if isinstance(item['title'],str):
title=item['title'].encode('ascii','ignore')
else:
title=item['title']
if isinstance(item['journal']['name'],str):
journal=item['journal']['name'].encode('ascii','ignore')
else:
journal=item['journal']['name']
if 'author' in list(item.keys()):
for name in item['author']:
names.append(name['name'].encode('ascii','ignore'))
if 'link' in list(item.keys()):
url=item['link'][0]['url']
for link in item['link']:
if link['type']=='sciencedirect':
url=link['url']
#psql table insertion
cursor.execute("""
INSERT INTO bib ( docid,
author,
title,
journal,
url)
VALUES (%s, %s, %s, %s, %s);""",
(docid,names,title,journal,url)
)
connection.commit()
#update the table with number of instances per journal name
cursor.execute(""" WITH query AS(SELECT journal, COUNT(journal)
FROM bib
GROUP BY journal)
UPDATE bib
SET journal_instances = query.count
FROM query
WHERE bib.journal = query.journal
""")
connection.commit()
#close the connection
connection.close()
| <filename>udf/buildbib.py
#==============================================================================
#BUILD A BASIC BIBLIOGRAPHY
#==============================================================================
import json,psycopg2, yaml
from yaml import Loader
# Connect to Postgres
with open('./credentials', 'r') as credential_yaml:
credentials = yaml.load(credential_yaml, Loader=Loader)
with open('./config', 'r') as config_yaml:
config = yaml.load(config_yaml, Loader=Loader)
# Connect to Postgres
connection = psycopg2.connect(
password=credentials['postgres']['password'],
dbname=credentials['postgres']['database'],
user=credentials['postgres']['user'],
host=credentials['postgres']['host'],
port=credentials['postgres']['port'])
cursor = connection.cursor()
#initialize the table
cursor.execute("""
DELETE FROM bib
""")
connection.commit()
#load in the bibJSON file
with open('./input/bibjson') as fid:
bib=json.load(fid)
#push docid, authors, title, journal name and url to PostGRES
for idx,item in enumerate(bib):
#initialize the variables to push to psql
docid=[]
title=[]
journal=[]
names=[]
url =[]
#as failsafe, always check if each variable exists
if isinstance(item['_gddid'],str):
docid=item['_gddid'].encode('ascii','ignore')
else:
docid=item['_gddid']
if isinstance(item['title'],str):
title=item['title'].encode('ascii','ignore')
else:
title=item['title']
if isinstance(item['journal']['name'],str):
journal=item['journal']['name'].encode('ascii','ignore')
else:
journal=item['journal']['name']
if 'author' in list(item.keys()):
for name in item['author']:
names.append(name['name'].encode('ascii','ignore'))
if 'link' in list(item.keys()):
url=item['link'][0]['url']
for link in item['link']:
if link['type']=='sciencedirect':
url=link['url']
#psql table insertion
cursor.execute("""
INSERT INTO bib ( docid,
author,
title,
journal,
url)
VALUES (%s, %s, %s, %s, %s);""",
(docid,names,title,journal,url)
)
connection.commit()
#update the table with number of instances per journal name
cursor.execute(""" WITH query AS(SELECT journal, COUNT(journal)
FROM bib
GROUP BY journal)
UPDATE bib
SET journal_instances = query.count
FROM query
WHERE bib.journal = query.journal
""")
connection.commit()
#close the connection
connection.close()
| en | 0.453891 | #============================================================================== #BUILD A BASIC BIBLIOGRAPHY #============================================================================== # Connect to Postgres # Connect to Postgres #initialize the table DELETE FROM bib #load in the bibJSON file #push docid, authors, title, journal name and url to PostGRES #initialize the variables to push to psql #as failsafe, always check if each variable exists #psql table insertion INSERT INTO bib ( docid, author, title, journal, url) VALUES (%s, %s, %s, %s, %s); #update the table with number of instances per journal name WITH query AS(SELECT journal, COUNT(journal) FROM bib GROUP BY journal) UPDATE bib SET journal_instances = query.count FROM query WHERE bib.journal = query.journal #close the connection | 2.608432 | 3 |
ckan/logic/action/create.py | sirca/ckan | 1 | 6624903 | '''API functions for adding data to CKAN.'''
import logging
import random
import re
from pylons import config
import paste.deploy.converters
from sqlalchemy import func
import ckan.lib.plugins as lib_plugins
import ckan.logic as logic
import ckan.rating as ratings
import ckan.plugins as plugins
import ckan.lib.dictization
import ckan.logic.action
import ckan.logic.schema
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.dictization.model_save as model_save
import ckan.lib.navl.dictization_functions
import ckan.lib.uploader as uploader
import ckan.lib.navl.validators as validators
import ckan.lib.mailer as mailer
import ckan.lib.datapreview as datapreview
from ckan.common import _
# FIXME this looks nasty and should be shared better
from ckan.logic.action.update import _update_package_relationship
# Module-level logger for this actions module.
log = logging.getLogger(__name__)
# Define some shortcuts
# Ensure they are module-private so that they don't get loaded as available
# actions in the action API.
_validate = ckan.lib.navl.dictization_functions.validate
_check_access = logic.check_access
_get_action = logic.get_action
# The exception classes are deliberately re-exported under their public
# (non-underscored) names so callers can catch them from this module.
ValidationError = logic.ValidationError
NotFound = logic.NotFound
_get_or_bust = logic.get_or_bust
def package_create(context, data_dict):
    '''Create a new dataset (package).
    You must be authorized to create new datasets. If you specify any groups
    for the new dataset, you must also be authorized to edit these groups.
    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugin interface.
    :param name: the name of the new dataset, must be between 2 and 100
        characters long and contain only lowercase alphanumeric characters,
        ``-`` and ``_``, e.g. ``'warandpeace'``
    :type name: string
    :param title: the title of the dataset (optional, default: same as
        ``name``)
    :type title: string
    :param author: the name of the dataset's author (optional)
    :type author: string
    :param author_email: the email address of the dataset's author (optional)
    :type author_email: string
    :param maintainer: the name of the dataset's maintainer (optional)
    :type maintainer: string
    :param maintainer_email: the email address of the dataset's maintainer
        (optional)
    :type maintainer_email: string
    :param license_id: the id of the dataset's license, see
        :py:func:`~ckan.logic.action.get.license_list` for available values
        (optional)
    :type license_id: license id string
    :param notes: a description of the dataset (optional)
    :type notes: string
    :param url: a URL for the dataset's source (optional)
    :type url: string
    :param version: (optional)
    :type version: string, no longer than 100 characters
    :param state: the current state of the dataset, e.g. ``'active'`` or
        ``'deleted'``, only active datasets show up in search results and
        other lists of datasets, this parameter will be ignored if you are not
        authorized to change the state of the dataset (optional, default:
        ``'active'``)
    :type state: string
    :param type: the type of the dataset (optional),
        :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugins
        associate themselves with different dataset types and provide custom
        dataset handling behaviour for these types
    :type type: string
    :param resources: the dataset's resources, see
        :py:func:`resource_create` for the format of resource dictionaries
        (optional)
    :type resources: list of resource dictionaries
    :param tags: the dataset's tags, see :py:func:`tag_create` for the format
        of tag dictionaries (optional)
    :type tags: list of tag dictionaries
    :param extras: the dataset's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to datasets, each extra
        dictionary should have keys ``'key'`` (a string), ``'value'`` (a
        string)
    :type extras: list of dataset extra dictionaries
    :param relationships_as_object: see :py:func:`package_relationship_create`
        for the format of relationship dictionaries (optional)
    :type relationships_as_object: list of relationship dictionaries
    :param relationships_as_subject: see :py:func:`package_relationship_create`
        for the format of relationship dictionaries (optional)
    :type relationships_as_subject: list of relationship dictionaries
    :param groups: the groups to which the dataset belongs (optional), each
        group dictionary should have one or more of the following keys which
        identify an existing group:
        ``'id'`` (the id of the group, string), ``'name'`` (the name of the
        group, string), ``'title'`` (the title of the group, string), to see
        which groups exist call :py:func:`~ckan.logic.action.get.group_list`
    :type groups: list of dictionaries
    :param owner_org: the id of the dataset's owning organization, see
        :py:func:`~ckan.logic.action.get.organization_list` or
        :py:func:`~ckan.logic.action.get.organization_list_for_user` for
        available values (optional)
    :type owner_org: string
    :returns: the newly created dataset (unless 'return_id_only' is set to True
              in the context, in which case just the dataset id will
              be returned)
    :rtype: dictionary
    '''
    # Context keys consumed here: 'model', 'user', and optionally 'schema'
    # (overrides the plugin's create schema), 'message' (revision log
    # message), 'api_version', 'defer_commit' and 'return_id_only'.
    model = context['model']
    user = context['user']
    # Schema and validation behaviour are delegated to the IDatasetForm
    # plugin registered for this dataset 'type'.
    package_type = data_dict.get('type')
    package_plugin = lib_plugins.lookup_package_plugin(package_type)
    if 'schema' in context:
        schema = context['schema']
    else:
        schema = package_plugin.create_package_schema()
    _check_access('package_create', context, data_dict)
    if 'api_version' not in context:
        # check_data_dict() is deprecated. If the package_plugin has a
        # check_data_dict() we'll call it, if it doesn't have the method we'll
        # do nothing.
        check_data_dict = getattr(package_plugin, 'check_data_dict', None)
        if check_data_dict:
            try:
                check_data_dict(data_dict, schema)
            except TypeError:
                # Old plugins do not support passing the schema so we need
                # to ensure they still work
                package_plugin.check_data_dict(data_dict)
    data, errors = lib_plugins.plugin_validate(
        package_plugin, context, data_dict, schema, 'package_create')
    log.debug('package_create validate_errs=%r user=%s package=%s data=%r',
              errors, context.get('user'),
              data.get('name'), data_dict)
    if errors:
        # Discard any pending DB state before reporting validation errors.
        model.Session.rollback()
        raise ValidationError(errors)
    # Record this creation as a new revision in the versioned model.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    # Collect the creating user (if resolvable) so default roles can be
    # granted to them on the new package below.
    admins = []
    if user:
        user_obj = model.User.by_name(user.decode('utf8'))
        if user_obj:
            admins = [user_obj]
            data['creator_user_id'] = user_obj.id
    pkg = model_save.package_dict_save(data, context)
    model.setup_default_user_roles(pkg, admins)
    # Needed to let extensions know the package id
    model.Session.flush()
    data['id'] = pkg.id
    # Assign the owning organization via the dedicated action; auth is
    # skipped (package_create was already authorized) and the commit is
    # deferred so it happens with the rest of this transaction.
    context_org_update = context.copy()
    context_org_update['ignore_auth'] = True
    context_org_update['defer_commit'] = True
    _get_action('package_owner_org_update')(context_org_update,
                                            {'id': pkg.id,
                                             'organization_id': pkg.owner_org})
    # Notify IPackageController plugins about the new dataset.
    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.create(pkg)
        item.after_create(context, data)
    if not context.get('defer_commit'):
        model.repo.commit()
    ## need to let rest api create
    context["package"] = pkg
    ## this is added so that the rest controller can make a new location
    context["id"] = pkg.id
    log.debug('Created object %s' % pkg.name)
    # Make sure that a user provided schema is not used on package_show
    context.pop('schema', None)
    return_id_only = context.get('return_id_only', False)
    output = context['id'] if return_id_only \
        else _get_action('package_show')(context, {'id': context['id']})
    return output
def resource_create(context, data_dict):
    '''Appends a new resource to a datasets list of resources.
    :param package_id: id of package that the resource should be added to.
    :type package_id: string
    :param url: url of resource
    :type url: string
    :param revision_id: (optional)
    :type revision_id: string
    :param description: (optional)
    :type description: string
    :param format: (optional)
    :type format: string
    :param hash: (optional)
    :type hash: string
    :param name: (optional)
    :type name: string
    :param resource_type: (optional)
    :type resource_type: string
    :param mimetype: (optional)
    :type mimetype: string
    :param mimetype_inner: (optional)
    :type mimetype_inner: string
    :param webstore_url: (optional)
    :type webstore_url: string
    :param cache_url: (optional)
    :type cache_url: string
    :param size: (optional)
    :type size: int
    :param created: (optional)
    :type created: iso date string
    :param last_modified: (optional)
    :type last_modified: iso date string
    :param cache_last_updated: (optional)
    :type cache_last_updated: iso date string
    :param webstore_last_updated: (optional)
    :type webstore_last_updated: iso date string
    :param upload: (optional)
    :type upload: FieldStorage (optional) needs multipart/form-data
    :returns: the newly created resource
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    # package_id is routing information only; strip it so it does not end
    # up inside the resource dict itself. A url value is required.
    package_id = _get_or_bust(data_dict, 'package_id')
    data_dict.pop('package_id')
    _get_or_bust(data_dict, 'url')
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    _check_access('resource_create', context, data_dict)
    # Let IResourceController plugins modify the resource dict before it
    # is saved.
    for plugin in plugins.PluginImplementations(plugins.IResourceController):
        plugin.before_create(context, data_dict)
    if 'resources' not in pkg_dict:
        pkg_dict['resources'] = []
    upload = uploader.ResourceUpload(data_dict)
    pkg_dict['resources'].append(data_dict)
    try:
        # Defer the commit: the file upload below must succeed before the
        # new resource row is committed.
        context['defer_commit'] = True
        context['use_cache'] = False
        _get_action('package_update')(context, pkg_dict)
        context.pop('defer_commit')
    except ValidationError as e:
        # Surface only the errors belonging to the resource we appended
        # (the last entry of the resources list).
        errors = e.error_dict['resources'][-1]
        raise ValidationError(errors)
    ## Get out resource_id resource from model as it will not appear in
    ## package_show until after commit
    upload.upload(context['package'].resources[-1].id,
                  uploader.get_max_resource_size())
    model.repo.commit()
    ##  Run package show again to get out actual last_resource
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    resource = pkg_dict['resources'][-1]
    # Notify IResourceController plugins about the created resource.
    for plugin in plugins.PluginImplementations(plugins.IResourceController):
        plugin.after_create(context, resource)
    return resource
def resource_view_create(context, data_dict):
    '''Creates a new resource view.

    :param resource_id: id of the resource
    :type resource_id: string
    :param title: the title of the view
    :type title: string
    :param description: a description of the view (optional)
    :type description: string
    :param view_type: type of view
    :type view_type: string
    :param config: options necessary to recreate a view state (optional)
    :type config: JSON string

    :returns: the newly created resource view
    :rtype: dictionary
    '''
    model = context['model']
    schema = (context.get('schema') or
              ckan.logic.schema.default_create_resource_view_schema())
    resource_id = _get_or_bust(data_dict, 'resource_id')
    view_type = _get_or_bust(data_dict, 'view_type')
    view_plugin = datapreview.get_view_plugin(view_type)
    if not view_plugin:
        raise ValidationError(
            {"view_type": "No plugin found for view_type {view_type}".format(
                view_type=view_type
            )}
        )
    # The view plugin may extend the base schema with view-specific options.
    plugin_schema = view_plugin.info().get('schema', {})
    schema.update(plugin_schema)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    _check_access('resource_view_create', context, data_dict)
    # In 'preview' mode the caller only wants the validated data back;
    # nothing is persisted.
    if context.get('preview'):
        return data
    # Place the new view after the resource's existing views (order is the
    # current maximum order + 1, or 0 if there are no views yet).
    max_order = model.Session.query(
        func.max(model.ResourceView.order)
    ).filter_by(resource_id=resource_id).first()
    order = 0
    if max_order[0] is not None:
        order = max_order[0] + 1
    data['order'] = order
    resource_view = model_save.resource_view_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    return model_dictize.resource_view_dictize(resource_view, context)
def related_create(context, data_dict):
    '''Add a new related item to a dataset.

    You must provide your API key in the Authorization header.

    :param title: the title of the related item
    :type title: string
    :param type: the type of the related item, e.g. ``'Application'``,
        ``'Idea'`` or ``'Visualisation'``
    :type type: string
    :param id: the id of the related item (optional)
    :type id: string
    :param description: the description of the related item (optional)
    :type description: string
    :param url: the URL to the related item (optional)
    :type url: string
    :param image_url: the URL to the image for the related item (optional)
    :type image_url: string
    :param dataset_id: the name or id of the dataset that the related item
        belongs to (optional)
    :type dataset_id: string

    :returns: the newly created related item
    :rtype: dictionary
    '''
    model = context['model']
    session = context['session']
    user = context['user']
    userobj = model.User.get(user)
    _check_access('related_create', context, data_dict)
    # The current user becomes the owner of the related item.
    data_dict["owner_id"] = userobj.id
    data, errors = _validate(
        data_dict, ckan.logic.schema.default_related_schema(), context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    related = model_save.related_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    dataset_dict = None
    if 'dataset_id' in data_dict:
        # Attach the related item to its dataset and commit immediately so
        # the association is recorded before building the activity payload.
        dataset = model.Package.get(data_dict['dataset_id'])
        dataset.related.append(related)
        model.repo.commit_and_remove()
        dataset_dict = ckan.lib.dictization.table_dictize(dataset, context)
    session.flush()
    related_dict = model_dictize.related_dictize(related, context)
    # Record a 'new related item' activity in the activity stream.
    activity_dict = {
        'user_id': userobj.id,
        'object_id': related.id,
        'activity_type': 'new related item',
    }
    activity_dict['data'] = {
        'related': related_dict,
        'dataset': dataset_dict,
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context,
                                        activity_dict)
    session.commit()
    context["related"] = related
    context["id"] = related.id
    log.debug('Created object %s' % related.title)
    return related_dict
def package_relationship_create(context, data_dict):
    '''Create a relationship between two datasets (packages).

    You must be authorized to edit both the subject and the object datasets.

    :param subject: the id or name of the dataset that is the subject of the
        relationship
    :type subject: string
    :param object: the id or name of the dataset that is the object of the
        relationship
    :type object: string
    :param type: the type of the relationship, one of ``'depends_on'``,
        ``'dependency_of'``, ``'derives_from'``, ``'has_derivation'``,
        ``'links_to'``, ``'linked_from'``, ``'child_of'`` or ``'parent_of'``
    :type type: string
    :param comment: a comment about the relationship (optional)
    :type comment: string

    :returns: the newly created package relationship
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    schema = context.get('schema') \
        or ckan.logic.schema.default_create_relationship_schema()
    api = context.get('api_version')
    # API v2 serializes package references by id, v1 (and default) by name.
    ref_package_by = 'id' if api == 2 else 'name'
    id, id2, rel_type = _get_or_bust(data_dict, ['subject', 'object', 'type'])
    comment = data_dict.get('comment', u'')
    pkg1 = model.Package.get(id)
    pkg2 = model.Package.get(id2)
    if not pkg1:
        raise NotFound('Subject package %r was not found.' % id)
    if not pkg2:
        # BUG FIX: this previously *returned* the NotFound exception object
        # instead of raising it, so a missing object package slipped through
        # and the caller received an exception instance as the result.
        raise NotFound('Object package %r was not found.' % id2)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    _check_access('package_relationship_create', context, data_dict)
    # If the relationship already exists, update its comment instead of
    # creating a duplicate.
    existing_rels = pkg1.get_relationships_with(pkg2, rel_type)
    if existing_rels:
        return _update_package_relationship(existing_rels[0],
                                            comment, context)
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Create package relationship: %s %s %s') \
        % (pkg1, rel_type, pkg2)
    rel = pkg1.add_relationship(rel_type, pkg2, comment=comment)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    context['relationship'] = rel
    relationship_dicts = rel.as_dict(ref_package_by=ref_package_by)
    return relationship_dicts
def member_create(context, data_dict=None):
    '''Make an object (e.g. a user, dataset or group) a member of a group.

    If the object is already a member of the group then the capacity of the
    membership will be updated.

    You must be authorized to edit the group.

    :param id: the id or name of the group to add the object to
    :type id: string
    :param object: the id or name of the object to add
    :type object: string
    :param object_type: the type of the object being added, e.g. ``'package'``
        or ``'user'``
    :type object_type: string
    :param capacity: the capacity of the membership
    :type capacity: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    # Open a revision for this change; use the caller-supplied message from
    # the context when one is present.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create member object %s') \
            % data_dict.get('name', '')
    group_id, obj_id, obj_type, capacity = \
        _get_or_bust(data_dict, ['id', 'object', 'object_type', 'capacity'])
    group = model.Group.get(group_id)
    if not group:
        raise NotFound('Group was not found.')
    # Resolve the model class for the object type (e.g. 'package' -> Package)
    # and look up the object itself.
    obj_class = ckan.logic.model_name_to_class(model, obj_type)
    obj = obj_class.get(obj_id)
    if not obj:
        raise NotFound('%s was not found.' % obj_type.title())
    _check_access('member_create', context, data_dict)
    # Look up existing, in case it exists
    member = model.Session.query(model.Member).\
        filter(model.Member.table_name == obj_type).\
        filter(model.Member.table_id == obj.id).\
        filter(model.Member.group_id == group.id).\
        filter(model.Member.state == 'active').first()
    if not member:
        member = model.Member(table_name=obj_type,
                              table_id=obj.id,
                              group_id=group.id,
                              state='active')
    # Reuse the existing active membership if found (only its capacity is
    # updated); otherwise add the freshly built Member row.
    member.capacity = capacity
    model.Session.add(member)
    model.repo.commit()
    return model_dictize.member_dictize(member, context)
def _group_or_org_create(context, data_dict, is_org=False):
    '''Shared implementation behind group_create and organization_create.

    Validates the data against the group plugin's schema, saves the group,
    fires plugin hooks and an activity, uploads any group image, and finally
    makes the creating user an admin member of the new group/organization.

    :param is_org: True when creating an organization, False for a group.
    '''
    model = context['model']
    user = context['user']
    session = context['session']
    data_dict['is_organization'] = is_org
    # Handle an uploaded group image, if any was supplied.
    upload = uploader.Upload('group')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    # get the schema from the plugin registered for this group type
    group_plugin = lib_plugins.lookup_group_plugin(
        group_type=data_dict.get('type'))
    try:
        schema = group_plugin.form_to_db_schema_options({
            'type': 'create', 'api': 'api_version' in context,
            'context': context})
    except AttributeError:
        # Plugin does not implement form_to_db_schema_options.
        schema = group_plugin.form_to_db_schema()
    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)
    data, errors = lib_plugins.plugin_validate(
        group_plugin, context, data_dict, schema,
        'organization_create' if is_org else 'group_create')
    log.debug('group_create validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'), data_dict.get('name'), data_dict)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    group = model_save.group_dict_save(data, context)
    # The creating user (if any) gets the default admin role on the group.
    if user:
        admins = [model.User.by_name(user.decode('utf8'))]
    else:
        admins = []
    model.setup_default_user_roles(group, admins)
    # Needed to let extensions know the group id
    session.flush()
    # Notify IOrganizationController/IGroupController plugins of the creation.
    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController
    for item in plugins.PluginImplementations(plugin_type):
        item.create(group)
    # Record the creation in the activity stream.
    if is_org:
        activity_type = 'new organization'
    else:
        activity_type = 'new group'
    user_id = model.User.by_name(user.decode('utf8')).id
    activity_dict = {
        'user_id': user_id,
        'object_id': group.id,
        'activity_type': activity_type,
    }
    activity_dict['data'] = {
        'group': ckan.lib.dictization.table_dictize(group, context)
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context, activity_dict)
    upload.upload(uploader.get_max_image_size())
    if not context.get('defer_commit'):
        model.repo.commit()
    context["group"] = group
    context["id"] = group.id
    # creator of group/org becomes an admin
    # this needs to be after the repo.commit or else revisions break
    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': 'admin',
    }
    member_create_context = {
        'model': model,
        'user': user,
        'ignore_auth': True,  # we are not a member of the group at this point
        'session': session
    }
    logic.get_action('member_create')(member_create_context, member_dict)
    log.debug('Created object %s' % group.name)
    return model_dictize.group_dictize(group, context)
def group_create(context, data_dict):
    '''Create a new group.

    You must be authorized to create groups.

    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface.

    :param name: the name of the group, a string between 2 and 100 characters
        long, containing only lowercase alphanumeric characters, ``-`` and
        ``_``
    :type name: string
    :param id: the id of the group (optional)
    :type id: string
    :param title: the title of the group (optional)
    :type title: string
    :param description: the description of the group (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the group's page
        (optional)
    :type image_url: string
    :param type: the type of the group (optional),
        :py:class:`~ckan.plugins.interfaces.IGroupForm` plugins
        associate themselves with different group types and provide custom
        group handling behaviour for these types
        Cannot be 'organization'
    :type type: string
    :param state: the current state of the group, e.g. ``'active'`` or
        ``'deleted'``, only active groups show up in search results and
        other lists of groups, this parameter will be ignored if you are not
        authorized to change the state of the group (optional, default:
        ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the group's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to groups, each extra
        dictionary should have keys ``'key'`` (a string), ``'value'`` (a
        string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the group, a list
        of dictionaries each with keys ``'name'`` (string, the id or name of
        the dataset) and optionally ``'title'`` (string, the title of the
        dataset)
    :type packages: list of dictionaries
    :param groups: the groups that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the group) and
        optionally ``'capacity'`` (string, the capacity in which the group is
        a member of the group)
    :type groups: list of dictionaries
    :param users: the users that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the user) and
        optionally ``'capacity'`` (string, the capacity in which the user is
        a member of the group)
    :type users: list of dictionaries

    :returns: the newly created group
    :rtype: dictionary
    '''
    # Thin wrapper around _group_or_org_create for the non-organization case:
    # creating an organization through this action is explicitly rejected.
    requested_type = data_dict.get('type')
    if requested_type == 'organization':
        # FIXME better exception?
        raise Exception(_('Trying to create an organization as a group'))
    _check_access('group_create', context, data_dict)
    return _group_or_org_create(context, data_dict)
def organization_create(context, data_dict):
    '''Create a new organization.

    You must be authorized to create organizations.

    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface.

    :param name: the name of the organization, a string between 2 and
        100 characters long, containing only lowercase alphanumeric
        characters, ``-`` and ``_``
    :type name: string
    :param id: the id of the organization (optional)
    :type id: string
    :param title: the title of the organization (optional)
    :type title: string
    :param description: the description of the organization (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the
        organization's page (optional)
    :type image_url: string
    :param state: the current state of the organization, e.g. ``'active'`` or
        ``'deleted'``, only active organizations show up in search results and
        other lists of organizations, this parameter will be ignored if you
        are not authorized to change the state of the organization
        (optional, default: ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the organization's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to organizations,
        each extra dictionary should have keys ``'key'`` (a string),
        ``'value'`` (a string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the organization,
        a list of dictionaries each with keys ``'name'`` (string, the id
        or name of the dataset) and optionally ``'title'`` (string, the
        title of the dataset)
    :type packages: list of dictionaries
    :param users: the users that belong to the organization, a list
        of dictionaries each with key ``'name'`` (string, the id or name
        of the user) and optionally ``'capacity'`` (string, the capacity
        in which the user is a member of the organization)
    :type users: list of dictionaries

    :returns: the newly created organization
    :rtype: dictionary
    '''
    # Thin wrapper around _group_or_org_create: force the group type to
    # 'organization' and delegate to the shared implementation.
    data_dict['type'] = 'organization'
    _check_access('organization_create', context, data_dict)
    return _group_or_org_create(context, data_dict, is_org=True)
@logic.auth_audit_exempt
def rating_create(context, data_dict):
    '''Rate a dataset (package).

    You must provide your API key in the Authorization header.

    :param package: the name or id of the dataset to rate
    :type package: string
    :param rating: the rating to give to the dataset, an integer between 1 and
        5
    :type rating: int

    :returns: a dictionary with two keys: ``'rating average'`` (the average
        rating of the dataset you rated) and ``'rating count'`` (the number of
        times the dataset has been rated)
    :rtype: dictionary

    :raises ValidationError: if the package or rating is missing, the rating
        is not an integer, out of range, or the package does not exist
    '''
    model = context['model']
    user = context.get("user")
    package_ref = data_dict.get('package')
    rating = data_dict.get('rating')
    opts_err = None
    if not package_ref:
        opts_err = _('You must supply a package id or name '
                     '(parameter "package").')
    elif not rating:
        opts_err = _('You must supply a rating (parameter "rating").')
    else:
        try:
            rating_int = int(rating)
        except ValueError:
            opts_err = _('Rating must be an integer value.')
        else:
            package = model.Package.get(package_ref)
            # BUG FIX: compare the converted integer, not the raw input.
            # The raw value may be a string (e.g. u'3') and, in Python 2,
            # string/int comparison would wrongly reject valid ratings.
            if rating_int < ratings.MIN_RATING or \
                    rating_int > ratings.MAX_RATING:
                opts_err = _('Rating must be between %i and %i.') \
                    % (ratings.MIN_RATING, ratings.MAX_RATING)
            elif not package:
                opts_err = _('Not found') + ': %r' % package_ref
    if opts_err:
        raise ValidationError(opts_err)
    user = model.User.by_name(user)
    ratings.set_rating(user, package, rating_int)
    # Re-fetch the package so the returned statistics reflect the newly
    # stored rating.
    package = model.Package.get(package_ref)
    ret_dict = {'rating average': package.get_average_rating(),
                'rating count': len(package.ratings)}
    return ret_dict
def user_create(context, data_dict):
    '''Create a new user.

    You must be authorized to create users.

    :param name: the name of the new user, a string between 2 and 100
        characters in length, containing only lowercase alphanumeric
        characters, ``-`` and ``_``
    :type name: string
    :param email: the email address for the new user
    :type email: string
    :param password: the password of the new user, a string of at least 4
        characters
    :type password: string
    :param id: the id of the new user (optional)
    :type id: string
    :param fullname: the full name of the new user (optional)
    :type fullname: string
    :param about: a description of the new user (optional)
    :type about: string
    :param openid: (optional)
    :type openid: string

    :returns: the newly created user
    :rtype: dictionary
    '''
    model = context['model']
    schema = context.get('schema') or ckan.logic.schema.default_user_schema()
    session = context['session']
    _check_access('user_create', context, data_dict)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    user = model_save.user_dict_save(data, context)
    # Flush the session to cause user.id to be initialised, because
    # activity_create() (below) needs it.
    session.flush()
    # Record a 'new user' activity in the activity stream.
    activity_create_context = {
        'model': model,
        'user': context['user'],
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    activity_dict = {
        'user_id': user.id,
        'object_id': user.id,
        'activity_type': 'new user',
    }
    logic.get_action('activity_create')(activity_create_context, activity_dict)
    if not context.get('defer_commit'):
        model.repo.commit()
    # A new context is required for dictizing the newly constructed user in
    # order that all the new user's data is returned, in particular, the
    # api_key.
    #
    # The context is copied so as not to clobber the caller's context dict.
    user_dictize_context = context.copy()
    user_dictize_context['keep_apikey'] = True
    user_dictize_context['keep_email'] = True
    user_dict = model_dictize.user_dictize(user, user_dictize_context)
    context['user_obj'] = user
    context['id'] = user.id
    model.Dashboard.get(user.id)  # Create dashboard for user.
    log.debug('Created user {name}'.format(name=user.name))
    return user_dict
def user_invite(context, data_dict):
    '''Invite a new user.

    You must be authorized to create group members.

    :param email: the email of the user to be invited to the group
    :type email: string
    :param group_id: the id or name of the group
    :type group_id: string
    :param role: role of the user in the group. One of ``member``, ``editor``,
        or ``admin``
    :type role: string

    :returns: the newly created user
    :rtype: dictionary
    '''
    _check_access('user_invite', context, data_dict)
    schema = context.get('schema',
                         ckan.logic.schema.default_user_invite_schema())
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    # Create the user in the 'pending' state with a generated name and a
    # random placeholder password; the invite email lets the invitee
    # complete registration.
    name = _get_random_username_from_email(data['email'])
    password = str(random.SystemRandom().random())
    data['name'] = name
    data['password'] = password
    data['state'] = ckan.model.State.PENDING
    user_dict = _get_action('user_create')(context, data)
    user = ckan.model.User.get(user_dict['id'])
    # Add the new user to the group with the requested role, then send the
    # invitation email.
    member_dict = {
        'username': user.id,
        'id': data['group_id'],
        'role': data['role']
    }
    _get_action('group_member_create')(context, member_dict)
    mailer.send_invite(user)
    return model_dictize.user_dictize(user, context)
def _get_random_username_from_email(email):
    '''Derive an unused username from the local part of *email*.

    Non-word characters are replaced with ``-`` and a random numeric suffix
    is appended until a name not already taken is found.
    '''
    base = re.sub(r'[^\w]', '-', email.split('@')[0])
    rng = random.SystemRandom()
    # if we can't create a unique user name within this many attempts
    # then something else is probably wrong and we should give up
    for _attempt in range(100):
        candidate = '%s-%d' % (base, rng.random() * 10000)
        if not ckan.model.User.get(candidate):
            return candidate
    return base
## Modifications for rest api
def package_create_rest(context, data_dict):
    '''Legacy REST-API wrapper around package_create.

    Converts the REST-style package dict to the standard form, creates the
    package, and returns it serialized back into the REST-API form.
    '''
    _check_access('package_create_rest', context, data_dict)
    dictized_package = model_save.package_api_to_dict(data_dict, context)
    # package_create stores the new package object in context['package'];
    # its return value is not needed here (previously bound to an unused
    # local).
    _get_action('package_create')(context, dictized_package)
    pkg = context['package']
    package_dict = model_dictize.package_to_api(pkg, context)
    # Record the new id on the caller's dict (read by the REST controller).
    data_dict['id'] = pkg.id
    return package_dict
def group_create_rest(context, data_dict):
    '''Legacy REST-API wrapper around group_create.

    Converts the REST-style group dict to the standard form, creates the
    group, and returns it serialized back into the REST-API form.
    '''
    _check_access('group_create_rest', context, data_dict)
    dictized_group = model_save.group_api_to_dict(data_dict, context)
    # group_create stores the new group object in context['group']; its
    # return value is not needed here (previously bound to an unused local).
    _get_action('group_create')(context, dictized_group)
    group = context['group']
    group_dict = model_dictize.group_to_api(group, context)
    # Record the new id on the caller's dict (read by the REST controller).
    data_dict['id'] = group.id
    return group_dict
def vocabulary_create(context, data_dict):
    '''Create a new tag vocabulary.

    You must be a sysadmin to create vocabularies.

    :param name: the name of the new vocabulary, e.g. ``'Genre'``
    :type name: string
    :param tags: the new tags to add to the new vocabulary, for the format of
        tag dictionaries see :py:func:`tag_create`
    :type tags: list of tag dictionaries

    :returns: the newly-created vocabulary
    :rtype: dictionary
    '''
    model = context['model']
    schema = (context.get('schema')
              or ckan.logic.schema.default_create_vocabulary_schema())
    _check_access('vocabulary_create', context, data_dict)
    validated, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    vocabulary = model_save.vocabulary_dict_save(validated, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug('Created Vocabulary %s' % vocabulary.name)
    return model_dictize.vocabulary_dictize(vocabulary, context)
def activity_create(context, activity_dict, **kw):
    '''Create a new activity stream activity.

    You must be a sysadmin to create new activities.

    :param user_id: the name or id of the user who carried out the activity,
        e.g. ``'seanh'``
    :type user_id: string
    :param object_id: the name or id of the object of the activity, e.g.
        ``'my_dataset'``
    :type object_id: string
    :param activity_type: the type of the activity, this must be an activity
        type that CKAN knows how to render, e.g. ``'new package'``,
        ``'changed user'``, ``'deleted group'`` etc.
    :type activity_type: string
    :param data: any additional data about the activity
    :type data: dictionary

    :returns: the newly created activity (or None when activity streams
        are disabled in the site configuration)
    :rtype: dictionary
    '''
    _check_access('activity_create', context, activity_dict)
    # this action had a ignore_auth param which has been removed
    # removed in 2.2
    if 'ignore_auth' in kw:
        raise Exception('Activity Stream calling parameters have changed '
                        'ignore_auth must be passed in the context not as '
                        'a param')
    # Activity streams can be switched off site-wide; in that case this
    # action is a silent no-op.
    if not paste.deploy.converters.asbool(
            config.get('ckan.activity_streams_enabled', 'true')):
        return
    model = context['model']
    # Any revision_id that the caller attempts to pass in the activity_dict is
    # ignored and overwritten here.
    if getattr(model.Session, 'revision', None):
        activity_dict['revision_id'] = model.Session.revision.id
    else:
        activity_dict['revision_id'] = None
    schema = context.get('schema') or \
        ckan.logic.schema.default_create_activity_schema()
    data, errors = _validate(activity_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    activity = model_save.activity_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug("Created '%s' activity" % activity.activity_type)
    return model_dictize.activity_dictize(activity, context)
def package_relationship_create_rest(context, data_dict):
    '''Legacy REST wrapper: translate URL-style parameter names and
    delegate to package_relationship_create.'''
    # rename keys non-destructively, so explicit 'object'/'type' values in
    # the request body can override the URL parameters
    key_map = {'id': 'subject', 'id2': 'object', 'rel': 'type'}
    data_dict = ckan.logic.action.rename_keys(data_dict, key_map,
                                              destructive=False)
    return _get_action('package_relationship_create')(context, data_dict)
def tag_create(context, data_dict):
    '''Create a new vocabulary tag.

    You must be a sysadmin to create vocabulary tags.

    You can only use this function to create tags that belong to a vocabulary,
    not to create free tags. (To create a new free tag simply add the tag to
    a package, e.g. using the
    :py:func:`~ckan.logic.action.update.package_update` function.)

    :param name: the name for the new tag, a string between 2 and 100
        characters long containing only alphanumeric characters and ``-``,
        ``_`` and ``.``, e.g. ``'Jazz'``
    :type name: string
    :param vocabulary_id: the name or id of the vocabulary that the new tag
        should be added to, e.g. ``'Genre'``
    :type vocabulary_id: string

    :returns: the newly-created tag
    :rtype: dictionary
    '''
    model = context['model']
    _check_access('tag_create', context, data_dict)
    schema = context.get('schema') or \
        ckan.logic.schema.default_create_tag_schema()
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    # BUG FIX: save the *validated* data rather than the raw input dict, so
    # conversions applied by the schema are not silently discarded.
    tag = model_save.tag_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug("Created tag '%s' " % tag)
    return model_dictize.tag_dictize(tag, context)
def follow_user(context, data_dict):
    '''Start following another user.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the user to follow, e.g. ``'joeuser'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the other user
    :rtype: dictionary
    '''
    if 'user' not in context:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))

    model = context['model']
    session = context['session']

    # The logged-in account may have been removed since login, so check the
    # model object too.
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))

    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_user_schema())
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    target_id = data['id']

    # Self-following is not allowed.
    if userobj.id == target_id:
        message = _('You cannot follow yourself')
        raise ValidationError({'message': message}, error_summary=message)

    # Refuse to create a duplicate follower relationship.
    if model.UserFollowingUser.is_following(userobj.id, target_id):
        followed = model.User.get(target_id)
        message = _('You are already following {0}').format(
            followed.display_name)
        raise ValidationError({'message': message}, error_summary=message)

    follower = model_save.follower_dict_save(
        data, context, model.UserFollowingUser)
    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug(u'User {follower} started following user {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_user_dictize(follower, context)
def follow_dataset(context, data_dict):
    '''Start following a dataset.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the dataset to follow, e.g. ``'warandpeace'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the dataset
    :rtype: dictionary
    '''
    # A logged-in user is required; the account may also have been removed
    # since login, so the model object is checked too.
    if not 'user' in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))
    model = context['model']
    session = context['session']
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_dataset_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Don't let a user follow a dataset she is already following.
    if model.UserFollowingDataset.is_following(userobj.id,
                                               validated_data_dict['id']):
        # FIXME really package model should have this logic and provide
        # 'display_name' like users and groups
        pkgobj = model.Package.get(validated_data_dict['id'])
        name = pkgobj.title or pkgobj.name or pkgobj.id
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)
    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingDataset)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug(u'User {follower} started following dataset {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_dataset_dictize(follower, context)
def _group_or_org_member_create(context, data_dict, is_org=False):
    '''Shared implementation behind group_member_create and
    organization_member_create: add a user to a group/organization with the
    given role.

    :param data_dict: must contain ``id`` (group/org id or name),
        ``username`` (user id or name) and ``role``.
    :raises ValidationError: if the input fails the member schema or the
        user does not exist
    '''
    model = context['model']
    user = context['user']
    session = context['session']
    schema = ckan.logic.schema.member_schema()
    data, errors = _validate(data_dict, schema, context)
    # BUG FIX: validation errors were previously computed but silently
    # ignored, letting invalid roles/ids through to member_create.
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    username = _get_or_bust(data_dict, 'username')
    role = data_dict.get('role')
    group_id = data_dict.get('id')
    group = model.Group.get(group_id)
    result = model.User.get(username)
    if result:
        user_id = result.id
    else:
        message = _(u'User {username} does not exist.').format(
            username=username)
        raise ValidationError({'message': message}, error_summary=message)
    # Delegate to member_create, which updates the capacity if the user is
    # already a member instead of creating a duplicate row.
    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': role,
    }
    member_create_context = {
        'model': model,
        'user': user,
        'session': session,
        'ignore_auth': context.get('ignore_auth'),
    }
    logic.get_action('member_create')(member_create_context, member_dict)
def group_member_create(context, data_dict):
    '''Make a user a member of a group.

    You must be authorized to edit the group.

    :param id: the id or name of the group
    :type id: string
    :param username: name or id of the user to be made member of the group
    :type username: string
    :param role: role of the user in the group. One of ``member``, ``editor``,
        or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Authorize, then delegate to the shared group/org implementation.
    _check_access('group_member_create', context, data_dict)
    return _group_or_org_member_create(context, data_dict)
def organization_member_create(context, data_dict):
    '''Make a user a member of an organization.

    You must be authorized to edit the organization.

    :param id: the id or name of the organization
    :type id: string
    :param username: name or id of the user to be made member of the
        organization
    :type username: string
    :param role: role of the user in the organization. One of ``member``,
        ``editor``, or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Authorize, then delegate to the shared group/org implementation.
    _check_access('organization_member_create', context, data_dict)
    return _group_or_org_member_create(context, data_dict, is_org=True)
def follow_group(context, data_dict):
    '''Start following a group.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the group to follow, e.g. ``'roger'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the group
    :rtype: dictionary
    '''
    # Two separate anonymous-user checks: 'user' may be missing from the
    # context entirely, or present but not resolvable to a User object.
    if 'user' not in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))
    model = context['model']
    # NOTE(review): 'session' is bound but unused in this function.
    session = context['session']
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))
    schema = context.get('schema',
                         ckan.logic.schema.default_follow_group_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Don't let a user follow a group she is already following.
    if model.UserFollowingGroup.is_following(userobj.id,
                                             validated_data_dict['id']):
        groupobj = model.Group.get(validated_data_dict['id'])
        name = groupobj.display_name
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)
    # Persist the follower relationship; callers can set 'defer_commit' in
    # the context to batch this commit with other work.
    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingGroup)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug(u'User {follower} started following group {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_group_dictize(follower, context)
'''API functions for adding data to CKAN.'''
import logging
import random
import re
from pylons import config
import paste.deploy.converters
from sqlalchemy import func
import ckan.lib.plugins as lib_plugins
import ckan.logic as logic
import ckan.rating as ratings
import ckan.plugins as plugins
import ckan.lib.dictization
import ckan.logic.action
import ckan.logic.schema
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.dictization.model_save as model_save
import ckan.lib.navl.dictization_functions
import ckan.lib.uploader as uploader
import ckan.lib.navl.validators as validators
import ckan.lib.mailer as mailer
import ckan.lib.datapreview as datapreview
from ckan.common import _
# FIXME this looks nasty and should be shared better
from ckan.logic.action.update import _update_package_relationship
log = logging.getLogger(__name__)
# Define some shortcuts
# Ensure they are module-private so that they don't get loaded as available
# actions in the action API.
_validate = ckan.lib.navl.dictization_functions.validate
_check_access = logic.check_access
_get_action = logic.get_action
ValidationError = logic.ValidationError
NotFound = logic.NotFound
_get_or_bust = logic.get_or_bust
def package_create(context, data_dict):
    '''Create a new dataset (package).

    You must be authorized to create new datasets. If you specify any groups
    for the new dataset, you must also be authorized to edit these groups.

    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugin interface.

    :param name: the name of the new dataset, must be between 2 and 100
        characters long and contain only lowercase alphanumeric characters,
        ``-`` and ``_``, e.g. ``'warandpeace'``
    :type name: string
    :param title: the title of the dataset (optional, default: same as
        ``name``)
    :type title: string
    :param author: the name of the dataset's author (optional)
    :type author: string
    :param author_email: the email address of the dataset's author (optional)
    :type author_email: string
    :param maintainer: the name of the dataset's maintainer (optional)
    :type maintainer: string
    :param maintainer_email: the email address of the dataset's maintainer
        (optional)
    :type maintainer_email: string
    :param license_id: the id of the dataset's license, see
        :py:func:`~ckan.logic.action.get.license_list` for available values
        (optional)
    :type license_id: license id string
    :param notes: a description of the dataset (optional)
    :type notes: string
    :param url: a URL for the dataset's source (optional)
    :type url: string
    :param version: (optional)
    :type version: string, no longer than 100 characters
    :param state: the current state of the dataset, e.g. ``'active'`` or
        ``'deleted'``, only active datasets show up in search results and
        other lists of datasets, this parameter will be ignored if you are not
        authorized to change the state of the dataset (optional, default:
        ``'active'``)
    :type state: string
    :param type: the type of the dataset (optional),
        :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugins
        associate themselves with different dataset types and provide custom
        dataset handling behaviour for these types
    :type type: string
    :param resources: the dataset's resources, see
        :py:func:`resource_create` for the format of resource dictionaries
        (optional)
    :type resources: list of resource dictionaries
    :param tags: the dataset's tags, see :py:func:`tag_create` for the format
        of tag dictionaries (optional)
    :type tags: list of tag dictionaries
    :param extras: the dataset's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to datasets, each extra
        dictionary should have keys ``'key'`` (a string), ``'value'`` (a
        string)
    :type extras: list of dataset extra dictionaries
    :param relationships_as_object: see :py:func:`package_relationship_create`
        for the format of relationship dictionaries (optional)
    :type relationships_as_object: list of relationship dictionaries
    :param relationships_as_subject: see :py:func:`package_relationship_create`
        for the format of relationship dictionaries (optional)
    :type relationships_as_subject: list of relationship dictionaries
    :param groups: the groups to which the dataset belongs (optional), each
        group dictionary should have one or more of the following keys which
        identify an existing group:
        ``'id'`` (the id of the group, string), ``'name'`` (the name of the
        group, string), ``'title'`` (the title of the group, string), to see
        which groups exist call :py:func:`~ckan.logic.action.get.group_list`
    :type groups: list of dictionaries
    :param owner_org: the id of the dataset's owning organization, see
        :py:func:`~ckan.logic.action.get.organization_list` or
        :py:func:`~ckan.logic.action.get.organization_list_for_user` for
        available values (optional)
    :type owner_org: string

    :returns: the newly created dataset (unless 'return_id_only' is set to True
        in the context, in which case just the dataset id will
        be returned)
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    package_type = data_dict.get('type')
    # The dataset-form plugin registered for this type supplies the schema,
    # unless the caller overrides it through the context.
    package_plugin = lib_plugins.lookup_package_plugin(package_type)
    if 'schema' in context:
        schema = context['schema']
    else:
        schema = package_plugin.create_package_schema()
    _check_access('package_create', context, data_dict)
    if 'api_version' not in context:
        # check_data_dict() is deprecated. If the package_plugin has a
        # check_data_dict() we'll call it, if it doesn't have the method we'll
        # do nothing.
        check_data_dict = getattr(package_plugin, 'check_data_dict', None)
        if check_data_dict:
            try:
                check_data_dict(data_dict, schema)
            except TypeError:
                # Old plugins do not support passing the schema so we need
                # to ensure they still work
                package_plugin.check_data_dict(data_dict)
    data, errors = lib_plugins.plugin_validate(
        package_plugin, context, data_dict, schema, 'package_create')
    log.debug('package_create validate_errs=%r user=%s package=%s data=%r',
              errors, context.get('user'),
              data.get('name'), data_dict)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Record this change as a new revision in CKAN's versioned model.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    admins = []
    if user:
        user_obj = model.User.by_name(user.decode('utf8'))
        if user_obj:
            admins = [user_obj]
            data['creator_user_id'] = user_obj.id
    pkg = model_save.package_dict_save(data, context)
    model.setup_default_user_roles(pkg, admins)
    # Needed to let extensions know the package id
    model.Session.flush()
    data['id'] = pkg.id
    # Set the owning organization with auth disabled: the creating user may
    # not otherwise be authorized to change a dataset's organization.
    context_org_update = context.copy()
    context_org_update['ignore_auth'] = True
    context_org_update['defer_commit'] = True
    _get_action('package_owner_org_update')(context_org_update,
                                            {'id': pkg.id,
                                             'organization_id': pkg.owner_org})
    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.create(pkg)
        item.after_create(context, data)
    if not context.get('defer_commit'):
        model.repo.commit()
    ## need to let rest api create
    context["package"] = pkg
    ## this is added so that the rest controller can make a new location
    context["id"] = pkg.id
    log.debug('Created object %s' % pkg.name)
    # Make sure that a user provided schema is not used on package_show
    context.pop('schema', None)
    return_id_only = context.get('return_id_only', False)
    output = context['id'] if return_id_only \
        else _get_action('package_show')(context, {'id': context['id']})
    return output
def resource_create(context, data_dict):
    '''Appends a new resource to a datasets list of resources.

    :param package_id: id of package that the resource should be added to.
    :type package_id: string
    :param url: url of resource
    :type url: string
    :param revision_id: (optional)
    :type revision_id: string
    :param description: (optional)
    :type description: string
    :param format: (optional)
    :type format: string
    :param hash: (optional)
    :type hash: string
    :param name: (optional)
    :type name: string
    :param resource_type: (optional)
    :type resource_type: string
    :param mimetype: (optional)
    :type mimetype: string
    :param mimetype_inner: (optional)
    :type mimetype_inner: string
    :param webstore_url: (optional)
    :type webstore_url: string
    :param cache_url: (optional)
    :type cache_url: string
    :param size: (optional)
    :type size: int
    :param created: (optional)
    :type created: iso date string
    :param last_modified: (optional)
    :type last_modified: iso date string
    :param cache_last_updated: (optional)
    :type cache_last_updated: iso date string
    :param webstore_last_updated: (optional)
    :type webstore_last_updated: iso date string
    :param upload: (optional)
    :type upload: FieldStorage (optional) needs multipart/form-data

    :returns: the newly created resource
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    package_id = _get_or_bust(data_dict, 'package_id')
    data_dict.pop('package_id')
    # 'url' is mandatory; only its presence is checked here (the value is
    # validated by the package schema during package_update below).
    _get_or_bust(data_dict, 'url')
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    _check_access('resource_create', context, data_dict)
    for plugin in plugins.PluginImplementations(plugins.IResourceController):
        plugin.before_create(context, data_dict)
    if not 'resources' in pkg_dict:
        pkg_dict['resources'] = []
    upload = uploader.ResourceUpload(data_dict)
    pkg_dict['resources'].append(data_dict)
    try:
        # Defer the commit so the uploaded file can be stored before the
        # resource row becomes visible; errors for the appended resource are
        # re-raised on their own (last element of the 'resources' errors).
        context['defer_commit'] = True
        context['use_cache'] = False
        _get_action('package_update')(context, pkg_dict)
        context.pop('defer_commit')
    except ValidationError, e:
        errors = e.error_dict['resources'][-1]
        raise ValidationError(errors)
    ## Get out resource_id resource from model as it will not appear in
    ## package_show until after commit
    upload.upload(context['package'].resources[-1].id,
                  uploader.get_max_resource_size())
    model.repo.commit()
    ## Run package show again to get out actual last_resource
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    resource = pkg_dict['resources'][-1]
    for plugin in plugins.PluginImplementations(plugins.IResourceController):
        plugin.after_create(context, resource)
    return resource
def resource_view_create(context, data_dict):
    '''Creates a new resource view.

    :param resource_id: id of the resource
    :type resource_id: string
    :param title: the title of the view
    :type title: string
    :param description: a description of the view (optional)
    :type description: string
    :param view_type: type of view
    :type view_type: string
    :param config: options necessary to recreate a view state (optional)
    :type config: JSON string

    :returns: the newly created resource view
    :rtype: dictionary
    '''
    model = context['model']
    schema = (context.get('schema') or
              ckan.logic.schema.default_create_resource_view_schema())
    resource_id = _get_or_bust(data_dict, 'resource_id')
    view_type = _get_or_bust(data_dict, 'view_type')
    view_plugin = datapreview.get_view_plugin(view_type)
    if not view_plugin:
        raise ValidationError(
            {"view_type": "No plugin found for view_type {view_type}".format(
                view_type=view_type
            )}
        )
    # The view plugin may extend the base schema with its own config fields.
    plugin_schema = view_plugin.info().get('schema', {})
    schema.update(plugin_schema)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    _check_access('resource_view_create', context, data_dict)
    # In 'preview' mode only validation is wanted; nothing is saved.
    if context.get('preview'):
        return data
    # Append the new view after the resource's existing views: its 'order'
    # is one past the current maximum (or 0 if there are no views yet).
    max_order = model.Session.query(
        func.max(model.ResourceView.order)
    ).filter_by(resource_id=resource_id).first()
    order = 0
    if max_order[0] is not None:
        order = max_order[0] + 1
    data['order'] = order
    resource_view = model_save.resource_view_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    return model_dictize.resource_view_dictize(resource_view, context)
def related_create(context, data_dict):
    '''Add a new related item to a dataset.

    You must provide your API key in the Authorization header.

    :param title: the title of the related item
    :type title: string
    :param type: the type of the related item, e.g. ``'Application'``,
        ``'Idea'`` or ``'Visualisation'``
    :type type: string
    :param id: the id of the related item (optional)
    :type id: string
    :param description: the description of the related item (optional)
    :type description: string
    :param url: the URL to the related item (optional)
    :type url: string
    :param image_url: the URL to the image for the related item (optional)
    :type image_url: string
    :param dataset_id: the name or id of the dataset that the related item
        belongs to (optional)
    :type dataset_id: string

    :returns: the newly created related item
    :rtype: dictionary
    '''
    model = context['model']
    session = context['session']
    user = context['user']
    userobj = model.User.get(user)
    _check_access('related_create', context, data_dict)
    # NOTE(review): assumes the auth check guarantees a logged-in user;
    # if 'user' did not resolve, userobj is None and this line would raise
    # AttributeError -- confirm against the auth function.
    data_dict["owner_id"] = userobj.id
    data, errors = _validate(
        data_dict, ckan.logic.schema.default_related_schema(), context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    related = model_save.related_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    # Optionally attach the related item to a dataset; this needs a second
    # commit because the first one already closed the unit of work.
    dataset_dict = None
    if 'dataset_id' in data_dict:
        dataset = model.Package.get(data_dict['dataset_id'])
        dataset.related.append(related)
        model.repo.commit_and_remove()
        dataset_dict = ckan.lib.dictization.table_dictize(dataset, context)
    session.flush()
    related_dict = model_dictize.related_dictize(related, context)
    # Record a 'new related item' event in the activity stream.
    activity_dict = {
        'user_id': userobj.id,
        'object_id': related.id,
        'activity_type': 'new related item',
    }
    activity_dict['data'] = {
        'related': related_dict,
        'dataset': dataset_dict,
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context,
                                        activity_dict)
    session.commit()
    context["related"] = related
    context["id"] = related.id
    log.debug('Created object %s' % related.title)
    return related_dict
def package_relationship_create(context, data_dict):
    '''Create a relationship between two datasets (packages).

    You must be authorized to edit both the subject and the object datasets.

    :param subject: the id or name of the dataset that is the subject of the
        relationship
    :type subject: string
    :param object: the id or name of the dataset that is the object of the
        relationship
    :param type: the type of the relationship, one of ``'depends_on'``,
        ``'dependency_of'``, ``'derives_from'``, ``'has_derivation'``,
        ``'links_to'``, ``'linked_from'``, ``'child_of'`` or ``'parent_of'``
    :type type: string
    :param comment: a comment about the relationship (optional)
    :type comment: string

    :returns: the newly created package relationship
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    schema = context.get('schema') \
        or ckan.logic.schema.default_create_relationship_schema()
    api = context.get('api_version')
    # API v2 references packages by id, v1 (and the web UI) by name.
    ref_package_by = 'id' if api == 2 else 'name'
    id, id2, rel_type = _get_or_bust(data_dict, ['subject', 'object', 'type'])
    comment = data_dict.get('comment', u'')
    pkg1 = model.Package.get(id)
    pkg2 = model.Package.get(id2)
    if not pkg1:
        raise NotFound('Subject package %r was not found.' % id)
    if not pkg2:
        # Bug fix: this exception was previously *returned* instead of
        # raised, so a missing object package silently fell through to the
        # validation and creation steps below.
        raise NotFound('Object package %r was not found.' % id2)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    _check_access('package_relationship_create', context, data_dict)
    # Create a Package Relationship.
    existing_rels = pkg1.get_relationships_with(pkg2, rel_type)
    if existing_rels:
        # An identical relationship already exists: just update its comment.
        return _update_package_relationship(existing_rels[0],
                                            comment, context)
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Create package relationship: %s %s %s') \
        % (pkg1, rel_type, pkg2)
    rel = pkg1.add_relationship(rel_type, pkg2, comment=comment)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    context['relationship'] = rel
    relationship_dicts = rel.as_dict(ref_package_by=ref_package_by)
    return relationship_dicts
def member_create(context, data_dict=None):
    '''Make an object (e.g. a user, dataset or group) a member of a group.

    If the object is already a member of the group then the capacity of the
    membership will be updated.

    You must be authorized to edit the group.

    :param id: the id or name of the group to add the object to
    :type id: string
    :param object: the id or name of the object to add
    :type object: string
    :param object_type: the type of the object being added, e.g. ``'package'``
        or ``'user'``
    :type object_type: string
    :param capacity: the capacity of the membership
    :type capacity: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    # Record the change as a new revision.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create member object %s') \
            % data_dict.get('name', '')
    group_id, obj_id, obj_type, capacity = \
        _get_or_bust(data_dict, ['id', 'object', 'object_type', 'capacity'])
    group = model.Group.get(group_id)
    if not group:
        raise NotFound('Group was not found.')
    # Resolve the member object through its model class, e.g. 'package' ->
    # model.Package, 'user' -> model.User.
    obj_class = ckan.logic.model_name_to_class(model, obj_type)
    obj = obj_class.get(obj_id)
    if not obj:
        raise NotFound('%s was not found.' % obj_type.title())
    _check_access('member_create', context, data_dict)
    # Look up existing, in case it exists
    member = model.Session.query(model.Member).\
        filter(model.Member.table_name == obj_type).\
        filter(model.Member.table_id == obj.id).\
        filter(model.Member.group_id == group.id).\
        filter(model.Member.state == 'active').first()
    if not member:
        member = model.Member(table_name=obj_type,
                              table_id=obj.id,
                              group_id=group.id,
                              state='active')
    # Upsert semantics: an existing active membership just gets its
    # capacity updated.
    member.capacity = capacity
    model.Session.add(member)
    model.repo.commit()
    return model_dictize.member_dictize(member, context)
def _group_or_org_create(context, data_dict, is_org=False):
    '''Shared implementation for ``group_create`` and ``organization_create``.

    Validates ``data_dict`` against the group plugin's schema, saves the
    group/organization, fires the plugin hooks and an activity event, and
    makes the creating user an admin member.

    :param is_org: create an organization instead of a group
    :type is_org: boolean
    '''
    model = context['model']
    user = context['user']
    session = context['session']
    data_dict['is_organization'] = is_org
    # Handle an uploaded group image (if any) alongside the image_url field.
    upload = uploader.Upload('group')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    # get the schema
    group_plugin = lib_plugins.lookup_group_plugin(
        group_type=data_dict.get('type'))
    try:
        schema = group_plugin.form_to_db_schema_options({
            'type': 'create', 'api': 'api_version' in context,
            'context': context})
    except AttributeError:
        schema = group_plugin.form_to_db_schema()
    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)
    data, errors = lib_plugins.plugin_validate(
        group_plugin, context, data_dict, schema,
        'organization_create' if is_org else 'group_create')
    log.debug('group_create validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'), data_dict.get('name'), data_dict)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    # Record the change as a new revision.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    group = model_save.group_dict_save(data, context)
    if user:
        admins = [model.User.by_name(user.decode('utf8'))]
    else:
        admins = []
    model.setup_default_user_roles(group, admins)
    # Needed to let extensions know the group id
    session.flush()
    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController
    for item in plugins.PluginImplementations(plugin_type):
        item.create(group)
    # Record a 'new group'/'new organization' event in the activity stream.
    if is_org:
        activity_type = 'new organization'
    else:
        activity_type = 'new group'
    user_id = model.User.by_name(user.decode('utf8')).id
    activity_dict = {
        'user_id': user_id,
        'object_id': group.id,
        'activity_type': activity_type,
    }
    activity_dict['data'] = {
        'group': ckan.lib.dictization.table_dictize(group, context)
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context, activity_dict)
    upload.upload(uploader.get_max_image_size())
    if not context.get('defer_commit'):
        model.repo.commit()
    context["group"] = group
    context["id"] = group.id
    # creator of group/org becomes an admin
    # this needs to be after the repo.commit or else revisions break
    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': 'admin',
    }
    member_create_context = {
        'model': model,
        'user': user,
        'ignore_auth': True,  # we are not a member of the group at this point
        'session': session
    }
    logic.get_action('member_create')(member_create_context, member_dict)
    log.debug('Created object %s' % group.name)
    return model_dictize.group_dictize(group, context)
def group_create(context, data_dict):
    '''Create a new group.

    You must be authorized to create groups.

    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface.

    :param name: the name of the group, a string between 2 and 100 characters
        long, containing only lowercase alphanumeric characters, ``-`` and
        ``_``
    :type name: string
    :param id: the id of the group (optional)
    :type id: string
    :param title: the title of the group (optional)
    :type title: string
    :param description: the description of the group (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the group's page
        (optional)
    :type image_url: string
    :param type: the type of the group (optional),
        :py:class:`~ckan.plugins.interfaces.IGroupForm` plugins
        associate themselves with different group types and provide custom
        group handling behaviour for these types.
        Cannot be 'organization'
    :type type: string
    :param state: the current state of the group, e.g. ``'active'`` or
        ``'deleted'``, only active groups show up in search results and
        other lists of groups, this parameter will be ignored if you are not
        authorized to change the state of the group (optional, default:
        ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the group's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to groups, each extra
        dictionary should have keys ``'key'`` (a string), ``'value'`` (a
        string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the group, a list
        of dictionaries each with keys ``'name'`` (string, the id or name of
        the dataset) and optionally ``'title'`` (string, the title of the
        dataset)
    :type packages: list of dictionaries
    :param groups: the groups that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the group) and
        optionally ``'capacity'`` (string, the capacity in which the group is
        a member of the group)
    :type groups: list of dictionaries
    :param users: the users that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the user) and
        optionally ``'capacity'`` (string, the capacity in which the user is
        a member of the group)
    :type users: list of dictionaries

    :returns: the newly created group
    :rtype: dictionary
    '''
    # Thin wrapper around _group_or_org_create() for plain (non-organization)
    # groups; organizations must go through organization_create() instead.
    requested_type = data_dict.get('type')
    if requested_type == 'organization':
        # FIXME better exception?
        raise Exception(_('Trying to create an organization as a group'))
    _check_access('group_create', context, data_dict)
    return _group_or_org_create(context, data_dict, is_org=False)
def organization_create(context, data_dict):
    '''Create a new organization.

    You must be authorized to create organizations.

    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the
    :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface.

    :param name: the name of the organization, a string between 2 and
        100 characters long, containing only lowercase alphanumeric
        characters, ``-`` and ``_``
    :type name: string
    :param id: the id of the organization (optional)
    :type id: string
    :param title: the title of the organization (optional)
    :type title: string
    :param description: the description of the organization (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the
        organization's page (optional)
    :type image_url: string
    :param state: the current state of the organization, e.g. ``'active'`` or
        ``'deleted'``, only active organizations show up in search results and
        other lists of organizations, this parameter will be ignored if you
        are not authorized to change the state of the organization
        (optional, default: ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the organization's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to organizations,
        each extra dictionary should have keys ``'key'`` (a string),
        ``'value'`` (a string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the organization,
        a list of dictionaries each with keys ``'name'`` (string, the id
        or name of the dataset) and optionally ``'title'`` (string, the
        title of the dataset)
    :type packages: list of dictionaries
    :param users: the users that belong to the organization, a list
        of dictionaries each with key ``'name'`` (string, the id or name
        of the user) and optionally ``'capacity'`` (string, the capacity
        in which the user is a member of the organization)
    :type users: list of dictionaries

    :returns: the newly created organization
    :rtype: dictionary
    '''
    # Thin wrapper around _group_or_org_create(): force the group type to
    # 'organization' and delegate with is_org enabled.
    data_dict['type'] = 'organization'
    _check_access('organization_create', context, data_dict)
    result = _group_or_org_create(context, data_dict, is_org=True)
    return result
@logic.auth_audit_exempt
def rating_create(context, data_dict):
    '''Rate a dataset (package).

    You must provide your API key in the Authorization header.

    :param package: the name or id of the dataset to rate
    :type package: string
    :param rating: the rating to give to the dataset, an integer between 1 and
        5
    :type rating: int

    :returns: a dictionary with two keys: ``'rating average'`` (the average
        rating of the dataset you rated) and ``'rating count'`` (the number of
        times the dataset has been rated)
    :rtype: dictionary
    '''
    model = context['model']
    user = context.get("user")
    package_ref = data_dict.get('package')
    rating = data_dict.get('rating')
    opts_err = None
    if not package_ref:
        opts_err = _('You must supply a package id or name '
                     '(parameter "package").')
    elif not rating:
        opts_err = _('You must supply a rating (parameter "rating").')
    else:
        try:
            rating_int = int(rating)
        except ValueError:
            opts_err = _('Rating must be an integer value.')
        else:
            package = model.Package.get(package_ref)
            # Bug fix: compare the parsed integer, not the raw input. The
            # raw value may arrive as a string (e.g. u'3'), which under
            # Python 2 always compares greater than any int and so was
            # wrongly rejected as out of range.
            if rating_int < ratings.MIN_RATING or \
                    rating_int > ratings.MAX_RATING:
                opts_err = _('Rating must be between %i and %i.') \
                    % (ratings.MIN_RATING, ratings.MAX_RATING)
            elif not package:
                opts_err = _('Not found') + ': %r' % package_ref
    if opts_err:
        raise ValidationError(opts_err)
    user = model.User.by_name(user)
    ratings.set_rating(user, package, rating_int)
    # Re-fetch the package so the returned aggregates reflect the new rating.
    package = model.Package.get(package_ref)
    ret_dict = {'rating average': package.get_average_rating(),
                'rating count': len(package.ratings)}
    return ret_dict
def user_create(context, data_dict):
    '''Create a new user.

    You must be authorized to create users.

    :param name: the name of the new user, a string between 2 and 100
        characters in length, containing only lowercase alphanumeric
        characters, ``-`` and ``_``
    :type name: string
    :param email: the email address for the new user
    :type email: string
    :param password: the password of the new user, a string of at least 4
        characters
    :type password: string
    :param id: the id of the new user (optional)
    :type id: string
    :param fullname: the full name of the new user (optional)
    :type fullname: string
    :param about: a description of the new user (optional)
    :type about: string
    :param openid: (optional)
    :type openid: string

    :returns: the newly created user
    :rtype: dictionary
    '''
    model = context['model']
    schema = context.get('schema') or ckan.logic.schema.default_user_schema()
    session = context['session']
    _check_access('user_create', context, data_dict)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    user = model_save.user_dict_save(data, context)
    # Flush the session to cause user.id to be initialised, because
    # activity_create() (below) needs it.
    session.flush()
    activity_create_context = {
        'model': model,
        'user': context['user'],
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    activity_dict = {
        'user_id': user.id,
        'object_id': user.id,
        'activity_type': 'new user',
    }
    logic.get_action('activity_create')(activity_create_context, activity_dict)
    if not context.get('defer_commit'):
        model.repo.commit()
    # A new context is required for dictizing the newly constructed user in
    # order that all the new user's data is returned, in particular, the
    # api_key.
    #
    # The context is copied so as not to clobber the caller's context dict.
    user_dictize_context = context.copy()
    user_dictize_context['keep_apikey'] = True
    user_dictize_context['keep_email'] = True
    user_dict = model_dictize.user_dictize(user, user_dictize_context)
    context['user_obj'] = user
    context['id'] = user.id
    model.Dashboard.get(user.id)  # Create dashboard for user.
    log.debug('Created user {name}'.format(name=user.name))
    return user_dict
def user_invite(context, data_dict):
    '''Invite a new user.

    You must be authorized to create group members.

    :param email: the email of the user to be invited to the group
    :type email: string
    :param group_id: the id or name of the group
    :type group_id: string
    :param role: role of the user in the group. One of ``member``, ``editor``,
        or ``admin``
    :type role: string

    :returns: the newly created user
    :rtype: dictionary
    '''
    _check_access('user_invite', context, data_dict)
    schema = context.get('schema',
                         ckan.logic.schema.default_user_invite_schema())
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    # The invited user gets a name derived from their email address and a
    # throwaway random password; state stays 'pending' until they accept.
    name = _get_random_username_from_email(data['email'])
    password = str(random.SystemRandom().random())
    data['name'] = name
    data['password'] = password
    data['state'] = ckan.model.State.PENDING
    user_dict = _get_action('user_create')(context, data)
    user = ckan.model.User.get(user_dict['id'])
    member_dict = {
        'username': user.id,
        'id': data['group_id'],
        'role': data['role']
    }
    _get_action('group_member_create')(context, member_dict)
    # Email the invite link to the new user.
    mailer.send_invite(user)
    return model_dictize.user_dictize(user, context)
def _get_random_username_from_email(email):
    '''Derive an unused username from an email address.

    Takes the localpart of *email*, replaces every non-word character with
    ``-`` and appends a random ``0``-``9999`` suffix until a name is found
    that no existing user has.

    :param email: the invitee's email address
    :type email: string
    :returns: a username that (best-effort) does not collide with an
        existing user
    :rtype: string
    '''
    localpart = email.split('@')[0]
    cleaned_localpart = re.sub(r'[^\w]', '-', localpart)
    # if we can't create a unique user name within this many attempts
    # then something else is probably wrong and we should give up
    max_name_creation_attempts = 100
    for _ in range(max_name_creation_attempts):
        # randint() replaces the original float-multiply-then-truncate
        # construction (SystemRandom().random() * 10000 formatted with
        # %d); the suffix range 0-9999 is unchanged.
        random_number = random.SystemRandom().randint(0, 9999)
        name = '%s-%d' % (cleaned_localpart, random_number)
        if not ckan.model.User.get(name):
            return name
    # Fall back to the bare localpart (may still collide with an
    # existing user).
    return cleaned_localpart
## Modifications for rest api
def package_create_rest(context, data_dict):
    '''Create a dataset from the legacy REST API dict format.

    Converts the REST-API-style *data_dict* into the standard form,
    delegates to the ``package_create`` action, and returns the created
    package rendered back in the REST API format.
    '''
    _check_access('package_create_rest', context, data_dict)
    dictized_package = model_save.package_api_to_dict(data_dict, context)
    # The created package is picked up from the context below, so the
    # action's return value is not needed (the original bound it to an
    # unused local).
    _get_action('package_create')(context, dictized_package)
    pkg = context['package']
    package_dict = model_dictize.package_to_api(pkg, context)
    # Expose the new id on the caller's dict as well.
    data_dict['id'] = pkg.id
    return package_dict
def group_create_rest(context, data_dict):
    '''Create a group from the legacy REST API dict format.

    Converts the REST-API-style *data_dict* into the standard form,
    delegates to the ``group_create`` action, and returns the created
    group rendered back in the REST API format.
    '''
    _check_access('group_create_rest', context, data_dict)
    dictized_group = model_save.group_api_to_dict(data_dict, context)
    # The created group is picked up from the context below, so the
    # action's return value is not needed (the original bound it to an
    # unused local).
    _get_action('group_create')(context, dictized_group)
    group = context['group']
    group_dict = model_dictize.group_to_api(group, context)
    # Expose the new id on the caller's dict as well.
    data_dict['id'] = group.id
    return group_dict
def vocabulary_create(context, data_dict):
    '''Create a new tag vocabulary.

    You must be a sysadmin to create vocabularies.

    :param name: the name of the new vocabulary, e.g. ``'Genre'``
    :type name: string
    :param tags: the new tags to add to the new vocabulary, for the format of
        tag dictionaries see :py:func:`tag_create`
    :type tags: list of tag dictionaries

    :returns: the newly-created vocabulary
    :rtype: dictionary
    '''
    model = context['model']
    # The caller may override the validation schema via the context.
    schema = context.get('schema') or \
        ckan.logic.schema.default_create_vocabulary_schema()
    _check_access('vocabulary_create', context, data_dict)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        # Discard any pending session state before reporting the errors.
        model.Session.rollback()
        raise ValidationError(errors)
    vocabulary = model_save.vocabulary_dict_save(data, context)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug('Created Vocabulary %s' % vocabulary.name)
    return model_dictize.vocabulary_dictize(vocabulary, context)
def activity_create(context, activity_dict, **kw):
    '''Create a new activity stream activity.

    You must be a sysadmin to create new activities.

    :param user_id: the name or id of the user who carried out the activity,
        e.g. ``'seanh'``
    :type user_id: string
    :param object_id: the name or id of the object of the activity, e.g.
        ``'my_dataset'``
    :param activity_type: the type of the activity, this must be an activity
        type that CKAN knows how to render, e.g. ``'new package'``,
        ``'changed user'``, ``'deleted group'`` etc.
    :type activity_type: string
    :param data: any additional data about the activity
    :type data: dictionary

    :returns: the newly created activity
    :rtype: dictionary
    '''
    _check_access('activity_create', context, activity_dict)
    # this action had a ignore_auth param which has been removed
    # removed in 2.2 -- fail loudly so old callers get migrated.
    if 'ignore_auth' in kw:
        raise Exception('Activity Stream calling parameters have changed '
                        'ignore_auth must be passed in the context not as '
                        'a param')
    # Activity streams can be disabled site-wide via config; creating an
    # activity is then a silent no-op (returns None).
    if not paste.deploy.converters.asbool(
            config.get('ckan.activity_streams_enabled', 'true')):
        return
    model = context['model']
    # Any revision_id that the caller attempts to pass in the activity_dict is
    # ignored and overwritten here.
    if getattr(model.Session, 'revision', None):
        activity_dict['revision_id'] = model.Session.revision.id
    else:
        activity_dict['revision_id'] = None
    schema = context.get('schema') or \
        ckan.logic.schema.default_create_activity_schema()
    data, errors = _validate(activity_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    activity = model_save.activity_dict_save(data, context)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug("Created '%s' activity" % activity.activity_type)
    return model_dictize.activity_dictize(activity, context)
def package_relationship_create_rest(context, data_dict):
    '''Create a package relationship from legacy REST API parameters.

    Maps the REST API's URL-style keys onto the names the core action
    expects, then delegates to ``package_relationship_create``.
    '''
    # Translate the REST API's parameter names into the core action's.
    rest_to_action_keys = {
        'id': 'subject',
        'id2': 'object',
        'rel': 'type',
    }
    # Non-destructive rename: values the caller already supplied under
    # 'object'/'type' take precedence over the URL parameters.
    data_dict = ckan.logic.action.rename_keys(
        data_dict, rest_to_action_keys, destructive=False)
    return _get_action('package_relationship_create')(context, data_dict)
def tag_create(context, data_dict):
    '''Create a new vocabulary tag.

    You must be a sysadmin to create vocabulary tags.

    You can only use this function to create tags that belong to a vocabulary,
    not to create free tags. (To create a new free tag simply add the tag to
    a package, e.g. using the
    :py:func:`~ckan.logic.action.update.package_update` function.)

    :param name: the name for the new tag, a string between 2 and 100
        characters long containing only alphanumeric characters and ``-``,
        ``_`` and ``.``, e.g. ``'Jazz'``
    :type name: string
    :param vocabulary_id: the name or id of the vocabulary that the new tag
        should be added to, e.g. ``'Genre'``
    :type vocabulary_id: string

    :returns: the newly-created tag
    :rtype: dictionary
    '''
    model = context['model']
    _check_access('tag_create', context, data_dict)
    schema = context.get('schema') or \
        ckan.logic.schema.default_create_tag_schema()
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    # NOTE(review): the *raw* data_dict is saved here, not the validated
    # ``data`` produced above (unlike the other create actions in this
    # module) -- confirm whether this is intentional.
    tag = model_save.tag_dict_save(data_dict, context)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug("Created tag '%s' " % tag)
    return model_dictize.tag_dictize(tag, context)
def follow_user(context, data_dict):
    '''Start following another user.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the user to follow, e.g. ``'joeuser'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the other user
    :rtype: dictionary
    '''
    if 'user' not in context:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))
    model = context['model']
    session = context['session']
    # Resolve the logged-in user; an unknown username is treated the same
    # as not being logged in at all.
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_user_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Don't let a user follow herself.
    if userobj.id == validated_data_dict['id']:
        message = _('You cannot follow yourself')
        raise ValidationError({'message': message}, error_summary=message)
    # Don't let a user follow someone she is already following.
    if model.UserFollowingUser.is_following(userobj.id,
                                            validated_data_dict['id']):
        followeduserobj = model.User.get(validated_data_dict['id'])
        name = followeduserobj.display_name
        message = _('You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)
    # Persist the follower/object pair via the shared follower helper.
    follower = model_save.follower_dict_save(
        validated_data_dict, context, model.UserFollowingUser)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug(u'User {follower} started following user {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_user_dictize(follower, context)
def follow_dataset(context, data_dict):
    '''Start following a dataset.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the dataset to follow, e.g. ``'warandpeace'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the dataset
    :rtype: dictionary
    '''
    # Idiomatic membership test (was ``not 'user' in context``), matching
    # follow_user() and follow_group().
    if 'user' not in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))
    model = context['model']
    session = context['session']
    # Resolve the logged-in user; an unknown username is treated the same
    # as not being logged in at all.
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_dataset_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Don't let a user follow a dataset she is already following.
    if model.UserFollowingDataset.is_following(userobj.id,
                                               validated_data_dict['id']):
        # FIXME really package model should have this logic and provide
        # 'display_name' like users and groups
        pkgobj = model.Package.get(validated_data_dict['id'])
        name = pkgobj.title or pkgobj.name or pkgobj.id
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)
    # Persist the follower/object pair via the shared follower helper.
    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingDataset)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug(u'User {follower} started following dataset {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_dataset_dictize(follower, context)
def _group_or_org_member_create(context, data_dict, is_org=False):
    '''Shared implementation for group_member_create / organization_member_create.

    Resolves the target user and delegates to the ``member_create`` action
    with object_type ``'user'``.
    '''
    # creator of group/org becomes an admin
    # this needs to be after the repo.commit or else revisions break
    model = context['model']
    user = context['user']
    session = context['session']
    schema = ckan.logic.schema.member_schema()
    # NOTE(review): the validation errors returned here are never checked,
    # so invalid input falls through to the lookups below -- confirm
    # whether this is intentional.
    data, errors = _validate(data_dict, schema, context)
    username = _get_or_bust(data_dict, 'username')
    role = data_dict.get('role')
    group_id = data_dict.get('id')
    group = model.Group.get(group_id)
    result = model.User.get(username)
    if result:
        user_id = result.id
    else:
        message = _(u'User {username} does not exist.').format(
            username=username)
        raise ValidationError({'message': message}, error_summary=message)
    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': role,
    }
    # Delegate to member_create in a fresh context, propagating only the
    # ignore_auth flag from the caller's context.
    member_create_context = {
        'model': model,
        'user': user,
        'session': session,
        'ignore_auth': context.get('ignore_auth'),
    }
    # NOTE(review): the ``is_org`` parameter is accepted but never used in
    # this function.
    logic.get_action('member_create')(member_create_context, member_dict)
def group_member_create(context, data_dict):
    '''Make a user a member of a group.

    You must be authorized to edit the group.

    :param id: the id or name of the group
    :type id: string
    :param username: name or id of the user to be made member of the group
    :type username: string
    :param role: role of the user in the group. One of ``member``, ``editor``,
        or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Authorization check first, then delegate to the shared
    # group/organization helper.
    _check_access('group_member_create', context, data_dict)
    membership = _group_or_org_member_create(context, data_dict)
    return membership
def organization_member_create(context, data_dict):
    '''Make a user a member of an organization.

    You must be authorized to edit the organization.

    :param id: the id or name of the organization
    :type id: string
    :param username: name or id of the user to be made member of the
        organization
    :type username: string
    :param role: role of the user in the organization. One of ``member``,
        ``editor``, or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Authorization check first, then delegate to the shared
    # group/organization helper in organization mode.
    _check_access('organization_member_create', context, data_dict)
    membership = _group_or_org_member_create(context, data_dict, is_org=True)
    return membership
def follow_group(context, data_dict):
    '''Start following a group.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the group to follow, e.g. ``'roger'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the group
    :rtype: dictionary
    '''
    if 'user' not in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))
    model = context['model']
    session = context['session']
    # Resolve the logged-in user; an unknown username is treated the same
    # as not being logged in at all.
    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))
    schema = context.get('schema',
                         ckan.logic.schema.default_follow_group_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Don't let a user follow a group she is already following.
    if model.UserFollowingGroup.is_following(userobj.id,
                                             validated_data_dict['id']):
        groupobj = model.Group.get(validated_data_dict['id'])
        name = groupobj.display_name
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)
    # Persist the follower/object pair via the shared follower helper.
    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingGroup)
    # 'defer_commit' lets a caller batch this change into a later commit.
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug(u'User {follower} started following group {object}'.format(
        follower=follower.follower_id, object=follower.object_id))
    return model_dictize.user_following_group_dictize(follower, context)
| en | 0.729784 | API functions for adding data to CKAN. # FIXME this looks nasty and should be shared better # Define some shortcuts # Ensure they are module-private so that they don't get loaded as available # actions in the action API. Create a new dataset (package). You must be authorized to create new datasets. If you specify any groups for the new dataset, you must also be authorized to edit these groups. Plugins may change the parameters of this function depending on the value of the ``type`` parameter, see the :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugin interface. :param name: the name of the new dataset, must be between 2 and 100 characters long and contain only lowercase alphanumeric characters, ``-`` and ``_``, e.g. ``'warandpeace'`` :type name: string :param title: the title of the dataset (optional, default: same as ``name``) :type title: string :param author: the name of the dataset's author (optional) :type author: string :param author_email: the email address of the dataset's author (optional) :type author_email: string :param maintainer: the name of the dataset's maintainer (optional) :type maintainer: string :param maintainer_email: the email address of the dataset's maintainer (optional) :type maintainer_email: string :param license_id: the id of the dataset's license, see :py:func:`~ckan.logic.action.get.license_list` for available values (optional) :type license_id: license id string :param notes: a description of the dataset (optional) :type notes: string :param url: a URL for the dataset's source (optional) :type url: string :param version: (optional) :type version: string, no longer than 100 characters :param state: the current state of the dataset, e.g. 
``'active'`` or ``'deleted'``, only active datasets show up in search results and other lists of datasets, this parameter will be ignored if you are not authorized to change the state of the dataset (optional, default: ``'active'``) :type state: string :param type: the type of the dataset (optional), :py:class:`~ckan.plugins.interfaces.IDatasetForm` plugins associate themselves with different dataset types and provide custom dataset handling behaviour for these types :type type: string :param resources: the dataset's resources, see :py:func:`resource_create` for the format of resource dictionaries (optional) :type resources: list of resource dictionaries :param tags: the dataset's tags, see :py:func:`tag_create` for the format of tag dictionaries (optional) :type tags: list of tag dictionaries :param extras: the dataset's extras (optional), extras are arbitrary (key: value) metadata items that can be added to datasets, each extra dictionary should have keys ``'key'`` (a string), ``'value'`` (a string) :type extras: list of dataset extra dictionaries :param relationships_as_object: see :py:func:`package_relationship_create` for the format of relationship dictionaries (optional) :type relationships_as_object: list of relationship dictionaries :param relationships_as_subject: see :py:func:`package_relationship_create` for the format of relationship dictionaries (optional) :type relationships_as_subject: list of relationship dictionaries :param groups: the groups to which the dataset belongs (optional), each group dictionary should have one or more of the following keys which identify an existing group: ``'id'`` (the id of the group, string), ``'name'`` (the name of the group, string), ``'title'`` (the title of the group, string), to see which groups exist call :py:func:`~ckan.logic.action.get.group_list` :type groups: list of dictionaries :param owner_org: the id of the dataset's owning organization, see :py:func:`~ckan.logic.action.get.organization_list` or 
:py:func:`~ckan.logic.action.get.organization_list_for_user` for available values (optional) :type owner_org: string :returns: the newly created dataset (unless 'return_id_only' is set to True in the context, in which case just the dataset id will be returned) :rtype: dictionary # check_data_dict() is deprecated. If the package_plugin has a # check_data_dict() we'll call it, if it doesn't have the method we'll # do nothing. # Old plugins do not support passing the schema so we need # to ensure they still work # Needed to let extensions know the package id ## need to let rest api create ## this is added so that the rest controller can make a new location # Make sure that a user provided schema is not used on package_show Appends a new resource to a datasets list of resources. :param package_id: id of package that the resource should be added to. :type package_id: string :param url: url of resource :type url: string :param revision_id: (optional) :type revision_id: string :param description: (optional) :type description: string :param format: (optional) :type format: string :param hash: (optional) :type hash: string :param name: (optional) :type name: string :param resource_type: (optional) :type resource_type: string :param mimetype: (optional) :type mimetype: string :param mimetype_inner: (optional) :type mimetype_inner: string :param webstore_url: (optional) :type webstore_url: string :param cache_url: (optional) :type cache_url: string :param size: (optional) :type size: int :param created: (optional) :type created: iso date string :param last_modified: (optional) :type last_modified: iso date string :param cache_last_updated: (optional) :type cache_last_updated: iso date string :param webstore_last_updated: (optional) :type webstore_last_updated: iso date string :param upload: (optional) :type upload: FieldStorage (optional) needs multipart/form-data :returns: the newly created resource :rtype: dictionary ## Get out resource_id resource from model as it will not 
appear in ## package_show until after commit ## Run package show again to get out actual last_resource Creates a new resource view. :param resource_id: id of the resource :type resource_id: string :param title: the title of the view :type title: string :param description: a description of the view (optional) :type description: string :param view_type: type of view :type view_type: string :param config: options necessary to recreate a view state (optional) :type config: JSON string :returns: the newly created resource view :rtype: dictionary Add a new related item to a dataset. You must provide your API key in the Authorization header. :param title: the title of the related item :type title: string :param type: the type of the related item, e.g. ``'Application'``, ``'Idea'`` or ``'Visualisation'`` :type type: string :param id: the id of the related item (optional) :type id: string :param description: the description of the related item (optional) :type description: string :param url: the URL to the related item (optional) :type url: string :param image_url: the URL to the image for the related item (optional) :type image_url: string :param dataset_id: the name or id of the dataset that the related item belongs to (optional) :type dataset_id: string :returns: the newly created related item :rtype: dictionary Create a relationship between two datasets (packages). You must be authorized to edit both the subject and the object datasets. 
:param subject: the id or name of the dataset that is the subject of the relationship :type subject: string :param object: the id or name of the dataset that is the object of the relationship :param type: the type of the relationship, one of ``'depends_on'``, ``'dependency_of'``, ``'derives_from'``, ``'has_derivation'``, ``'links_to'``, ``'linked_from'``, ``'child_of'`` or ``'parent_of'`` :type type: string :param comment: a comment about the relationship (optional) :type comment: string :returns: the newly created package relationship :rtype: dictionary # Create a Package Relationship. Make an object (e.g. a user, dataset or group) a member of a group. If the object is already a member of the group then the capacity of the membership will be updated. You must be authorized to edit the group. :param id: the id or name of the group to add the object to :type id: string :param object: the id or name of the object to add :type object: string :param object_type: the type of the object being added, e.g. ``'package'`` or ``'user'`` :type object_type: string :param capacity: the capacity of the membership :type capacity: string :returns: the newly created (or updated) membership :rtype: dictionary # Look up existing, in case it exists # get the schema # old plugins do not support passing the schema so we need # to ensure they still work # Needed to let extensions know the group id # creator of group/org becomes an admin # this needs to be after the repo.commit or else revisions break # we are not a member of the group at this point Create a new group. You must be authorized to create groups. Plugins may change the parameters of this function depending on the value of the ``type`` parameter, see the :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface. 
:param name: the name of the group, a string between 2 and 100 characters long, containing only lowercase alphanumeric characters, ``-`` and ``_`` :type name: string :param id: the id of the group (optional) :type id: string :param title: the title of the group (optional) :type title: string :param description: the description of the group (optional) :type description: string :param image_url: the URL to an image to be displayed on the group's page (optional) :type image_url: string :param type: the type of the group (optional), :py:class:`~ckan.plugins.interfaces.IGroupForm` plugins associate themselves with different group types and provide custom group handling behaviour for these types Cannot be 'organization' :type type: string :param state: the current state of the group, e.g. ``'active'`` or ``'deleted'``, only active groups show up in search results and other lists of groups, this parameter will be ignored if you are not authorized to change the state of the group (optional, default: ``'active'``) :type state: string :param approval_status: (optional) :type approval_status: string :param extras: the group's extras (optional), extras are arbitrary (key: value) metadata items that can be added to groups, each extra dictionary should have keys ``'key'`` (a string), ``'value'`` (a string), and optionally ``'deleted'`` :type extras: list of dataset extra dictionaries :param packages: the datasets (packages) that belong to the group, a list of dictionaries each with keys ``'name'`` (string, the id or name of the dataset) and optionally ``'title'`` (string, the title of the dataset) :type packages: list of dictionaries :param groups: the groups that belong to the group, a list of dictionaries each with key ``'name'`` (string, the id or name of the group) and optionally ``'capacity'`` (string, the capacity in which the group is a member of the group) :type groups: list of dictionaries :param users: the users that belong to the group, a list of dictionaries each 
with key ``'name'`` (string, the id or name of the user) and optionally ``'capacity'`` (string, the capacity in which the user is a member of the group) :type users: list of dictionaries :returns: the newly created group :rtype: dictionary # wrapper for creating groups # FIXME better exception? Create a new organization. You must be authorized to create organizations. Plugins may change the parameters of this function depending on the value of the ``type`` parameter, see the :py:class:`~ckan.plugins.interfaces.IGroupForm` plugin interface. :param name: the name of the organization, a string between 2 and 100 characters long, containing only lowercase alphanumeric characters, ``-`` and ``_`` :type name: string :param id: the id of the organization (optional) :type id: string :param title: the title of the organization (optional) :type title: string :param description: the description of the organization (optional) :type description: string :param image_url: the URL to an image to be displayed on the organization's page (optional) :type image_url: string :param state: the current state of the organization, e.g. 
``'active'`` or ``'deleted'``, only active organizations show up in search results and other lists of organizations, this parameter will be ignored if you are not authorized to change the state of the organization (optional, default: ``'active'``) :type state: string :param approval_status: (optional) :type approval_status: string :param extras: the organization's extras (optional), extras are arbitrary (key: value) metadata items that can be added to organizations, each extra dictionary should have keys ``'key'`` (a string), ``'value'`` (a string), and optionally ``'deleted'`` :type extras: list of dataset extra dictionaries :param packages: the datasets (packages) that belong to the organization, a list of dictionaries each with keys ``'name'`` (string, the id or name of the dataset) and optionally ``'title'`` (string, the title of the dataset) :type packages: list of dictionaries :param users: the users that belong to the organization, a list of dictionaries each with key ``'name'`` (string, the id or name of the user) and optionally ``'capacity'`` (string, the capacity in which the user is a member of the organization) :type users: list of dictionaries :returns: the newly created organization :rtype: dictionary # wrapper for creating organizations Rate a dataset (package). You must provide your API key in the Authorization header. :param package: the name or id of the dataset to rate :type package: string :param rating: the rating to give to the dataset, an integer between 1 and 5 :type rating: int :returns: a dictionary with two keys: ``'rating average'`` (the average rating of the dataset you rated) and ``'rating count'`` (the number of times the dataset has been rated) :rtype: dictionary Create a new user. You must be authorized to create users. 
:param name: the name of the new user, a string between 2 and 100 characters in length, containing only lowercase alphanumeric characters, ``-`` and ``_`` :type name: string :param email: the email address for the new user :type email: string :param password: the password of the <PASSWORD>, a string of at least 4 characters :type password: string :param id: the id of the new user (optional) :type id: string :param fullname: the full name of the new user (optional) :type fullname: string :param about: a description of the new user (optional) :type about: string :param openid: (optional) :type openid: string :returns: the newly created yser :rtype: dictionary # Flush the session to cause user.id to be initialised, because # activity_create() (below) needs it. # A new context is required for dictizing the newly constructed user in # order that all the new user's data is returned, in particular, the # api_key. # # The context is copied so as not to clobber the caller's context dict. # Create dashboard for user. Invite a new user. You must be authorized to create group members. :param email: the email of the user to be invited to the group :type email: string :param group_id: the id or name of the group :type group_id: string :param role: role of the user in the group. One of ``member``, ``editor``, or ``admin`` :type role: string :returns: the newly created yser :rtype: dictionary # if we can't create a unique user name within this many attempts # then something else is probably wrong and we should give up ## Modifications for rest api Create a new tag vocabulary. You must be a sysadmin to create vocabularies. :param name: the name of the new vocabulary, e.g. ``'Genre'`` :type name: string :param tags: the new tags to add to the new vocabulary, for the format of tag dictionaries see :py:func:`tag_create` :type tags: list of tag dictionaries :returns: the newly-created vocabulary :rtype: dictionary Create a new activity stream activity. 
You must be a sysadmin to create new activities. :param user_id: the name or id of the user who carried out the activity, e.g. ``'seanh'`` :type user_id: string :param object_id: the name or id of the object of the activity, e.g. ``'my_dataset'`` :param activity_type: the type of the activity, this must be an activity type that CKAN knows how to render, e.g. ``'new package'``, ``'changed user'``, ``'deleted group'`` etc. :type activity_type: string :param data: any additional data about the activity :type data: dictionary :returns: the newly created activity :rtype: dictionary # this action had a ignore_auth param which has been removed # removed in 2.2 # Any revision_id that the caller attempts to pass in the activity_dict is # ignored and overwritten here. # rename keys # Don't be destructive to enable parameter values for # object and type to override the URL parameters. Create a new vocabulary tag. You must be a sysadmin to create vocabulary tags. You can only use this function to create tags that belong to a vocabulary, not to create free tags. (To create a new free tag simply add the tag to a package, e.g. using the :py:func:`~ckan.logic.action.update.package_update` function.) :param name: the name for the new tag, a string between 2 and 100 characters long containing only alphanumeric characters and ``-``, ``_`` and ``.``, e.g. ``'Jazz'`` :type name: string :param vocabulary_id: the name or id of the vocabulary that the new tag should be added to, e.g. ``'Genre'`` :type vocabulary_id: string :returns: the newly-created tag :rtype: dictionary Start following another user. You must provide your API key in the Authorization header. :param id: the id or name of the user to follow, e.g. ``'joeuser'`` :type id: string :returns: a representation of the 'follower' relationship between yourself and the other user :rtype: dictionary # Don't let a user follow herself. # Don't let a user follow someone she is already following. Start following a dataset. 
You must provide your API key in the Authorization header. :param id: the id or name of the dataset to follow, e.g. ``'warandpeace'`` :type id: string :returns: a representation of the 'follower' relationship between yourself and the dataset :rtype: dictionary # Don't let a user follow a dataset she is already following. # FIXME really package model should have this logic and provide # 'display_name' like users and groups # creator of group/org becomes an admin # this needs to be after the repo.commit or else revisions break Make a user a member of a group. You must be authorized to edit the group. :param id: the id or name of the group :type id: string :param username: name or id of the user to be made member of the group :type username: string :param role: role of the user in the group. One of ``member``, ``editor``, or ``admin`` :type role: string :returns: the newly created (or updated) membership :rtype: dictionary Make a user a member of an organization. You must be authorized to edit the organization. :param id: the id or name of the organization :type id: string :param username: name or id of the user to be made member of the organization :type username: string :param role: role of the user in the organization. One of ``member``, ``editor``, or ``admin`` :type role: string :returns: the newly created (or updated) membership :rtype: dictionary Start following a group. You must provide your API key in the Authorization header. :param id: the id or name of the group to follow, e.g. ``'roger'`` :type id: string :returns: a representation of the 'follower' relationship between yourself and the group :rtype: dictionary # Don't let a user follow a group she is already following. | 2.044494 | 2 |
archiv/tables.py | csae8092/djtranskribus | 0 | 6624904 | # generated by appcreator
import django_tables2 as tables
from django_tables2.utils import A
from browsing.browsing_utils import MergeColumn
from . models import (
TrpCollection,
TrpDocument,
TrpPage,
TrpTranscript
)
class TrpCollectionTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpCollection
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpDocumentTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
col_list = tables.columns.ManyToManyColumn()
class Meta:
model = TrpDocument
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpPageTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpPage
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpTranscriptTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpTranscript
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
| # generated by appcreator
import django_tables2 as tables
from django_tables2.utils import A
from browsing.browsing_utils import MergeColumn
from . models import (
TrpCollection,
TrpDocument,
TrpPage,
TrpTranscript
)
class TrpCollectionTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpCollection
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpDocumentTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
col_list = tables.columns.ManyToManyColumn()
class Meta:
model = TrpDocument
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpPageTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpPage
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
class TrpTranscriptTable(tables.Table):
id = tables.LinkColumn(verbose_name='ID')
merge = MergeColumn(verbose_name='keep | remove', accessor='pk')
class Meta:
model = TrpTranscript
sequence = ('id',)
attrs = {"class": "table table-responsive table-hover"}
| en | 0.916869 | # generated by appcreator | 1.930714 | 2 |
neon/layers/convolutional.py | kashif/neon | 1 | 6624905 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neural network layers involving the application of convolutional filters.
"""
import logging
from neon.backends.cpu import CPU
from neon.layers.layer import WeightLayer
from neon.util.param import opt_param
logger = logging.getLogger(__name__)
class ConvLayer(WeightLayer):
"""
Convolutional layer.
"""
def __init__(self, **kwargs):
self.is_local = True
super(ConvLayer, self).__init__(**kwargs)
opt_param(self, ['local_conv'], False)
def initialize(self, kwargs):
super(ConvLayer, self).initialize(kwargs)
self.initialize_local()
if self.pad != 0 and isinstance(self.backend, CPU):
raise NotImplementedError('pad != 0, for CPU backend in ConvLayer')
self.allocate_output_bufs()
opt_param(self, ['shared_bias'], True)
if self.shared_bias:
self.bias_shape = (self.nofm, 1)
self.bias_expand = self.backend.empty((self.nout, 1),
dtype=self.weight_dtype)
else:
self.bias_shape = (self.nout, 1)
self.allocate_param_bufs()
opt_param(self, ['prodbuf', 'bpropbuf', 'updatebuf'], None)
if isinstance(self.backend, CPU):
self.prodbuf = self.backend.empty((self.nofm, self.batch_size),
dtype=self.weight_dtype)
self.bpropbuf = self.backend.empty((self.fsize, self.batch_size),
dtype=self.weight_dtype)
self.updatebuf = self.backend.empty(self.weights.shape,
dtype=self.weight_dtype)
if hasattr(self.backend, 'ng'):
self.conv_params = self.backend.ng.conv_layer(
N=self.output.shape[1], C=self.nifm, K=self.nofm,
D=1, H=self.ifmshape[0], W=self.ifmshape[1], T=1,
R=self.fshape[0], S=self.fshape[1],
pad_d=0, pad_h=self.pad, pad_w=self.pad,
str_d=1, str_h=self.stride, str_w=self.stride,
grid_P=0, grid_Q=0,
dtype=self.weight_dtype)
self.prodbuf = self.bpropbuf = self.updatebuf = self.conv_params
if self.backend.is_dist:
self.bprop_events = self.backend.make_events()
self.update_events = self.backend.make_events()
else:
self.bprop_events = None
self.update_events = None
def set_weight_shape(self):
if hasattr(self, 'local_conv') and self.local_conv:
weight_shape = (self.fsize * self.ofmsize, self.nofm)
else:
weight_shape = (self.fsize, self.nofm)
opt_param(self, ['weight_shape'], weight_shape)
def make_views(self):
if self.shared_bias or self.batch_norm:
self.bias_expand_view = self.bias_expand.reshape(
(self.nofm, self.ofmsize))
self.pre_act_view = self.pre_act.reshape(
(self.nofm, self.ofmsize * self.output.shape[1]))
def make_mempool(self):
nr = self.backend.num_dev
poolsize = -(-self.weights.size // nr) * nr
self.mempool = self.backend.empty((poolsize, 1), self.updates_dtype)
def fprop(self, inputs):
self.backend.fprop_conv(out=self.pre_act, inputs=inputs,
weights=self.weights, ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs, ifmshape=self.ifmshape,
links=self.links, nifm=self.nifm,
padding=self.negpad, stride=self.stride,
ngroups=1, fpropbuf=self.prodbuf,
local=self.local_conv)
if self.use_biases is True:
if self.shared_bias:
self.backend.add(self.pre_act_view, self.biases,
out=self.pre_act_view)
else:
self.backend.add(self.pre_act, self.biases, out=self.pre_act)
if self.batch_norm:
self.bn.fprop_func(self.backend,
self.pre_act_view, self.pre_act_view)
self.activation.fprop_func(self.backend, self.pre_act, self.output)
def bprop(self, error):
inputs = self.prev_layer.output
self.activation.bprop_func(self.backend, self.pre_act, error,
self.skip_act)
upm = self.utemp if self.accumulate else self.updates
u_idx = 0
if self.batch_norm:
error_view = error.reshape(self.pre_act_view.shape)
self.bn.bprop_func(self.backend, self.pre_act_view, error_view,
self.skip_act)
u_idx = 2
if self.deltas is not None:
self.backend.bprop_conv(out=self.deltas, weights=self.weights,
deltas=error, ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs,
ifmshape=self.ifmshape, links=self.links,
padding=self.negpad, stride=self.stride,
nifm=self.nifm, ngroups=1,
bpropbuf=self.bpropbuf,
local=self.local_conv)
self.backend.update_conv(out=upm[u_idx], inputs=inputs,
weights=self.weights, deltas=error,
ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs,
ifmshape=self.ifmshape, links=self.links,
nifm=self.nifm, padding=self.negpad,
stride=self.stride, ngroups=1,
fwidth=self.fshape[-1],
updatebuf=self.updatebuf,
local=self.local_conv,
layer=self)
if self.use_biases is True:
# We can't reshape the error buffer since it might be global buffer
if self.shared_bias:
self.backend.sum(error, axes=1, out=self.bias_expand)
self.backend.sum(self.bias_expand_view, axes=1,
out=upm[u_idx+1])
else:
self.backend.sum(error, axes=1, out=upm[u_idx+1])
if self.accumulate:
self.backend.add(upm[u_idx], self.updates[u_idx],
out=self.updates[u_idx])
if self.use_biases is True:
self.backend.add(upm[1], self.updates[1], out=self.updates[1])
def share_updates(self):
assert self.backend.is_dist and self.is_local
assert self.mempool is not None
self.backend.synchronize()
for dbuf in self.updates:
nr = self.backend.num_dev
poolsize = -(-dbuf.size // nr) * nr
ubuf = self.mempool[:poolsize]
self.backend.reduce(dbuf, ubuf)
if self.batch_norm:
self.backend.divide(self.bn._beta_updates, self.backend.num_dev,
out=self.bn._beta_updates)
self.backend.divide(self.bn._gamma_updates, self.backend.num_dev,
out=self.bn._gamma_updates)
| # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neural network layers involving the application of convolutional filters.
"""
import logging
from neon.backends.cpu import CPU
from neon.layers.layer import WeightLayer
from neon.util.param import opt_param
logger = logging.getLogger(__name__)
class ConvLayer(WeightLayer):
"""
Convolutional layer.
"""
def __init__(self, **kwargs):
self.is_local = True
super(ConvLayer, self).__init__(**kwargs)
opt_param(self, ['local_conv'], False)
def initialize(self, kwargs):
super(ConvLayer, self).initialize(kwargs)
self.initialize_local()
if self.pad != 0 and isinstance(self.backend, CPU):
raise NotImplementedError('pad != 0, for CPU backend in ConvLayer')
self.allocate_output_bufs()
opt_param(self, ['shared_bias'], True)
if self.shared_bias:
self.bias_shape = (self.nofm, 1)
self.bias_expand = self.backend.empty((self.nout, 1),
dtype=self.weight_dtype)
else:
self.bias_shape = (self.nout, 1)
self.allocate_param_bufs()
opt_param(self, ['prodbuf', 'bpropbuf', 'updatebuf'], None)
if isinstance(self.backend, CPU):
self.prodbuf = self.backend.empty((self.nofm, self.batch_size),
dtype=self.weight_dtype)
self.bpropbuf = self.backend.empty((self.fsize, self.batch_size),
dtype=self.weight_dtype)
self.updatebuf = self.backend.empty(self.weights.shape,
dtype=self.weight_dtype)
if hasattr(self.backend, 'ng'):
self.conv_params = self.backend.ng.conv_layer(
N=self.output.shape[1], C=self.nifm, K=self.nofm,
D=1, H=self.ifmshape[0], W=self.ifmshape[1], T=1,
R=self.fshape[0], S=self.fshape[1],
pad_d=0, pad_h=self.pad, pad_w=self.pad,
str_d=1, str_h=self.stride, str_w=self.stride,
grid_P=0, grid_Q=0,
dtype=self.weight_dtype)
self.prodbuf = self.bpropbuf = self.updatebuf = self.conv_params
if self.backend.is_dist:
self.bprop_events = self.backend.make_events()
self.update_events = self.backend.make_events()
else:
self.bprop_events = None
self.update_events = None
def set_weight_shape(self):
if hasattr(self, 'local_conv') and self.local_conv:
weight_shape = (self.fsize * self.ofmsize, self.nofm)
else:
weight_shape = (self.fsize, self.nofm)
opt_param(self, ['weight_shape'], weight_shape)
def make_views(self):
if self.shared_bias or self.batch_norm:
self.bias_expand_view = self.bias_expand.reshape(
(self.nofm, self.ofmsize))
self.pre_act_view = self.pre_act.reshape(
(self.nofm, self.ofmsize * self.output.shape[1]))
def make_mempool(self):
nr = self.backend.num_dev
poolsize = -(-self.weights.size // nr) * nr
self.mempool = self.backend.empty((poolsize, 1), self.updates_dtype)
def fprop(self, inputs):
self.backend.fprop_conv(out=self.pre_act, inputs=inputs,
weights=self.weights, ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs, ifmshape=self.ifmshape,
links=self.links, nifm=self.nifm,
padding=self.negpad, stride=self.stride,
ngroups=1, fpropbuf=self.prodbuf,
local=self.local_conv)
if self.use_biases is True:
if self.shared_bias:
self.backend.add(self.pre_act_view, self.biases,
out=self.pre_act_view)
else:
self.backend.add(self.pre_act, self.biases, out=self.pre_act)
if self.batch_norm:
self.bn.fprop_func(self.backend,
self.pre_act_view, self.pre_act_view)
self.activation.fprop_func(self.backend, self.pre_act, self.output)
def bprop(self, error):
inputs = self.prev_layer.output
self.activation.bprop_func(self.backend, self.pre_act, error,
self.skip_act)
upm = self.utemp if self.accumulate else self.updates
u_idx = 0
if self.batch_norm:
error_view = error.reshape(self.pre_act_view.shape)
self.bn.bprop_func(self.backend, self.pre_act_view, error_view,
self.skip_act)
u_idx = 2
if self.deltas is not None:
self.backend.bprop_conv(out=self.deltas, weights=self.weights,
deltas=error, ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs,
ifmshape=self.ifmshape, links=self.links,
padding=self.negpad, stride=self.stride,
nifm=self.nifm, ngroups=1,
bpropbuf=self.bpropbuf,
local=self.local_conv)
self.backend.update_conv(out=upm[u_idx], inputs=inputs,
weights=self.weights, deltas=error,
ofmshape=self.ofmshape,
ofmsize=self.ofmsize,
ofmlocs=self.ofmlocs,
ifmshape=self.ifmshape, links=self.links,
nifm=self.nifm, padding=self.negpad,
stride=self.stride, ngroups=1,
fwidth=self.fshape[-1],
updatebuf=self.updatebuf,
local=self.local_conv,
layer=self)
if self.use_biases is True:
# We can't reshape the error buffer since it might be global buffer
if self.shared_bias:
self.backend.sum(error, axes=1, out=self.bias_expand)
self.backend.sum(self.bias_expand_view, axes=1,
out=upm[u_idx+1])
else:
self.backend.sum(error, axes=1, out=upm[u_idx+1])
if self.accumulate:
self.backend.add(upm[u_idx], self.updates[u_idx],
out=self.updates[u_idx])
if self.use_biases is True:
self.backend.add(upm[1], self.updates[1], out=self.updates[1])
def share_updates(self):
assert self.backend.is_dist and self.is_local
assert self.mempool is not None
self.backend.synchronize()
for dbuf in self.updates:
nr = self.backend.num_dev
poolsize = -(-dbuf.size // nr) * nr
ubuf = self.mempool[:poolsize]
self.backend.reduce(dbuf, ubuf)
if self.batch_norm:
self.backend.divide(self.bn._beta_updates, self.backend.num_dev,
out=self.bn._beta_updates)
self.backend.divide(self.bn._gamma_updates, self.backend.num_dev,
out=self.bn._gamma_updates)
| en | 0.776917 | # ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- Neural network layers involving the application of convolutional filters. Convolutional layer. # We can't reshape the error buffer since it might be global buffer | 2.273493 | 2 |
datalad/interface/utils.py | mslw/datalad | 298 | 6624906 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Interface utility functions
"""
__docformat__ = 'restructuredtext'
import inspect
import logging
import os
import wrapt
import sys
import re
from time import time
from os import curdir
from os import pardir
from os import listdir
import os.path as op
from os.path import join as opj
from os.path import isdir
from os.path import relpath
from os.path import sep
from os.path import split as psplit
from itertools import chain
import json
# avoid import from API to not get into circular imports
from datalad.utils import with_pathsep as _with_sep # TODO: RF whenever merge conflict is not upon us
from datalad.utils import (
path_startswith,
path_is_subpath,
ensure_unicode,
getargspec,
get_wrapped_class,
)
from datalad.support.gitrepo import GitRepo
from datalad.support.exceptions import IncompleteResultsError
from datalad import cfg as dlcfg
from datalad.dochelpers import (
exc_str,
single_or_plural,
)
from datalad.ui import ui
import datalad.support.ansi_colors as ac
from datalad.interface.base import default_logchannels
from datalad.interface.base import get_allargs_as_kwargs
from datalad.interface.common_opts import eval_params
from datalad.interface.common_opts import eval_defaults
from .results import known_result_xfms
from datalad.core.local.resulthooks import (
get_jsonhooks_from_config,
match_jsonhook2result,
run_jsonhook,
)
lgr = logging.getLogger('datalad.interface.utils')
def cls2cmdlinename(cls):
    """Derive the command-line command name from an Interface class name.

    CamelCase boundaries are converted to dashes and the result is
    lowercased, e.g. ``FooBar`` -> ``foo-bar``.
    """
    return re.sub(r'([a-z0-9])([A-Z])', '\\1-\\2', cls.__name__).lower()
# TODO remove
# only `drop` and `uninstall` are still using this
def handle_dirty_dataset(ds, mode, msg=None):
    """Detect and treat unsaved changes as instructed by `mode`

    Parameters
    ----------
    ds : Dataset or None
      Dataset to be inspected. Does nothing if `None`.
    mode : {'fail', 'ignore', 'save-before'}
      How to act upon discovering unsaved changes.
    msg : str or None
      Custom message to use for a potential commit.

    Returns
    -------
    None
    """
    if ds is None:
        # no dataset given, nothing to inspect
        return
    commit_msg = msg if msg is not None else '[DATALAD] auto-saved changes'
    # flush any pending operations (e.g. batched annex processes) so the
    # Git state actually reflects them before we inspect it
    if ds.repo:
        ds.repo.precommit()
    if mode == 'ignore':
        return
    if mode == 'fail':
        if not ds.repo or ds.repo.dirty:
            raise RuntimeError('dataset {} has unsaved changes'.format(ds))
    elif mode == 'save-before':
        if not ds.is_installed():
            raise RuntimeError('dataset {} is not yet installed'.format(ds))
        from datalad.core.local.save import Save
        Save.__call__(dataset=ds, message=commit_msg, updated=True)
    else:
        raise ValueError("unknown if-dirty mode '{}'".format(mode))
def get_tree_roots(paths):
    """Return common root paths for a set of paths

    This function determines the smallest set of common root
    paths and sorts all given paths under the respective
    root.

    Returns
    -------
    dict
      paths by root
    """
    paths_ws = [_with_sep(p) for p in paths]
    # sort all paths under their potential roots
    roots = {}
    # separator-terminated variants of the accepted roots, for prefix tests
    roots_ws = []
    # start from the top to get all paths down the line
    # and collate them into as few roots as possible
    for s in sorted(paths_ws):
        # BUGFIX: compare against separator-terminated roots. The previous
        # comparison against sep-stripped roots made a sibling with a common
        # string prefix (e.g. '/a/bc' with an existing root '/a/b') appear
        # "covered" and silently drop out of the result entirely.
        if any(s.startswith(r) for r in roots_ws):
            # this path is already covered by a known root
            continue
        # find all sub paths
        subs = [p for p in paths if p.startswith(s)]
        roots_ws.append(s)
        roots[s.rstrip(sep)] = subs
    return roots
# TODO remove
# only `remove` and `uninstall` use this, the uses path `path_is_subpath`
def path_is_under(values, path=None):
    """Whether a given path is a subdirectory of any of the given test values

    Parameters
    ----------
    values : sequence or dict
      Paths to be tested against. This can be a dictionary in which case
      all values from all keys will be tested against.
    path : path or None
      Test path. If None is given, the process' working directory is
      used.

    Returns
    -------
    bool
    """
    if path is None:
        from datalad.utils import getpwd
        path = getpwd()
    if isinstance(values, dict):
        # flatten all values from all keys into a single candidate stream
        values = chain(*values.values())
    path_drive, _ = op.splitdrive(path)
    for candidate in values:
        cand_drive, _ = op.splitdrive(candidate)
        # relpath() cannot relate absolute paths on different Windows
        # drives (gh-3724); a drive mismatch already means "not under"
        if cand_drive != path_drive:
            continue
        rel = relpath(candidate, start=path)
        # `path` is under `candidate` when the candidate is the path itself,
        # its parent, or any chain of parent directories ('..', '../..', ...)
        if rel in (curdir, pardir) or set(psplit(rel)) == {pardir}:
            return True
    return False
# TODO(OPT)? YOH: from a cursory review seems like possibly an expensive function
# whenever many paths were provided (e.g. via shell glob).
# Might be worth testing on some usecase and py-spy'ing if notable portion
# of time is spent.
def discover_dataset_trace_to_targets(basepath, targetpaths, current_trace,
                                      spec, includeds=None):
    """Discover the edges and nodes in a dataset tree to given target paths

    Parameters
    ----------
    basepath : path
      Path to a start or top-level dataset. Really has to be a path to a
      dataset!
    targetpaths : list(path)
      Any non-zero number of paths that are termination points for the
      search algorithm. Can be paths to datasets, directories, or files
      (and any combination thereof).
    current_trace : list
      For a top-level call this should probably always be `[]`
    spec : dict
      `content_by_ds`-style dictionary that will receive information about the
      discovered datasets. Specifically, for each discovered dataset there
      will be an item with its path under the key (path) of the respective
      superdataset.
    includeds : sequence, optional
      Any paths given are treated as existing subdatasets, regardless of
      whether they can be found in the filesystem. Such subdatasets will appear
      under the key of the closest existing dataset in the `spec`.

    Returns
    -------
    None
      Function calls itself recursively and populates `spec` dict in-place.
      Keys are dataset paths, values are sets of subdataset paths
    """
    # convert to set for faster lookup
    includeds = includeds if isinstance(includeds, set) else \
        set() if includeds is None else set(includeds)
    # this beast walks the directory tree from a given `basepath` until
    # it discovers any of the given `targetpaths`
    # if it finds one, it commits any accummulated trace of visited
    # datasets on this edge to the spec
    valid_repo = GitRepo.is_valid_repo(basepath)
    if valid_repo:
        # we are passing into a new dataset, extend the dataset trace
        current_trace = current_trace + [basepath]
    # this edge is not done, we need to try to reach any downstream
    # dataset
    undiscovered_ds = set(t for t in targetpaths)  # if t != basepath)
    # whether anything in this directory matched a targetpath
    filematch = False
    if isdir(basepath):
        for p in listdir(basepath):
            p = ensure_unicode(opj(basepath, p))
            if not isdir(p):
                if p in targetpaths:
                    filematch = True
                # we cannot have anything below this one
                continue
            # OPT listdir might be large and we could have only few items
            # in `targetpaths` -- so traverse only those in spec which have
            # leading dir basepath
            # filter targets matching this downward path
            downward_targets = set(
                t for t in targetpaths if path_startswith(t, p))
            if not downward_targets:
                continue
            # remove the matching ones from the "todo" list
            undiscovered_ds.difference_update(downward_targets)
            # go one deeper; narrow `includeds` to the targets that can
            # actually live below this subdirectory
            discover_dataset_trace_to_targets(
                p, downward_targets, current_trace, spec,
                includeds=includeds if not includeds else includeds.intersection(
                    downward_targets))
    # reduce the unreached targets to those that were declared as (virtual)
    # subdatasets (via `includeds`) of the deepest dataset visited so far
    undiscovered_ds = [t for t in undiscovered_ds
                       if includeds and
                       path_is_subpath(t, current_trace[-1]) and
                       t in includeds]
    if filematch or basepath in targetpaths or undiscovered_ds:
        # something in/at this directory matched: commit the accumulated
        # dataset trace as parent->child edges into the spec
        for i, p in enumerate(current_trace[:-1]):
            # TODO RF prepare proper annotated path dicts
            subds = spec.get(p, set())
            subds.add(current_trace[i + 1])
            spec[p] = subds
    if undiscovered_ds:
        # attach virtual (not found on disk) subdatasets to the closest
        # existing dataset in the trace
        spec[current_trace[-1]] = spec.get(current_trace[-1], set()).union(
            undiscovered_ds)
def get_result_filter(fx):
    """Wrap a filter into a helper to be able to accept additional
    arguments, if the filter doesn't support it already"""
    if not fx or getargspec(fx).keywords:
        # nothing to wrap: either no filter at all, or one that already
        # accepts arbitrary keyword arguments
        return fx

    def _wrapped(res, **kwargs):
        # discard the extra keyword arguments the caller may supply
        return fx(res)
    return _wrapped
def eval_results(func):
    """Decorator for return value evaluation of datalad commands.

    Note, this decorator is only compatible with commands that return
    status dict sequences!

    Two basic modes of operation are supported: 1) "generator mode" that
    `yields` individual results, and 2) "list mode" that returns a sequence of
    results. The behavior can be selected via the kwarg `return_type`.
    Default is "list mode".

    This decorator implements common functionality for result rendering/output,
    error detection/handling, and logging.

    Result rendering/output can be triggered via the
    `datalad.api.result-renderer` configuration variable, or the
    `result_renderer` keyword argument of each decorated command. Supported
    modes are: 'default' (one line per result with action, status, path,
    and an optional message); 'json' (one object per result, like git-annex),
    'json_pp' (like 'json', but pretty-printed spanning multiple lines),
    'tailored' custom output formatting provided by each command
    class (if any).

    Error detection works by inspecting the `status` item of all result
    dictionaries. Any occurrence of a status other than 'ok' or 'notneeded'
    will cause an IncompleteResultsError exception to be raised that carries
    the failed actions' status dictionaries in its `failed` attribute.

    Status messages will be logged automatically, by default the following
    association of result status and log channel will be used: 'ok' (debug),
    'notneeded' (debug), 'impossible' (warning), 'error' (error). Logger
    instances included in the results are used to capture the origin of a
    status report.

    Parameters
    ----------
    func: function
      __call__ method of a subclass of Interface,
      i.e. a datalad command definition
    """
    @wrapt.decorator
    def eval_func(wrapped, instance, args, kwargs):
        # runs on every invocation of the decorated command
        lgr.log(2, "Entered eval_func for %s", func)
        # for result filters
        # we need to produce a dict with argname/argvalue pairs for all args
        # incl. defaults and args given as positionals
        allkwargs = get_allargs_as_kwargs(wrapped, args, kwargs)
        # determine the command class associated with `wrapped`
        wrapped_class = get_wrapped_class(wrapped)
        # retrieve common options from kwargs, and fall back on the command
        # class attributes, or general defaults if needed
        kwargs = kwargs.copy()  # we will pop, which might cause side-effect
        common_params = {
            p_name: kwargs.pop(
                # go with any explicitly given default
                p_name,
                # otherwise determine the command class and pull any
                # default set in that class
                getattr(
                    wrapped_class,
                    p_name,
                    # or the common default
                    eval_defaults[p_name]))
            for p_name in eval_params}
        # short cuts and configured setup for common options
        return_type = common_params['return_type']
        result_filter = get_result_filter(common_params['result_filter'])
        # resolve string labels for transformers too
        result_xfm = known_result_xfms.get(
            common_params['result_xfm'],
            # use verbatim, if not a known label
            common_params['result_xfm'])
        result_renderer = common_params['result_renderer']
        # TODO remove this conditional branch entirely, done outside
        if not result_renderer:
            result_renderer = dlcfg.get('datalad.api.result-renderer', None)
        # look for potential override of logging behavior
        result_log_level = dlcfg.get('datalad.log.result-level', 'debug')
        # query cfg for defaults
        # .is_installed and .config can be costly, so ensure we do
        # it only once. See https://github.com/datalad/datalad/issues/3575
        dataset_arg = allkwargs.get('dataset', None)
        from datalad.distribution.dataset import Dataset
        ds = dataset_arg if isinstance(dataset_arg, Dataset) \
            else Dataset(dataset_arg) if dataset_arg else None
        # look for hooks: dataset-level config wins over global config
        hooks = get_jsonhooks_from_config(ds.config if ds else dlcfg)
        # this internal helper function actually drives the command
        # generator-style, it may generate an exception if desired,
        # on incomplete results
        def generator_func(*_args, **_kwargs):
            # flag whether to raise an exception
            incomplete_results = []
            # track what actions were performed how many times
            action_summary = {}
            # if a custom summary is to be provided, collect the results
            # of the command execution
            results = []
            do_custom_result_summary = result_renderer in ('tailored', 'default') \
                and hasattr(wrapped_class, 'custom_result_summary_renderer')
            pass_summary = do_custom_result_summary and \
                getattr(wrapped_class,
                        'custom_result_summary_renderer_pass_summary', None)
            # process main results
            for r in _process_results(
                    # execution
                    wrapped(*_args, **_kwargs),
                    wrapped_class,
                    common_params['on_failure'],
                    # bookkeeping
                    action_summary,
                    incomplete_results,
                    # communication
                    result_renderer,
                    result_log_level,
                    # let renderers get to see how a command was called
                    allkwargs):
                for hook, spec in hooks.items():
                    # run the hooks before we yield the result
                    # this ensures that they are executed before
                    # a potentially wrapper command gets to act
                    # on them
                    if match_jsonhook2result(hook, r, spec['match']):
                        lgr.debug('Result %s matches hook %s', r, hook)
                        # a hook is also a command that yields results
                        # so yield them outside too
                        # users need to pay attention to void infinite
                        # loops, i.e. when a hook yields a result that
                        # triggers that same hook again
                        for hr in run_jsonhook(hook, spec, r, dataset_arg):
                            # apply same logic as for main results, otherwise
                            # any filters would only tackle the primary results
                            # and a mixture of return values could happen
                            if not keep_result(hr, result_filter, **allkwargs):
                                continue
                            hr = xfm_result(hr, result_xfm)
                            # rationale for conditional is a few lines down
                            if hr:
                                yield hr
                if not keep_result(r, result_filter, **allkwargs):
                    continue
                r = xfm_result(r, result_xfm)
                # in case the result_xfm decided to not give us anything
                # exclude it from the results. There is no particular reason
                # to do so other than that it was established behavior when
                # this comment was written. This will not affect any real
                # result record
                if r:
                    yield r
                # collect if summary is desired
                if do_custom_result_summary:
                    results.append(r)
            # result summary before a potential exception
            # custom first
            if do_custom_result_summary:
                if pass_summary:
                    summary_args = (results, action_summary)
                else:
                    summary_args = (results,)
                wrapped_class.custom_result_summary_renderer(*summary_args)
            elif result_renderer == 'default' and action_summary and \
                    sum(sum(s.values()) for s in action_summary.values()) > 1:
                # give a summary in default mode, when there was more than one
                # action performed
                render_action_summary(action_summary)
            if incomplete_results:
                # error out after any rendering, so the user has seen the
                # partial results before the exception propagates
                raise IncompleteResultsError(
                    failed=incomplete_results,
                    msg="Command did not complete successfully")
        if return_type == 'generator':
            # hand over the generator
            lgr.log(2, "Returning generator_func from eval_func for %s", wrapped_class)
            return generator_func(*args, **kwargs)
        else:
            @wrapt.decorator
            def return_func(wrapped_, instance_, args_, kwargs_):
                # materialize the generator and shape the return value
                # according to the requested `return_type`
                results = wrapped_(*args_, **kwargs_)
                if inspect.isgenerator(results):
                    # unwind generator if there is one, this actually runs
                    # any processing
                    results = list(results)
                if return_type == 'item-or-list' and \
                        len(results) < 2:
                    return results[0] if results else None
                else:
                    return results
            lgr.log(2, "Returning return_func from eval_func for %s", wrapped_class)
            return return_func(generator_func)(*args, **kwargs)
    ret = eval_func(func)
    # marker attribute for other code to detect decorated commands
    ret._eval_results = True
    return ret
def default_result_renderer(res):
    """Render a single result dict as one line of console output.

    Results with status 'notneeded' are suppressed entirely. The line has
    the shape ``action(status): path (type) [message] [error]`` with the
    optional parts only present when the result carries them.
    """
    if res.get('status', None) == 'notneeded':
        return
    path = res.get('path', None)
    if path and res.get('refds'):
        try:
            path = relpath(path, res['refds'])
        except ValueError:
            # can happen, e.g., on windows with paths from different
            # drives. just go with the original path in this case
            pass
    action_str = ac.color_word(
        res.get('action', '<action-unspecified>'), ac.BOLD)
    status_str = ac.color_status(res.get('status', '<status-unspecified>'))
    path_str = ' {}'.format(path) if path else ''
    type_str = ''
    if 'type' in res:
        type_str = ' ({})'.format(ac.color_word(res['type'], ac.MAGENTA))
    msg_str = ''
    msg = res.get('message', None)
    if msg:
        # a tuple message is a %-format template plus its arguments
        msg_str = ' [{}]'.format(
            msg[0] % msg[1:] if isinstance(msg, tuple) else msg)
    err_str = ''
    errmsg = res.get('error_message', None)
    if errmsg and res.get('status', None) != 'ok':
        err_str = ac.color_word(
            ' [{}]'.format(
                errmsg[0] % errmsg[1:]
                if isinstance(errmsg, tuple) else errmsg),
            ac.RED)
    ui.message('{action}({status}):{path}{type}{msg}{err}'.format(
        action=action_str,
        status=status_str,
        path=path_str,
        type=type_str,
        msg=msg_str,
        err=err_str))
def render_action_summary(action_summary):
    """Print a per-action summary of result status counts via the UI.

    `action_summary` maps action name -> {status -> count}; actions and
    statuses are listed in sorted order, one action per line.
    """
    summary_lines = []
    for act in sorted(action_summary):
        statuses = action_summary[act]
        counts = ', '.join(
            '{}: {}'.format(status, statuses[status])
            for status in sorted(statuses))
        summary_lines.append('{} ({})'.format(act, counts))
    ui.message("action summary:\n {}".format('\n '.join(summary_lines)))
def _display_suppressed_message(nsimilar, ndisplayed, last_ts, final=False):
    """Possibly tell the user how many similar results were suppressed.

    Returns the timestamp of the rendered message (used by callers to
    rate-limit subsequent updates), or `last_ts` unchanged when nothing
    was printed.
    """
    # +1 because there was the original result + nsimilar displayed.
    n_suppressed = nsimilar - ndisplayed + 1
    if n_suppressed <= 0:
        return last_ts
    now = time()
    # rate-limit update of suppression message, with a large number
    # of fast-paced results updating for each one can result in more
    # CPU load than the actual processing
    # arbitrarily go for a 2Hz update frequency -- it "feels" good
    if not (last_ts is None or final or (now - last_ts > 0.5)):
        return last_ts
    ui.message(' [{} similar {} been suppressed; disable with datalad.ui.suppress-similar-results=off]'
               .format(n_suppressed,
                       single_or_plural("message has",
                                        "messages have",
                                        n_suppressed, False)),
               # a trailing \r lets the next update overwrite this line
               cr="\n" if final else "\r")
    return now
def _process_results(
        results,
        cmd_class,
        on_failure,
        action_summary,
        incomplete_results,
        result_renderer,
        result_log_level,
        allkwargs):
    """Private helper of @eval_results.

    Loop over result records generated from some source and handle each
    of them according to the requested behavior: logging, rendering,
    per-action status bookkeeping, and failure collection.

    Parameters
    ----------
    results : iterable of dict
      Result records to process; records without an 'action' key are
      dropped with a debug message.
    cmd_class : type
      Command class; consulted for a `custom_result_renderer` in
      'tailored'/'default' mode.
    on_failure : {'ignore', 'continue', 'stop'}
      Whether failed results are collected, and whether processing stops
      at the first failure.
    action_summary : dict
      Mutated in place: maps action -> {status -> count}.
    incomplete_results : list
      Mutated in place: receives 'impossible'/'error' results.
    result_renderer : str or callable or None
      One of None/'disabled', 'default', 'json', 'json_pp', 'tailored',
      or a callable taking (res, **allkwargs).
    result_log_level : str
      Log channel name, or 'match-status' to pick one based on the
      result's status.
    allkwargs : dict
      Full argument record of the command call, passed to renderers.

    Yields
    ------
    dict
      Every processed result record.
    """
    # used to track repeated messages in the default renderer
    last_result = None
    last_result_ts = None
    # which result dict keys to inspect for changes to discover repetitions
    # of similar messages
    repetition_keys = set(('action', 'status', 'type', 'refds'))
    # counter for detected repetitions
    result_repetitions = 0
    # how many repetitions to show, before suppression kicks in;
    # suppression only makes sense for interactive output
    render_n_repetitions = \
        dlcfg.obtain('datalad.ui.suppress-similar-results-threshold') \
        if sys.stdout.isatty() \
        and dlcfg.obtain('datalad.ui.suppress-similar-results') \
        else float("inf")
    for res in results:
        if not res or 'action' not in res:
            # XXX Yarik has to no clue on how to track the origin of the
            # record to figure out WTF, so he just skips it
            # but MIH thinks leaving a trace of that would be good
            lgr.debug('Drop result record without "action": %s', res)
            continue
        actsum = action_summary.get(res['action'], {})
        # NOTE: a missing 'status' key raises KeyError here (and below in
        # the error handling) -- records are expected to carry one
        if res['status']:
            actsum[res['status']] = actsum.get(res['status'], 0) + 1
            action_summary[res['action']] = actsum
        ## log message, if there is one and a logger was given
        msg = res.get('message', None)
        # remove logger instance from results, as it is no longer useful
        # after logging was done, it isn't serializable, and generally
        # pollutes the output
        res_lgr = res.pop('logger', None)
        if msg and res_lgr:
            if isinstance(res_lgr, logging.Logger):
                # didn't get a particular log function, go with default
                res_lgr = getattr(
                    res_lgr,
                    default_logchannels[res['status']]
                    if result_log_level == 'match-status'
                    else result_log_level)
            msg = res['message']
            msgargs = None
            if isinstance(msg, tuple):
                msgargs = msg[1:]
                msg = msg[0]
            if 'path' in res:
                # result path could be a path instance
                path = str(res['path'])
                if msgargs:
                    # we will pass the msg for %-polation, so % should be doubled
                    path = path.replace('%', '%%')
                msg = '{} [{}({})]'.format(
                    msg, res['action'], path)
            if msgargs:
                # support string expansion of logging to avoid runtime cost
                try:
                    res_lgr(msg, *msgargs)
                except TypeError as exc:
                    raise TypeError(
                        "Failed to render %r with %r from %r: %s"
                        % (msg, msgargs, res, exc_str(exc))
                    )
            else:
                res_lgr(msg)
        ## output rendering
        # TODO RF this in a simple callable that gets passed into this function
        if result_renderer is None or result_renderer == 'disabled':
            pass
        elif result_renderer == 'default':
            trimmed_result = {k: v for k, v in res.items() if k in repetition_keys}
            if res.get('status', None) != 'notneeded' \
                    and trimmed_result == last_result:
                # this is a similar report, suppress if too many, but count it
                result_repetitions += 1
                if result_repetitions < render_n_repetitions:
                    default_result_renderer(res)
                else:
                    last_result_ts = _display_suppressed_message(
                        result_repetitions, render_n_repetitions, last_result_ts)
            else:
                # this one is new, first report on any prev. suppressed results
                # by number, and then render this fresh one
                last_result_ts = _display_suppressed_message(
                    result_repetitions, render_n_repetitions, last_result_ts,
                    final=True)
                default_result_renderer(res)
                result_repetitions = 0
            last_result = trimmed_result
        elif result_renderer in ('json', 'json_pp'):
            ui.message(json.dumps(
                {k: v for k, v in res.items()
                 # BUGFIX: the previous `k not in ('logger')` was a
                 # substring test against the *string* 'logger' (the
                 # parentheses do not make a tuple), which also dropped
                 # any key that happens to be a substring, e.g. 'log'
                 if k != 'logger'},
                sort_keys=True,
                indent=2 if result_renderer.endswith('_pp') else None,
                default=str))
        elif result_renderer in ('tailored', 'default'):
            # NOTE: 'default' can never reach this branch (handled above);
            # kept for historical reasons
            if hasattr(cmd_class, 'custom_result_renderer'):
                cmd_class.custom_result_renderer(res, **allkwargs)
        elif hasattr(result_renderer, '__call__'):
            try:
                result_renderer(res, **allkwargs)
            except Exception as e:
                # a broken custom renderer must not kill the command
                lgr.warning('Result rendering failed for: %s [%s]',
                            res, exc_str(e))
        else:
            raise ValueError('unknown result renderer "{}"'.format(result_renderer))
        ## error handling
        # looks for error status, and report at the end via
        # an exception
        if on_failure in ('continue', 'stop') \
                and res['status'] in ('impossible', 'error'):
            incomplete_results.append(res)
            if on_failure == 'stop':
                # first fail -> that's it
                # raise will happen after the loop
                break
        yield res
    # make sure to report on any issues that we had suppressed
    _display_suppressed_message(
        result_repetitions, render_n_repetitions, last_result_ts, final=True)
def keep_result(res, rfilter, **kwargs):
    """Return whether a result record should be reported.

    A falsy `rfilter` keeps every record. The filter itself may raise
    ValueError to exclude a record with a custom explanation; a falsy
    return value is converted into the same exclusion path.
    """
    if not rfilter:
        return True
    try:
        if rfilter(res, **kwargs):
            return True
        # give the slightest indication which filter was employed
        raise ValueError(
            'excluded by filter {} with arguments {}'.format(rfilter, kwargs))
    except ValueError as exc:
        # surface the excluded result in the debug log -- massively
        # improves the debugging experience
        lgr.debug('Not reporting result (%s): %s', exc_str(exc), res)
        return False
def xfm_result(res, xfm):
    """Apply result transformer `xfm` to `res`; identity when no xfm is set."""
    return xfm(res) if xfm else res
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Interface utility functions
"""
__docformat__ = 'restructuredtext'
import inspect
import logging
import os
import wrapt
import sys
import re
from time import time
from os import curdir
from os import pardir
from os import listdir
import os.path as op
from os.path import join as opj
from os.path import isdir
from os.path import relpath
from os.path import sep
from os.path import split as psplit
from itertools import chain
import json
# avoid import from API to not get into circular imports
from datalad.utils import with_pathsep as _with_sep # TODO: RF whenever merge conflict is not upon us
from datalad.utils import (
path_startswith,
path_is_subpath,
ensure_unicode,
getargspec,
get_wrapped_class,
)
from datalad.support.gitrepo import GitRepo
from datalad.support.exceptions import IncompleteResultsError
from datalad import cfg as dlcfg
from datalad.dochelpers import (
exc_str,
single_or_plural,
)
from datalad.ui import ui
import datalad.support.ansi_colors as ac
from datalad.interface.base import default_logchannels
from datalad.interface.base import get_allargs_as_kwargs
from datalad.interface.common_opts import eval_params
from datalad.interface.common_opts import eval_defaults
from .results import known_result_xfms
from datalad.core.local.resulthooks import (
get_jsonhooks_from_config,
match_jsonhook2result,
run_jsonhook,
)
lgr = logging.getLogger('datalad.interface.utils')
def cls2cmdlinename(cls):
    """Return the cmdline command name from an Interface class."""
    # insert a dash at every lowercase/digit -> uppercase boundary, then
    # lowercase everything: "AddArchiveContent" -> "add-archive-content"
    return re.sub(r'([a-z0-9])([A-Z])', r'\1-\2', cls.__name__).lower()
# TODO remove
# only `drop` and `uninstall` are still using this
def handle_dirty_dataset(ds, mode, msg=None):
    """Detect and treat unsaved changes as instructed by `mode`

    Parameters
    ----------
    ds : Dataset or None
      Dataset to be inspected. Does nothing if `None`.
    mode : {'fail', 'ignore', 'save-before'}
      How to act upon discovering unsaved changes.
    msg : str or None
      Custom message to use for a potential commit.

    Returns
    -------
    None

    Raises
    ------
    RuntimeError
      In mode 'fail' when unsaved changes are found, or in mode
      'save-before' when the dataset is not installed.
    ValueError
      For an unrecognized `mode`.
    """
    if ds is None:
        # nothing to be handled
        return
    if msg is None:
        msg = '[DATALAD] auto-saved changes'
    # make sure that all pending changes (batched annex operations, etc.)
    # are actually reflected in Git
    if ds.repo:
        ds.repo.precommit()
    if mode == 'ignore':
        return
    elif mode == 'fail':
        # a dataset without a repo cannot be considered clean
        if not ds.repo or ds.repo.dirty:
            raise RuntimeError('dataset {} has unsaved changes'.format(ds))
    elif mode == 'save-before':
        if not ds.is_installed():
            raise RuntimeError('dataset {} is not yet installed'.format(ds))
        # deferred import to avoid a circular import at module load time
        from datalad.core.local.save import Save
        Save.__call__(dataset=ds, message=msg, updated=True)
    else:
        raise ValueError("unknown if-dirty mode '{}'".format(mode))
def get_tree_roots(paths):
    """Return common root paths for a set of paths

    This function determines the smallest set of common root
    paths and sorts all given paths under the respective
    root.

    Returns
    -------
    dict
        paths by root
    """
    roots = {}
    # consider sep-terminated candidates in sorted order, so shallow
    # paths are established as roots before any of their descendants
    for candidate in sorted(_with_sep(p) for p in paths):
        if any(candidate.startswith(r) for r in roots):
            # already covered by a previously established root
            continue
        # collate every original path that lives under this new root
        roots[candidate.rstrip(sep)] = [
            p for p in paths if p.startswith(candidate)]
    return roots
# TODO remove
# only `remove` and `uninstall` use this, the uses path `path_is_subpath`
def path_is_under(values, path=None):
    """Whether a given path is a subdirectory of any of the given test values

    Parameters
    ----------
    values : sequence or dict
        Paths to be tested against. This can be a dictionary in which case
        all values from all keys will be tested against.
    path : path or None
        Test path. If None is given, the process' working directory is
        used.

    Returns
    -------
    bool
    """
    if path is None:
        # deferred import to avoid circular imports
        from datalad.utils import getpwd
        path = getpwd()
    if isinstance(values, dict):
        values = chain(*values.values())
    path_drive, _ = op.splitdrive(path)
    for candidate in values:
        candidate_drive, _ = op.splitdrive(candidate)
        # relpath() does not support absolute paths on different windows
        # drives (gh-3724); a different drive also proves "not under"
        if candidate_drive != path_drive:
            continue
        rel = relpath(candidate, start=path)
        # the candidate is an ancestor of (or identical to) `path` when
        # the relative path is '.', '..', or consists of '..' components only
        if rel == curdir or rel == pardir or set(psplit(rel)) == {pardir}:
            # first match is enough
            return True
    return False
# TODO(OPT)? YOH: from a cursory review seems like possibly an expensive function
# whenever many paths were provided (e.g. via shell glob).
# Might be worth testing on some usecase and py-spy'ing if notable portion
# of time is spent.
def discover_dataset_trace_to_targets(basepath, targetpaths, current_trace,
                                      spec, includeds=None):
    """Discover the edges and nodes in a dataset tree to given target paths

    Parameters
    ----------
    basepath : path
      Path to a start or top-level dataset. Really has to be a path to a
      dataset!
    targetpaths : list(path)
      Any non-zero number of paths that are termination points for the
      search algorithm. Can be paths to datasets, directories, or files
      (and any combination thereof).
    current_trace : list
      For a top-level call this should probably always be `[]`
    spec : dict
      `content_by_ds`-style dictionary that will receive information about the
      discovered datasets. Specifically, for each discovered dataset there
      will be an item with its path under the key (path) of the respective
      superdataset.
    includeds : sequence, optional
      Any paths given are treated as existing subdatasets, regardless of
      whether they can be found in the filesystem. Such subdatasets will appear
      under the key of the closest existing dataset in the `spec`.

    Returns
    -------
    None
      Function calls itself recursively and populates `spec` dict in-place.
      Keys are dataset paths, values are sets of subdataset paths
    """
    # convert to set for faster lookup
    includeds = includeds if isinstance(includeds, set) else \
        set() if includeds is None else set(includeds)
    # this beast walks the directory tree from a given `basepath` until
    # it discovers any of the given `targetpaths`
    # if it finds one, it commits any accummulated trace of visited
    # datasets on this edge to the spec
    valid_repo = GitRepo.is_valid_repo(basepath)
    if valid_repo:
        # we are passing into a new dataset, extend the dataset trace
        current_trace = current_trace + [basepath]
    # this edge is not done, we need to try to reach any downstream
    # dataset
    undiscovered_ds = set(t for t in targetpaths)  # if t != basepath)
    # whether anything in this directory matched a targetpath
    filematch = False
    if isdir(basepath):
        for p in listdir(basepath):
            p = ensure_unicode(opj(basepath, p))
            if not isdir(p):
                if p in targetpaths:
                    filematch = True
                # we cannot have anything below this one
                continue
            # OPT listdir might be large and we could have only few items
            # in `targetpaths` -- so traverse only those in spec which have
            # leading dir basepath
            # filter targets matching this downward path
            downward_targets = set(
                t for t in targetpaths if path_startswith(t, p))
            if not downward_targets:
                continue
            # remove the matching ones from the "todo" list
            undiscovered_ds.difference_update(downward_targets)
            # go one deeper
            discover_dataset_trace_to_targets(
                p, downward_targets, current_trace, spec,
                includeds=includeds if not includeds else includeds.intersection(
                    downward_targets))
    # keep only those unreached targets that were explicitly declared as
    # (possibly absent) subdatasets below the deepest dataset visited
    # NOTE(review): assumes `current_trace` is non-empty whenever
    # `includeds` is non-empty -- confirm with top-level callers
    undiscovered_ds = [t for t in undiscovered_ds
                       if includeds and
                       path_is_subpath(t, current_trace[-1]) and
                       t in includeds]
    if filematch or basepath in targetpaths or undiscovered_ds:
        # commit the visited dataset chain as parent->child edges
        for i, p in enumerate(current_trace[:-1]):
            # TODO RF prepare proper annotated path dicts
            subds = spec.get(p, set())
            subds.add(current_trace[i + 1])
            spec[p] = subds
        if undiscovered_ds:
            spec[current_trace[-1]] = spec.get(current_trace[-1], set()).union(
                undiscovered_ds)
def get_result_filter(fx):
    """Wrap a filter into a helper to be able to accept additional
    arguments, if the filter doesn't support it already"""
    if fx and not getargspec(fx).keywords:
        # the filter takes no **kwargs -- swallow extras on its behalf
        def _wrapped(res, **kwargs):
            return fx(res)
        return _wrapped
    # falsy or already **kwargs-capable filters pass through unchanged
    return fx
def eval_results(func):
    """Decorator for return value evaluation of datalad commands.

    Note, this decorator is only compatible with commands that return
    status dict sequences!

    Two basic modes of operation are supported: 1) "generator mode" that
    `yields` individual results, and 2) "list mode" that returns a sequence of
    results. The behavior can be selected via the kwarg `return_type`.
    Default is "list mode".

    This decorator implements common functionality for result rendering/output,
    error detection/handling, and logging.

    Result rendering/output can be triggered via the
    `datalad.api.result-renderer` configuration variable, or the
    `result_renderer` keyword argument of each decorated command. Supported
    modes are: 'default' (one line per result with action, status, path,
    and an optional message); 'json' (one object per result, like git-annex),
    'json_pp' (like 'json', but pretty-printed spanning multiple lines),
    'tailored' custom output formatting provided by each command
    class (if any).

    Error detection works by inspecting the `status` item of all result
    dictionaries. Any occurrence of a status other than 'ok' or 'notneeded'
    will cause an IncompleteResultsError exception to be raised that carries
    the failed actions' status dictionaries in its `failed` attribute.

    Status messages will be logged automatically, by default the following
    association of result status and log channel will be used: 'ok' (debug),
    'notneeded' (debug), 'impossible' (warning), 'error' (error). Logger
    instances included in the results are used to capture the origin of a
    status report.

    Parameters
    ----------
    func: function
      __call__ method of a subclass of Interface,
      i.e. a datalad command definition
    """
    @wrapt.decorator
    def eval_func(wrapped, instance, args, kwargs):
        # this replaces the decorated __call__; it runs on every command
        # invocation
        lgr.log(2, "Entered eval_func for %s", func)
        # for result filters
        # we need to produce a dict with argname/argvalue pairs for all args
        # incl. defaults and args given as positionals
        allkwargs = get_allargs_as_kwargs(wrapped, args, kwargs)
        # determine the command class associated with `wrapped`
        wrapped_class = get_wrapped_class(wrapped)
        # retrieve common options from kwargs, and fall back on the command
        # class attributes, or general defaults if needed
        kwargs = kwargs.copy() # we will pop, which might cause side-effect
        common_params = {
            p_name: kwargs.pop(
                # go with any explicitly given default
                p_name,
                # otherwise determine the command class and pull any
                # default set in that class
                getattr(
                    wrapped_class,
                    p_name,
                    # or the common default
                    eval_defaults[p_name]))
            for p_name in eval_params}
        # short cuts and configured setup for common options
        return_type = common_params['return_type']
        result_filter = get_result_filter(common_params['result_filter'])
        # resolve string labels for transformers too
        result_xfm = known_result_xfms.get(
            common_params['result_xfm'],
            # use verbatim, if not a known label
            common_params['result_xfm'])
        result_renderer = common_params['result_renderer']
        # TODO remove this conditional branch entirely, done outside
        if not result_renderer:
            result_renderer = dlcfg.get('datalad.api.result-renderer', None)
        # look for potential override of logging behavior
        result_log_level = dlcfg.get('datalad.log.result-level', 'debug')
        # query cfg for defaults
        # .is_installed and .config can be costly, so ensure we do
        # it only once. See https://github.com/datalad/datalad/issues/3575
        dataset_arg = allkwargs.get('dataset', None)
        # deferred import to avoid a circular import at module load time
        from datalad.distribution.dataset import Dataset
        ds = dataset_arg if isinstance(dataset_arg, Dataset) \
            else Dataset(dataset_arg) if dataset_arg else None
        # look for hooks
        hooks = get_jsonhooks_from_config(ds.config if ds else dlcfg)
        # this internal helper function actually drives the command
        # generator-style, it may generate an exception if desired,
        # on incomplete results
        def generator_func(*_args, **_kwargs):
            # flag whether to raise an exception
            incomplete_results = []
            # track what actions were performed how many times
            action_summary = {}
            # if a custom summary is to be provided, collect the results
            # of the command execution
            results = []
            do_custom_result_summary = result_renderer in ('tailored', 'default') \
                and hasattr(wrapped_class, 'custom_result_summary_renderer')
            pass_summary = do_custom_result_summary and \
                getattr(wrapped_class,
                        'custom_result_summary_renderer_pass_summary', None)
            # process main results
            for r in _process_results(
                    # execution
                    wrapped(*_args, **_kwargs),
                    wrapped_class,
                    common_params['on_failure'],
                    # bookkeeping
                    action_summary,
                    incomplete_results,
                    # communication
                    result_renderer,
                    result_log_level,
                    # let renderers get to see how a command was called
                    allkwargs):
                for hook, spec in hooks.items():
                    # run the hooks before we yield the result
                    # this ensures that they are executed before
                    # a potentially wrapper command gets to act
                    # on them
                    if match_jsonhook2result(hook, r, spec['match']):
                        lgr.debug('Result %s matches hook %s', r, hook)
                        # a hook is also a command that yields results
                        # so yield them outside too
                        # users need to pay attention to void infinite
                        # loops, i.e. when a hook yields a result that
                        # triggers that same hook again
                        for hr in run_jsonhook(hook, spec, r, dataset_arg):
                            # apply same logic as for main results, otherwise
                            # any filters would only tackle the primary results
                            # and a mixture of return values could happen
                            if not keep_result(hr, result_filter, **allkwargs):
                                continue
                            hr = xfm_result(hr, result_xfm)
                            # rationale for conditional is a few lines down
                            if hr:
                                yield hr
                if not keep_result(r, result_filter, **allkwargs):
                    continue
                r = xfm_result(r, result_xfm)
                # in case the result_xfm decided to not give us anything
                # exclude it from the results. There is no particular reason
                # to do so other than that it was established behavior when
                # this comment was written. This will not affect any real
                # result record
                if r:
                    yield r
                    # collect if summary is desired
                    if do_custom_result_summary:
                        results.append(r)
            # result summary before a potential exception
            # custom first
            if do_custom_result_summary:
                if pass_summary:
                    summary_args = (results, action_summary)
                else:
                    summary_args = (results,)
                wrapped_class.custom_result_summary_renderer(*summary_args)
            elif result_renderer == 'default' and action_summary and \
                    sum(sum(s.values()) for s in action_summary.values()) > 1:
                # give a summary in default mode, when there was more than one
                # action performed
                render_action_summary(action_summary)
            if incomplete_results:
                raise IncompleteResultsError(
                    failed=incomplete_results,
                    msg="Command did not complete successfully")
        if return_type == 'generator':
            # hand over the generator
            lgr.log(2, "Returning generator_func from eval_func for %s", wrapped_class)
            return generator_func(*args, **kwargs)
        else:
            # wrap the generator in a function that materializes the results
            @wrapt.decorator
            def return_func(wrapped_, instance_, args_, kwargs_):
                results = wrapped_(*args_, **kwargs_)
                if inspect.isgenerator(results):
                    # unwind generator if there is one, this actually runs
                    # any processing
                    results = list(results)
                if return_type == 'item-or-list' and \
                        len(results) < 2:
                    return results[0] if results else None
                else:
                    return results
            lgr.log(2, "Returning return_func from eval_func for %s", wrapped_class)
            return return_func(generator_func)(*args, **kwargs)
    ret = eval_func(func)
    # mark the wrapper so callers can detect an @eval_results command
    ret._eval_results = True
    return ret
def default_result_renderer(res):
    """Render a single result record as one line via the UI.

    Results with status 'notneeded' are not rendered at all. The output
    line has the shape ``action(status): path (type) [message] [error]``
    where every component after the status is optional. If the record
    carries a reference dataset (``refds``), the path is reported
    relative to it.
    """
    if res.get('status', None) != 'notneeded':
        path = res.get('path', None)
        if path and res.get('refds'):
            try:
                # report the path relative to the reference dataset
                path = relpath(path, res['refds'])
            except ValueError:
                # can happen, e.g., on windows with paths from different
                # drives. just go with the original path in this case
                pass
        # 'message' and 'error_message' may be plain strings or
        # (format, args...) tuples for lazy %-expansion; the error is only
        # shown for non-'ok' results
        ui.message('{action}({status}):{path}{type}{msg}{err}'.format(
            action=ac.color_word(
                res.get('action', '<action-unspecified>'),
                ac.BOLD),
            status=ac.color_status(res.get('status', '<status-unspecified>')),
            path=' {}'.format(path) if path else '',
            type=' ({})'.format(
                ac.color_word(res['type'], ac.MAGENTA)
            ) if 'type' in res else '',
            msg=' [{}]'.format(
                res['message'][0] % res['message'][1:]
                if isinstance(res['message'], tuple) else res[
                    'message'])
            if res.get('message', None) else '',
            err=ac.color_word(' [{}]'.format(
                res['error_message'][0] % res['error_message'][1:]
                if isinstance(res['error_message'], tuple) else res[
                    'error_message']), ac.RED)
            if res.get('error_message', None) and res.get('status', None) != 'ok' else ''))
def render_action_summary(action_summary):
    """Print a per-action summary of result status counts via the UI.

    `action_summary` maps action name -> {status -> count}; actions and
    statuses are listed in sorted order, one action per line.
    """
    summary_lines = []
    for act in sorted(action_summary):
        statuses = action_summary[act]
        counts = ', '.join(
            '{}: {}'.format(status, statuses[status])
            for status in sorted(statuses))
        summary_lines.append('{} ({})'.format(act, counts))
    ui.message("action summary:\n {}".format('\n '.join(summary_lines)))
def _display_suppressed_message(nsimilar, ndisplayed, last_ts, final=False):
    """Possibly tell the user how many similar results were suppressed.

    Returns the timestamp of the rendered message (used by callers to
    rate-limit subsequent updates), or `last_ts` unchanged when nothing
    was printed.
    """
    # +1 because there was the original result + nsimilar displayed.
    n_suppressed = nsimilar - ndisplayed + 1
    if n_suppressed <= 0:
        return last_ts
    now = time()
    # rate-limit update of suppression message, with a large number
    # of fast-paced results updating for each one can result in more
    # CPU load than the actual processing
    # arbitrarily go for a 2Hz update frequency -- it "feels" good
    if not (last_ts is None or final or (now - last_ts > 0.5)):
        return last_ts
    ui.message(' [{} similar {} been suppressed; disable with datalad.ui.suppress-similar-results=off]'
               .format(n_suppressed,
                       single_or_plural("message has",
                                        "messages have",
                                        n_suppressed, False)),
               # a trailing \r lets the next update overwrite this line
               cr="\n" if final else "\r")
    return now
def _process_results(
        results,
        cmd_class,
        on_failure,
        action_summary,
        incomplete_results,
        result_renderer,
        result_log_level,
        allkwargs):
    """Private helper of @eval_results.

    Loop over result records generated from some source and handle each
    of them according to the requested behavior: logging, rendering,
    per-action status bookkeeping, and failure collection.

    Parameters
    ----------
    results : iterable of dict
      Result records to process; records without an 'action' key are
      dropped with a debug message.
    cmd_class : type
      Command class; consulted for a `custom_result_renderer` in
      'tailored'/'default' mode.
    on_failure : {'ignore', 'continue', 'stop'}
      Whether failed results are collected, and whether processing stops
      at the first failure.
    action_summary : dict
      Mutated in place: maps action -> {status -> count}.
    incomplete_results : list
      Mutated in place: receives 'impossible'/'error' results.
    result_renderer : str or callable or None
      One of None/'disabled', 'default', 'json', 'json_pp', 'tailored',
      or a callable taking (res, **allkwargs).
    result_log_level : str
      Log channel name, or 'match-status' to pick one based on the
      result's status.
    allkwargs : dict
      Full argument record of the command call, passed to renderers.

    Yields
    ------
    dict
      Every processed result record.
    """
    # used to track repeated messages in the default renderer
    last_result = None
    last_result_ts = None
    # which result dict keys to inspect for changes to discover repetitions
    # of similar messages
    repetition_keys = set(('action', 'status', 'type', 'refds'))
    # counter for detected repetitions
    result_repetitions = 0
    # how many repetitions to show, before suppression kicks in;
    # suppression only makes sense for interactive output
    render_n_repetitions = \
        dlcfg.obtain('datalad.ui.suppress-similar-results-threshold') \
        if sys.stdout.isatty() \
        and dlcfg.obtain('datalad.ui.suppress-similar-results') \
        else float("inf")
    for res in results:
        if not res or 'action' not in res:
            # XXX Yarik has to no clue on how to track the origin of the
            # record to figure out WTF, so he just skips it
            # but MIH thinks leaving a trace of that would be good
            lgr.debug('Drop result record without "action": %s', res)
            continue
        actsum = action_summary.get(res['action'], {})
        # NOTE: a missing 'status' key raises KeyError here (and below in
        # the error handling) -- records are expected to carry one
        if res['status']:
            actsum[res['status']] = actsum.get(res['status'], 0) + 1
            action_summary[res['action']] = actsum
        ## log message, if there is one and a logger was given
        msg = res.get('message', None)
        # remove logger instance from results, as it is no longer useful
        # after logging was done, it isn't serializable, and generally
        # pollutes the output
        res_lgr = res.pop('logger', None)
        if msg and res_lgr:
            if isinstance(res_lgr, logging.Logger):
                # didn't get a particular log function, go with default
                res_lgr = getattr(
                    res_lgr,
                    default_logchannels[res['status']]
                    if result_log_level == 'match-status'
                    else result_log_level)
            msg = res['message']
            msgargs = None
            if isinstance(msg, tuple):
                msgargs = msg[1:]
                msg = msg[0]
            if 'path' in res:
                # result path could be a path instance
                path = str(res['path'])
                if msgargs:
                    # we will pass the msg for %-polation, so % should be doubled
                    path = path.replace('%', '%%')
                msg = '{} [{}({})]'.format(
                    msg, res['action'], path)
            if msgargs:
                # support string expansion of logging to avoid runtime cost
                try:
                    res_lgr(msg, *msgargs)
                except TypeError as exc:
                    raise TypeError(
                        "Failed to render %r with %r from %r: %s"
                        % (msg, msgargs, res, exc_str(exc))
                    )
            else:
                res_lgr(msg)
        ## output rendering
        # TODO RF this in a simple callable that gets passed into this function
        if result_renderer is None or result_renderer == 'disabled':
            pass
        elif result_renderer == 'default':
            trimmed_result = {k: v for k, v in res.items() if k in repetition_keys}
            if res.get('status', None) != 'notneeded' \
                    and trimmed_result == last_result:
                # this is a similar report, suppress if too many, but count it
                result_repetitions += 1
                if result_repetitions < render_n_repetitions:
                    default_result_renderer(res)
                else:
                    last_result_ts = _display_suppressed_message(
                        result_repetitions, render_n_repetitions, last_result_ts)
            else:
                # this one is new, first report on any prev. suppressed results
                # by number, and then render this fresh one
                last_result_ts = _display_suppressed_message(
                    result_repetitions, render_n_repetitions, last_result_ts,
                    final=True)
                default_result_renderer(res)
                result_repetitions = 0
            last_result = trimmed_result
        elif result_renderer in ('json', 'json_pp'):
            ui.message(json.dumps(
                {k: v for k, v in res.items()
                 # BUGFIX: the previous `k not in ('logger')` was a
                 # substring test against the *string* 'logger' (the
                 # parentheses do not make a tuple), which also dropped
                 # any key that happens to be a substring, e.g. 'log'
                 if k != 'logger'},
                sort_keys=True,
                indent=2 if result_renderer.endswith('_pp') else None,
                default=str))
        elif result_renderer in ('tailored', 'default'):
            # NOTE: 'default' can never reach this branch (handled above);
            # kept for historical reasons
            if hasattr(cmd_class, 'custom_result_renderer'):
                cmd_class.custom_result_renderer(res, **allkwargs)
        elif hasattr(result_renderer, '__call__'):
            try:
                result_renderer(res, **allkwargs)
            except Exception as e:
                # a broken custom renderer must not kill the command
                lgr.warning('Result rendering failed for: %s [%s]',
                            res, exc_str(e))
        else:
            raise ValueError('unknown result renderer "{}"'.format(result_renderer))
        ## error handling
        # looks for error status, and report at the end via
        # an exception
        if on_failure in ('continue', 'stop') \
                and res['status'] in ('impossible', 'error'):
            incomplete_results.append(res)
            if on_failure == 'stop':
                # first fail -> that's it
                # raise will happen after the loop
                break
        yield res
    # make sure to report on any issues that we had suppressed
    _display_suppressed_message(
        result_repetitions, render_n_repetitions, last_result_ts, final=True)
def keep_result(res, rfilter, **kwargs):
    """Return whether a result record should be reported.

    A falsy `rfilter` keeps every record. The filter itself may raise
    ValueError to exclude a record with a custom explanation; a falsy
    return value is converted into the same exclusion path.
    """
    if not rfilter:
        return True
    try:
        if rfilter(res, **kwargs):
            return True
        # give the slightest indication which filter was employed
        raise ValueError(
            'excluded by filter {} with arguments {}'.format(rfilter, kwargs))
    except ValueError as exc:
        # surface the excluded result in the debug log -- massively
        # improves the debugging experience
        lgr.debug('Not reporting result (%s): %s', exc_str(exc), res)
        return False
def xfm_result(res, xfm):
    """Apply result transformer `xfm` to `res`; identity when no xfm is set."""
    return xfm(res) if xfm else res
| en | 0.85687 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 noet: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## Interface utility functions # avoid import from API to not get into circular imports # TODO: RF whenever merge conflict is not upon us # TODO remove # only `drop` and `uninstall` are still using this Detect and treat unsaved changes as instructed by `mode` Parameters ---------- ds : Dataset or None Dataset to be inspected. Does nothing if `None`. mode : {'fail', 'ignore', 'save-before'} How to act upon discovering unsaved changes. msg : str or None Custom message to use for a potential commit. Returns ------- None # nothing to be handled # make sure that all pending changes (batched annex operations, etc.) # are actually reflected in Git Return common root paths for a set of paths This function determines the smallest set of common root paths and sorts all given paths under the respective root. Returns ------- dict paths by root # sort all paths under their potential roots # start from the top to get all paths down the line # and collate them into as few roots as possible # this path is already covered by a known root # find all sub paths # TODO remove # only `remove` and `uninstall` use this, the uses path `path_is_subpath` Whether a given path is a subdirectory of any of the given test values Parameters ---------- values : sequence or dict Paths to be tested against. This can be a dictionary in which case all values from all keys will be tested against. path : path or None Test path. If None is given, the process' working directory is used. 
Returns ------- bool # need to protect against unsupported use of relpath() with # abspaths on windows from different drives (gh-3724) # different drives, enough evidence for "not under" # first match is enough # TODO(OPT)? YOH: from a cursory review seems like possibly an expensive function # whenever many paths were provided (e.g. via shell glob). # Might be worth testing on some usecase and py-spy'ing if notable portion # of time is spent. Discover the edges and nodes in a dataset tree to given target paths Parameters ---------- basepath : path Path to a start or top-level dataset. Really has to be a path to a dataset! targetpaths : list(path) Any non-zero number of paths that are termination points for the search algorithm. Can be paths to datasets, directories, or files (and any combination thereof). current_trace : list For a top-level call this should probably always be `[]` spec : dict `content_by_ds`-style dictionary that will receive information about the discovered datasets. Specifically, for each discovered dataset there will be an item with its path under the key (path) of the respective superdataset. includeds : sequence, optional Any paths given are treated as existing subdatasets, regardless of whether they can be found in the filesystem. Such subdatasets will appear under the key of the closest existing dataset in the `spec`. Returns ------- None Function calls itself recursively and populates `spec` dict in-place. 
Keys are dataset paths, values are sets of subdataset paths # convert to set for faster lookup # this beast walks the directory tree from a given `basepath` until # it discovers any of the given `targetpaths` # if it finds one, it commits any accummulated trace of visited # datasets on this edge to the spec # we are passing into a new dataset, extend the dataset trace # this edge is not done, we need to try to reach any downstream # dataset # if t != basepath) # whether anything in this directory matched a targetpath # we cannot have anything below this one # OPT listdir might be large and we could have only few items # in `targetpaths` -- so traverse only those in spec which have # leading dir basepath # filter targets matching this downward path # remove the matching ones from the "todo" list # go one deeper # TODO RF prepare proper annotated path dicts Wrap a filter into a helper to be able to accept additional arguments, if the filter doesn't support it already Decorator for return value evaluation of datalad commands. Note, this decorator is only compatible with commands that return status dict sequences! Two basic modes of operation are supported: 1) "generator mode" that `yields` individual results, and 2) "list mode" that returns a sequence of results. The behavior can be selected via the kwarg `return_type`. Default is "list mode". This decorator implements common functionality for result rendering/output, error detection/handling, and logging. Result rendering/output can be triggered via the `datalad.api.result-renderer` configuration variable, or the `result_renderer` keyword argument of each decorated command. Supported modes are: 'default' (one line per result with action, status, path, and an optional message); 'json' (one object per result, like git-annex), 'json_pp' (like 'json', but pretty-printed spanning multiple lines), 'tailored' custom output formatting provided by each command class (if any). 
Error detection works by inspecting the `status` item of all result dictionaries. Any occurrence of a status other than 'ok' or 'notneeded' will cause an IncompleteResultsError exception to be raised that carries the failed actions' status dictionaries in its `failed` attribute. Status messages will be logged automatically, by default the following association of result status and log channel will be used: 'ok' (debug), 'notneeded' (debug), 'impossible' (warning), 'error' (error). Logger instances included in the results are used to capture the origin of a status report. Parameters ---------- func: function __call__ method of a subclass of Interface, i.e. a datalad command definition # for result filters # we need to produce a dict with argname/argvalue pairs for all args # incl. defaults and args given as positionals # determine the command class associated with `wrapped` # retrieve common options from kwargs, and fall back on the command # class attributes, or general defaults if needed # we will pop, which might cause side-effect # go with any explicitly given default # otherwise determine the command class and pull any # default set in that class # or the common default # short cuts and configured setup for common options # resolve string labels for transformers too # use verbatim, if not a known label # TODO remove this conditional branch entirely, done outside # look for potential override of logging behavior # query cfg for defaults # .is_installed and .config can be costly, so ensure we do # it only once. 
See https://github.com/datalad/datalad/issues/3575 # look for hooks # this internal helper function actually drives the command # generator-style, it may generate an exception if desired, # on incomplete results # flag whether to raise an exception # track what actions were performed how many times # if a custom summary is to be provided, collect the results # of the command execution # process main results # execution # bookkeeping # communication # let renderers get to see how a command was called # run the hooks before we yield the result # this ensures that they are executed before # a potentially wrapper command gets to act # on them # a hook is also a command that yields results # so yield them outside too # users need to pay attention to void infinite # loops, i.e. when a hook yields a result that # triggers that same hook again # apply same logic as for main results, otherwise # any filters would only tackle the primary results # and a mixture of return values could happen # rationale for conditional is a few lines down # in case the result_xfm decided to not give us anything # exclude it from the results. There is no particular reason # to do so other than that it was established behavior when # this comment was written. This will not affect any real # result record # collect if summary is desired # result summary before a potential exception # custom first # give a summary in default mode, when there was more than one # action performed # hand over the generator # unwind generator if there is one, this actually runs # any processing # can happen, e.g., on windows with paths from different # drives. just go with the original path in this case # +1 because there was the original result + nsimilar displayed. 
# rate-limit update of suppression message, with a large number # of fast-paced results updating for each one can result in more # CPU load than the actual processing # arbitrarily go for a 2Hz update frequency -- it "feels" good # private helper pf @eval_results # loop over results generated from some source and handle each # of them according to the requested behavior (logging, rendering, ...) # used to track repeated messages in the default renderer # which result dict keys to inspect for changes to discover repetitions # of similar messages # counter for detected repetitions # how many repetitions to show, before suppression kicks in # XXX Yarik has to no clue on how to track the origin of the # record to figure out WTF, so he just skips it # but MIH thinks leaving a trace of that would be good ## log message, if there is one and a logger was given # remove logger instance from results, as it is no longer useful # after logging was done, it isn't serializable, and generally # pollutes the output # didn't get a particular log function, go with default # result path could be a path instance # we will pass the msg for %-polation, so % should be doubled # support string expansion of logging to avoid runtime cost ## output rendering # TODO RF this in a simple callable that gets passed into this function # this is a similar report, suppress if too many, but count it # this one is new, first report on any prev. suppressed results # by number, and then render this fresh one ## error handling # looks for error status, and report at the end via # an exception # first fail -> that's it # raise will happen after the loop # make sure to report on any issues that we had suppressed # give the slightest indication which filter was employed # make sure to report the excluded result to massively improve # debugging experience | 1.830221 | 2 |
rave_python/rave_base.py | MaestroJolly/rave-python | 0 | 6624907 | <reponame>MaestroJolly/rave-python<filename>rave_python/rave_base.py
import os, hashlib, warnings, requests, json
from rave_python.rave_exceptions import ServerError, RefundError
import base64
from Crypto.Cipher import DES3
from dotenv import load_dotenv
# Load environment variables from a '.env' file so the RAVE keys can be read
# via os.getenv() in RaveBase.__init__ below.
# NOTE(review): all three load_dotenv() variants below are *executed*, not
# alternatives as the "OR" comments suggest. python-dotenv does not override
# variables that are already set, so the calls after the first are effectively
# no-ops; a single call would suffice.
load_dotenv()
# OR, the same with increased verbosity:
load_dotenv(verbose=True)
# OR, explicitly providing path to '.env'
from pathlib import Path # python3 only
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
class RaveBase(object):
    """Core of the implementation: API key handling, endpoint configuration and
    the 3DES payload encryption used by direct rave functions that require
    publicKey or secretKey (refund).
    """
    def __init__(self, publicKey=None, secretKey=None, usingEnv=True):
        """Initialize endpoint configuration and API keys.

        Args:
            publicKey: Rave public key (only used when usingEnv is False).
            secretKey: Rave secret key (only used when usingEnv is False).
            usingEnv: when True (default), read RAVE_PUBLIC_KEY and
                RAVE_SECRET_KEY from the environment instead.

        Raises:
            ValueError: if the required keys cannot be found.
        """
        # config variables (protected)
        self._baseUrlMap = "https://api.ravepay.co/"
        self._endpointMap = {
            "card": {
                "charge": "flwv3-pug/getpaidx/api/charge",
                "validate": "flwv3-pug/getpaidx/api/validatecharge",
                "verify": "flwv3-pug/getpaidx/api/v2/verify",
                "chargeSavedCard": "flwv3-pug/getpaidx/api/tokenized/charge",
            },
            "preauth": {
                "charge": "flwv3-pug/getpaidx/api/tokenized/preauth_charge",
                "capture": "flwv3-pug/getpaidx/api/capture",
                "refundorvoid": "flwv3-pug/getpaidx/api/refundorvoid"
            },
            "account": {
                "charge": "flwv3-pug/getpaidx/api/charge",
                "validate": "flwv3-pug/getpaidx/api/validate",
                "verify": "flwv3-pug/getpaidx/api/v2/verify"
            },
            "payment_plan": {
                "create": "v2/gpx/paymentplans/create",
                "fetch": "v2/gpx/paymentplans/query",
                "list": "v2/gpx/paymentplans/query",
                "cancel": "v2/gpx/paymentplans/",
                "edit" : "v2/gpx/paymentplans/"
            },
            "subscriptions": {
                "fetch": "v2/gpx/subscriptions/query",
                "list": "v2/gpx/subscriptions/query",
                "cancel": "v2/gpx/subscriptions/",
                "activate" : "v2/gpx/subscriptions/"
            },
            "subaccount": {
                "create": "v2/gpx/subaccounts/create",
                "list": "v2/gpx/subaccounts/",
                "fetch": "v2/gpx/subaccounts/get"
            },
            "transfer": {
                "initiate": "v2/gpx/transfers/create",
                "bulk": "v2/gpx/transfers/create_bulk",
                "fetch": "v2/gpx/transfers",
                "fee": "v2/gpx/transfers/fee",
                "balance": "v2/gpx/balance",
                "accountVerification": "flwv3-pug/getpaidx/api/resolve_account"
            },
            "verify": "flwv3-pug/getpaidx/api/v2/verify",
            "refund": "gpx/merchant/transactions/refund"
        }
        # Setting up public and private keys (private)
        #
        # If we are using environment variables to store secretKey
        if usingEnv:
            self.__publicKey = os.getenv("RAVE_PUBLIC_KEY")
            self.__secretKey = os.getenv("RAVE_SECRET_KEY")
            if (not self.__publicKey) or (not self.__secretKey):
                raise ValueError("Please set your RAVE_PUBLIC_KEY and RAVE_SECRET_KEY environment variables. Otherwise, pass publicKey and secretKey as arguments and set usingEnv to false")
        # If we are not using environment variables
        else:
            if (not publicKey) or (not secretKey):
                raise ValueError("\n Please provide as arguments your publicKey and secretKey. \n It is advised however that you provide secret key as an environment variables. \n To do this, remove the usingEnv flag and save your keys as environment variables, RAVE_PUBLIC_KEY and RAVE_SECRET_KEY")
            else:
                self.__publicKey = publicKey
                self.__secretKey = secretKey
                # Raise warning about not using environment variables
                warnings.warn("Though you can use the usingEnv flag to pass secretKey as an argument, it is advised to store it in an environment variable, especially in production.", SyntaxWarning)
        # Setting instance variables
        self._baseUrl = self._baseUrlMap
        # encryption key (protected)
        self._encryptionKey = self.__getEncryptionKey()

    # This generates the encryption key (private)
    def __getEncryptionKey(self):
        """Derive the 24-char 3DES key: first 12 chars of the secret key
        (without the 'FLWSECK-' prefix) + last 12 hex chars of the MD5 of the
        full secret key.

        Raises:
            ValueError: if no secret key is available.
        """
        if self.__secretKey:
            hashedseckey = hashlib.md5(self.__secretKey.encode("utf-8")).hexdigest()
            hashedseckeylast12 = hashedseckey[-12:]
            seckeyadjusted = self.__secretKey.replace('FLWSECK-', '')
            seckeyadjustedfirst12 = seckeyadjusted[:12]
            return seckeyadjustedfirst12 + hashedseckeylast12
        raise ValueError("Please initialize RavePay")

    # This returns the public key
    def _getPublicKey(self):
        """Return the configured public key."""
        return self.__publicKey

    # This returns the secret key
    def _getSecretKey(self):
        """Return the configured secret key."""
        return self.__secretKey

    # This encrypts text
    def _encrypt(self, plainText):
        """ This is the encryption function.\n
        Parameters include:\n
        plainText (string) -- This is the text you wish to encrypt
        """
        blockSize = 8
        padDiff = blockSize - (len(plainText) % blockSize)
        key = self.__getEncryptionKey()
        # BUGFIX: DES3.new requires a bytes key on Python 3 (pycryptodome);
        # passing the str key directly raises a TypeError. Encoding the ASCII
        # key as UTF-8 yields exactly the same key bytes as before.
        cipher = DES3.new(key.encode("utf-8"), DES3.MODE_ECB)
        # PKCS#5-style padding: append padDiff copies of chr(padDiff).
        # (The original "".join(chr(padDiff) * padDiff) was a redundant join.)
        plainText = plainText + chr(padDiff) * padDiff
        # cipher.encrypt operates on byte strings, hence the encode below.
        encrypted = base64.b64encode(cipher.encrypt(plainText.encode('utf-8'))).decode("utf-8")
        return encrypted
| import os, hashlib, warnings, requests, json
from rave_python.rave_exceptions import ServerError, RefundError
import base64
from Crypto.Cipher import DES3
from dotenv import load_dotenv
# Load environment variables from a '.env' file so the RAVE keys can be read
# via os.getenv() in RaveBase.__init__ below.
# NOTE(review): all three load_dotenv() variants below are *executed*, not
# alternatives as the "OR" comments suggest. python-dotenv does not override
# variables that are already set, so the calls after the first are effectively
# no-ops; a single call would suffice.
load_dotenv()
# OR, the same with increased verbosity:
load_dotenv(verbose=True)
# OR, explicitly providing path to '.env'
from pathlib import Path # python3 only
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
class RaveBase(object):
    """Core of the implementation: API key handling, endpoint configuration and
    the 3DES payload encryption used by direct rave functions that require
    publicKey or secretKey (refund).
    """
    def __init__(self, publicKey=None, secretKey=None, usingEnv=True):
        """Initialize endpoint configuration and API keys.

        Args:
            publicKey: Rave public key (only used when usingEnv is False).
            secretKey: Rave secret key (only used when usingEnv is False).
            usingEnv: when True (default), read RAVE_PUBLIC_KEY and
                RAVE_SECRET_KEY from the environment instead.

        Raises:
            ValueError: if the required keys cannot be found.
        """
        # config variables (protected)
        self._baseUrlMap = "https://api.ravepay.co/"
        self._endpointMap = {
            "card": {
                "charge": "flwv3-pug/getpaidx/api/charge",
                "validate": "flwv3-pug/getpaidx/api/validatecharge",
                "verify": "flwv3-pug/getpaidx/api/v2/verify",
                "chargeSavedCard": "flwv3-pug/getpaidx/api/tokenized/charge",
            },
            "preauth": {
                "charge": "flwv3-pug/getpaidx/api/tokenized/preauth_charge",
                "capture": "flwv3-pug/getpaidx/api/capture",
                "refundorvoid": "flwv3-pug/getpaidx/api/refundorvoid"
            },
            "account": {
                "charge": "flwv3-pug/getpaidx/api/charge",
                "validate": "flwv3-pug/getpaidx/api/validate",
                "verify": "flwv3-pug/getpaidx/api/v2/verify"
            },
            "payment_plan": {
                "create": "v2/gpx/paymentplans/create",
                "fetch": "v2/gpx/paymentplans/query",
                "list": "v2/gpx/paymentplans/query",
                "cancel": "v2/gpx/paymentplans/",
                "edit" : "v2/gpx/paymentplans/"
            },
            "subscriptions": {
                "fetch": "v2/gpx/subscriptions/query",
                "list": "v2/gpx/subscriptions/query",
                "cancel": "v2/gpx/subscriptions/",
                "activate" : "v2/gpx/subscriptions/"
            },
            "subaccount": {
                "create": "v2/gpx/subaccounts/create",
                "list": "v2/gpx/subaccounts/",
                "fetch": "v2/gpx/subaccounts/get"
            },
            "transfer": {
                "initiate": "v2/gpx/transfers/create",
                "bulk": "v2/gpx/transfers/create_bulk",
                "fetch": "v2/gpx/transfers",
                "fee": "v2/gpx/transfers/fee",
                "balance": "v2/gpx/balance",
                "accountVerification": "flwv3-pug/getpaidx/api/resolve_account"
            },
            "verify": "flwv3-pug/getpaidx/api/v2/verify",
            "refund": "gpx/merchant/transactions/refund"
        }
        # Setting up public and private keys (private)
        #
        # If we are using environment variables to store secretKey
        if usingEnv:
            self.__publicKey = os.getenv("RAVE_PUBLIC_KEY")
            self.__secretKey = os.getenv("RAVE_SECRET_KEY")
            if (not self.__publicKey) or (not self.__secretKey):
                raise ValueError("Please set your RAVE_PUBLIC_KEY and RAVE_SECRET_KEY environment variables. Otherwise, pass publicKey and secretKey as arguments and set usingEnv to false")
        # If we are not using environment variables
        else:
            if (not publicKey) or (not secretKey):
                raise ValueError("\n Please provide as arguments your publicKey and secretKey. \n It is advised however that you provide secret key as an environment variables. \n To do this, remove the usingEnv flag and save your keys as environment variables, RAVE_PUBLIC_KEY and RAVE_SECRET_KEY")
            else:
                self.__publicKey = publicKey
                self.__secretKey = secretKey
                # Raise warning about not using environment variables
                warnings.warn("Though you can use the usingEnv flag to pass secretKey as an argument, it is advised to store it in an environment variable, especially in production.", SyntaxWarning)
        # Setting instance variables
        self._baseUrl = self._baseUrlMap
        # encryption key (protected)
        self._encryptionKey = self.__getEncryptionKey()

    # This generates the encryption key (private)
    def __getEncryptionKey(self):
        """Derive the 24-char 3DES key: first 12 chars of the secret key
        (without the 'FLWSECK-' prefix) + last 12 hex chars of the MD5 of the
        full secret key.

        Raises:
            ValueError: if no secret key is available.
        """
        if self.__secretKey:
            hashedseckey = hashlib.md5(self.__secretKey.encode("utf-8")).hexdigest()
            hashedseckeylast12 = hashedseckey[-12:]
            seckeyadjusted = self.__secretKey.replace('FLWSECK-', '')
            seckeyadjustedfirst12 = seckeyadjusted[:12]
            return seckeyadjustedfirst12 + hashedseckeylast12
        raise ValueError("Please initialize RavePay")

    # This returns the public key
    def _getPublicKey(self):
        """Return the configured public key."""
        return self.__publicKey

    # This returns the secret key
    def _getSecretKey(self):
        """Return the configured secret key."""
        return self.__secretKey

    # This encrypts text
    def _encrypt(self, plainText):
        """ This is the encryption function.\n
        Parameters include:\n
        plainText (string) -- This is the text you wish to encrypt
        """
        blockSize = 8
        padDiff = blockSize - (len(plainText) % blockSize)
        key = self.__getEncryptionKey()
        # BUGFIX: DES3.new requires a bytes key on Python 3 (pycryptodome);
        # passing the str key directly raises a TypeError. Encoding the ASCII
        # key as UTF-8 yields exactly the same key bytes as before.
        cipher = DES3.new(key.encode("utf-8"), DES3.MODE_ECB)
        # PKCS#5-style padding: append padDiff copies of chr(padDiff).
        # (The original "".join(chr(padDiff) * padDiff) was a redundant join.)
        plainText = plainText + chr(padDiff) * padDiff
        # cipher.encrypt operates on byte strings, hence the encode below.
        encrypted = base64.b64encode(cipher.encrypt(plainText.encode('utf-8'))).decode("utf-8")
        return encrypted
src/simian/mac/munki/handlers/pkgs.py | tristansgray/simian | 326 | 6624908 | <gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to handle /pkgs"""
import httplib
import logging
import urllib
from google.appengine.api import memcache
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import common
from simian.mac.munki import handlers
def PackageExists(filename):
  """Check whether a package exists.

  Args:
    filename: str, package filename like 'foo.dmg'
  Returns:
    True or False
  """
  pkg = models.PackageInfo.get_by_key_name(filename)
  return pkg is not None
class Packages(
    handlers.AuthenticationHandler,
    blobstore_handlers.BlobstoreDownloadHandler):
  """Handler for /pkgs/"""

  def get(self, filename):
    """GET

    Args:
      filename: str, package filename like 'foo.dmg'
    Returns:
      None if a blob is being returned,
      or a response object
    """
    auth_return = auth.DoAnyAuth()
    if hasattr(auth_return, 'email'):
      email = auth_return.email()
      if not any((auth.IsAdminUser(email),
                  auth.IsSupportUser(email),
                 )):
        raise auth.IsAdminMismatch
    filename = urllib.unquote(filename)
    pkg = models.PackageInfo.MemcacheWrappedGet(filename)
    if pkg is None or not pkg.blobstore_key:
      self.error(httplib.NOT_FOUND)
      return
    if common.IsPanicModeNoPackages():
      self.error(httplib.SERVICE_UNAVAILABLE)
      return
    # Get the Blobstore BlobInfo for this package; memcache wrapped.
    memcache_key = 'blobinfo_%s' % filename
    blob_info = memcache.get(memcache_key)
    if not blob_info:
      blob_info = blobstore.BlobInfo.get(pkg.blobstore_key)
      if blob_info:
        memcache.set(memcache_key, blob_info, 300)  # cache for 5 minutes.
      else:
        logging.error(
            'Failure fetching BlobInfo for %s. Verify the blob exists: %s',
            pkg.filename, pkg.blobstore_key)
        self.error(httplib.NOT_FOUND)
        return
    header_date_str = self.request.headers.get('If-Modified-Since', '')
    etag_nomatch_str = self.request.headers.get('If-None-Match', 0)
    etag_match_str = self.request.headers.get('If-Match', 0)
    pkg_date = blob_info.creation
    pkg_size_bytes = blob_info.size
    # TODO(user): The below can be simplified once all of our clients
    # have ETag values set on the filesystem for these files. The
    # parsing of If-Modified-Since could be removed. Removing it prematurely
    # will cause a re-download of all packages on all clients for 1 iteration
    # until they all have ETag values.
    # Reduce complexity of elif conditional below.
    # If an If-None-Match: ETag is supplied, don't worry about a
    # missing file modification date -- the ETag supplies everything needed.
    if etag_nomatch_str and not header_date_str:
      resource_expired = False
    else:
      resource_expired = handlers.IsClientResourceExpired(
          pkg_date, header_date_str)
    # Client supplied If-Match: etag, but that etag does not match current
    # etag. return 412.
    if (etag_match_str and pkg.pkgdata_sha256 and
        etag_match_str != pkg.pkgdata_sha256):
      # Use the named constant for HTTP 412, consistent with the other
      # httplib status codes used throughout this handler.
      self.response.set_status(httplib.PRECONDITION_FAILED)
    # Client supplied no etag or If-No-Match: etag, and the etag did not
    # match, or the client's file is older than the mod time of this package.
    elif ((etag_nomatch_str and pkg.pkgdata_sha256 and
           etag_nomatch_str != pkg.pkgdata_sha256) or resource_expired):
      self.response.headers['Content-Disposition'] = str(
          'attachment; filename=%s' % filename)
      # header date empty or package has changed, send blob with last-mod date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.headers['Last-Modified'] = pkg_date.strftime(
          handlers.HEADER_DATE_FORMAT)
      self.response.headers['X-Download-Size'] = str(pkg_size_bytes)
      self.send_blob(pkg.blobstore_key)
    else:
      # Client doesn't need to do anything, current version is OK based on
      # ETag and/or last modified date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.set_status(httplib.NOT_MODIFIED)
class ClientRepair(Packages):
  """Handler for /repair/"""

  def get(self, client_id_str=''):
    """GET

    Returns:
      None if a blob is being returned,
      or a response object
    """
    session = auth.DoAnyAuth()
    client_id = handlers.GetClientIdForRequest(
        self.request, session=session, client_id_str=client_id_str)
    logging.info('Repair client ID: %s', client_id)
    # Pick the munkitools package whose catalogs include this client's track.
    track = client_id.get('track', '')
    filename = None
    for pkg in models.PackageInfo.all().filter('name =', 'munkitools'):
      if track in pkg.catalogs:
        filename = pkg.filename
        break
    if not filename:
      logging.warning('No repair client found.')
      return
    logging.info('Sending client: %s', filename)
    super(ClientRepair, self).get(filename)
| #!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to handle /pkgs"""
import httplib
import logging
import urllib
from google.appengine.api import memcache
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from simian.mac import models
from simian.mac.common import auth
from simian.mac.munki import common
from simian.mac.munki import handlers
def PackageExists(filename):
  """Check whether a package exists.

  Args:
    filename: str, package filename like 'foo.dmg'
  Returns:
    True or False
  """
  pkg = models.PackageInfo.get_by_key_name(filename)
  return pkg is not None
class Packages(
    handlers.AuthenticationHandler,
    blobstore_handlers.BlobstoreDownloadHandler):
  """Handler for /pkgs/"""

  def get(self, filename):
    """GET

    Args:
      filename: str, package filename like 'foo.dmg'
    Returns:
      None if a blob is being returned,
      or a response object
    """
    auth_return = auth.DoAnyAuth()
    if hasattr(auth_return, 'email'):
      email = auth_return.email()
      if not any((auth.IsAdminUser(email),
                  auth.IsSupportUser(email),
                 )):
        raise auth.IsAdminMismatch
    filename = urllib.unquote(filename)
    pkg = models.PackageInfo.MemcacheWrappedGet(filename)
    if pkg is None or not pkg.blobstore_key:
      self.error(httplib.NOT_FOUND)
      return
    if common.IsPanicModeNoPackages():
      self.error(httplib.SERVICE_UNAVAILABLE)
      return
    # Get the Blobstore BlobInfo for this package; memcache wrapped.
    memcache_key = 'blobinfo_%s' % filename
    blob_info = memcache.get(memcache_key)
    if not blob_info:
      blob_info = blobstore.BlobInfo.get(pkg.blobstore_key)
      if blob_info:
        memcache.set(memcache_key, blob_info, 300)  # cache for 5 minutes.
      else:
        logging.error(
            'Failure fetching BlobInfo for %s. Verify the blob exists: %s',
            pkg.filename, pkg.blobstore_key)
        self.error(httplib.NOT_FOUND)
        return
    header_date_str = self.request.headers.get('If-Modified-Since', '')
    etag_nomatch_str = self.request.headers.get('If-None-Match', 0)
    etag_match_str = self.request.headers.get('If-Match', 0)
    pkg_date = blob_info.creation
    pkg_size_bytes = blob_info.size
    # TODO(user): The below can be simplified once all of our clients
    # have ETag values set on the filesystem for these files. The
    # parsing of If-Modified-Since could be removed. Removing it prematurely
    # will cause a re-download of all packages on all clients for 1 iteration
    # until they all have ETag values.
    # Reduce complexity of elif conditional below.
    # If an If-None-Match: ETag is supplied, don't worry about a
    # missing file modification date -- the ETag supplies everything needed.
    if etag_nomatch_str and not header_date_str:
      resource_expired = False
    else:
      resource_expired = handlers.IsClientResourceExpired(
          pkg_date, header_date_str)
    # Client supplied If-Match: etag, but that etag does not match current
    # etag. return 412.
    if (etag_match_str and pkg.pkgdata_sha256 and
        etag_match_str != pkg.pkgdata_sha256):
      # Use the named constant for HTTP 412, consistent with the other
      # httplib status codes used throughout this handler.
      self.response.set_status(httplib.PRECONDITION_FAILED)
    # Client supplied no etag or If-No-Match: etag, and the etag did not
    # match, or the client's file is older than the mod time of this package.
    elif ((etag_nomatch_str and pkg.pkgdata_sha256 and
           etag_nomatch_str != pkg.pkgdata_sha256) or resource_expired):
      self.response.headers['Content-Disposition'] = str(
          'attachment; filename=%s' % filename)
      # header date empty or package has changed, send blob with last-mod date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.headers['Last-Modified'] = pkg_date.strftime(
          handlers.HEADER_DATE_FORMAT)
      self.response.headers['X-Download-Size'] = str(pkg_size_bytes)
      self.send_blob(pkg.blobstore_key)
    else:
      # Client doesn't need to do anything, current version is OK based on
      # ETag and/or last modified date.
      if pkg.pkgdata_sha256:
        self.response.headers['ETag'] = str(pkg.pkgdata_sha256)
      self.response.set_status(httplib.NOT_MODIFIED)
class ClientRepair(Packages):
  """Handler for /repair/"""

  def get(self, client_id_str=''):
    """GET

    Returns:
      None if a blob is being returned,
      or a response object
    """
    session = auth.DoAnyAuth()
    client_id = handlers.GetClientIdForRequest(
        self.request, session=session, client_id_str=client_id_str)
    logging.info('Repair client ID: %s', client_id)
    # Pick the munkitools package whose catalogs include this client's track.
    track = client_id.get('track', '')
    filename = None
    for pkg in models.PackageInfo.all().filter('name =', 'munkitools'):
      if track in pkg.catalogs:
        filename = pkg.filename
        break
    if not filename:
      logging.warning('No repair client found.')
      return
    logging.info('Sending client: %s', filename)
    super(ClientRepair, self).get(filename)
Handler for /repair/ GET Returns: None if a blob is being returned, or a response object | 2.131469 | 2 |
raspi/Final_Solution/telem_and_video_stream.py | blake-shaffer/avionics | 3 | 6624909 | <reponame>blake-shaffer/avionics
#TODO: UDPATE THIS HEADER -> THIS IS NOT CORRECT
#This script does the following:
# 1) Record H264 Video using PiCam at a maximum bitrate of {bitrate_max} kbps
# 2) Record video data to a local BytesIO object
# 3) Send raw data over a TCP socket to a ground server
# 4) Store raw data to an onboard file
# 5) Clears BytesIO object after network stream and file store
# 6) Interrupts and ends recording after 'record_time' seconds
# Client: RASPI
# Server: COMPUTER
# Author: <NAME>
# Last Edited: 11/3/19
# Libraries
# -> picamera -> PiCamera: Enables pi cam interfacing and settings manipulation
# -> picamera -> PiCameraCircularIO: Allows for a circular buffer (if we want one)
# -> threading: enables timer interrupt
# -> io -> BytesIO : local file-like object that camera streams to
# -> socket: allows for UDP socket and message sending
# -> time: used to measure timing aspects of the system
# -> os: runs terminal commands from python
# -> sys: used exclusively for exiting the program
from picamera import PiCamera
from picamera import CircularIO
from io import BytesIO
import threading
import socket
import time
import os
import sys
import json
import serial
import struct
from numpy import array
import selectors
# add nav directory to sys.path
sys.path.append("../../")
import nav.NavMain
# On-disk location for all flight data and the run log.
home = '/home/pi'
store_dir = home + "/rocket_data"
# Create the data directory natively instead of shelling out to `mkdir`:
# portable, no subshell, and exist_ok avoids an error message when the
# directory already exists from a previous run.
os.makedirs(store_dir, exist_ok=True)
log_file = store_dir + "/system_log.txt"
log_handle = open(log_file, 'w')
def get_time():
    """Return a log-prefix timestamp of the form 'H:M:S:0.mmm| '.

    Hours/minutes/seconds come from time.localtime() (no zero padding, matching
    the rest of the log format); the trailing field is the current sub-second
    fraction of time.time() rendered with three decimals.
    """
    now = time.localtime()
    frac = time.time() % 1
    return "%d:%d:%d:%.3f| " % (now[3], now[4], now[5], frac)
def log_start(msg):
    """Append *msg* to the system log, prefixed with a newline and a timestamp."""
    entry = "\n" + get_time() + msg
    log_handle.write(entry)
# Write an absolute start banner so the relative log timestamps can be anchored
# to a wall-clock date/time.
absolute_tm = time.localtime()
time_str = "Script Started at %d:%d:%d on %d/%d/%d" % (
    absolute_tm[3], absolute_tm[4], absolute_tm[5],
    absolute_tm[1], absolute_tm[2], absolute_tm[0])
log_handle.write(time_str)
#============== MACROS ===============================
#Constant Macros
# NOTE(review): consider math.pi instead of this truncated literal; kept as-is
# for byte-compatibility. Usage of PI is not visible in this chunk.
PI = 3.1415926535
#Network Macros
# Capability bit flags for client_stream.mode; combined with | and tested
# with & (e.g. STREAM_READ|STREAM_WRITE).
STREAM_READ = 1
STREAM_WRITE = 2
#================================== Networking Class =========================
class client_stream:
    def __init__(self, name, server_IP, server_port, read_store_file, write_store_file, mode):
        """Create a client stream.

        Parameters:
            name: label used in console/log prefixes and server registration
                (e.g. 'VIDEO' or 'TELEMETRY' -- see register_with_server).
            server_IP: ground-server IP address.
            server_port: ground-server TCP port.
            read_store_file: path used to persist the READ buffer
                (opened 'wb' only when mode includes STREAM_READ).
            write_store_file: path used to persist the WRITE buffer
                (opened 'wb' only when mode includes STREAM_WRITE).
            mode: capability bit mask; STREAM_READ, STREAM_WRITE, or
                STREAM_READ|STREAM_WRITE.
        """
        self.name = name
        self.server_IP = server_IP
        self.server_port = server_port
        # TCP socket; connection is deferred to connect_to_server().
        self.socket_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.read_store_file = read_store_file
        self.write_store_file = write_store_file
        self.mode = mode # capability bit mask: STREAM_READ, STREAM_WRITE, or STREAM_READ|STREAM_WRITE
        self.alive = False          # set True once connect_to_server() succeeds
        self.read_file = False      # True when a READ store file is open
        self.write_file = False     # True when a WRITE store file is open
        self.print_output = True    # console output toggle (stream_print)
        self.log_output = True      # log-file output toggle (log_print)
        # Allocate a buffer and open a store file only for the capabilities
        # this stream was created with.
        if (mode & STREAM_READ == STREAM_READ):
            self.read_buffer = BytesIO()
            self.read_file_handle = open(read_store_file, 'wb')
            self.read_file = True
        if (mode & STREAM_WRITE == STREAM_WRITE):
            self.write_buffer = BytesIO()
            self.write_file_handle = open(write_store_file, 'wb')
            self.write_file = True
        #Statistics [OPTIONAL]
        self.recv_packet_cnt = 0
        self.recv_total_bytes = 0
        self.send_packet_cnt = 0
        self.send_total_bytes = 0
def __bool__(self):
return self.alive
def print_statistics(self):
mode_name = ['STREAM_READ', 'STREAM_WRITE', 'STREAM_READ|STREAM_WRITE']
print("Stream Name: %s"%(self.name))
print("Stream Mode: %s"%(mode_name[self.mode-1]))
print("Read:\n\tPackets: %d\n\tTotal Bytes: %d"%(self.recv_packet_cnt, self.recv_total_bytes))
print("Write:\n\tPackets: %d\n\tTotal Bytes: %d"%(self.send_packet_cnt, self.send_total_bytes))
def stream_print(self, msg):
if (not(self.print_output)):
return
print("%s: %s"%(self.name, msg))
def log_print(self, msg):
if (not(self.log_output)):
return
log_start("%s: %s"%(self.name, msg))
def force_bytes(self, msg):
if isinstance(msg, bytes):
return msg
elif isinstance(msg, str):
return msg.encode('utf-8')
elif isinstance(msg, int):
return struct.pack('>i', msg)
elif isinstance(msg, float):
return struct.pack('>f', msg)
elif isinstance(msg, bool):
return struct.pack('>?', msg)
else:
return None
def register_with_server(self):
if not(self.alive):
return
self.stream_print("Registering with server")
self.log_print("Registering with server")
if (self.name == 'VIDEO'):
self.send_packet(b'vid src')
if (self.name == 'TELEMETRY'):
self.send_packet(b'telem src')
self.send_packet(b'im alive')
def connect_to_server(self):
connect_cnt = 0
self.log_print("Attempting to connect to server")
while connect_cnt < 5:
self.stream_print("Server connection attempt #%d"%(connect_cnt+1))
try:
self.socket_obj.connect_ex((self.server_IP, self.server_port))
self.stream_print("Connection Successful")
self.log_print("Connection Successful")
self.alive = True
break
except:
connect_cnt += 1
self.register_with_server()
def store_buffer(self, mode):
if (mode & STREAM_READ == STREAM_READ):
if (not(self.mode & STREAM_READ)):
self.stream_print("READ ACCESS DENIED")
return
if(not(self.read_file)):
return
self.stream_print("Storing READ Buffer: %d Bytes"%(self.get_buffer_size(STREAM_READ)))
self.read_file_handle.write(self.read_buffer.getvalue())
if (mode & STREAM_WRITE == STREAM_WRITE):
if (not(self.mode & STREAM_WRITE)):
self.stream_print("WRITE ACCESS DENIED")
return
if(not(self.write_file)):
return
self.stream_print("Storing WRITE Buffer: %d Bytes"%(self.get_buffer_size(STREAM_WRITE)))
self.write_file_handle.write(self.write_buffer.getvalue())
def get_buffer_size(self, mode):
if (mode & STREAM_READ == STREAM_READ):
if (not(self.mode & STREAM_READ)):
self.stream_print("NO READ BUFFER")
return
return self.read_buffer.getbuffer().nbytes
if (mode & STREAM_WRITE == STREAM_WRITE):
if (not(self.mode & STREAM_WRITE)):
self.stream_print("NO WRITE BUFFER")
return
return self.write_buffer.getbuffer().nbytes
def clear_buffer(self, mode):
if (mode & STREAM_READ == STREAM_READ):
if (not(self.mode & STREAM_READ)):
self.stream_print("NO READ BUFFER")
return
if (self.get_buffer_size(STREAM_READ) == 0):
return
self.stream_print("Clearing READ Buffer")
self.read_buffer.truncate(0)
self.read_buffer.seek(0)
if (mode & STREAM_WRITE == STREAM_WRITE):
if (not(self.mode & STREAM_WRITE)):
self.stream_print("NO WRITE BUFFER")
return
if (self.get_buffer_size(STREAM_WRITE) == 0):
return
self.stream_print("Clearing WRITE Buffer")
self.write_buffer.truncate(0)
self.write_buffer.seek(0)
def close(self):
self.stream_print("RUNNING FULL CLOSE")
if (not(self.mode & STREAM_READ)):
pass
else:
self.store_buffer(STREAM_READ)
self.clear_buffer(STREAM_READ)
self.close_file(STREAM_READ)
if (not(self.mode & STREAM_WRITE)):
pass
else:
if (self.get_buffer_size(STREAM_WRITE) > 0):
self.send_packet(self.write_buffer.getvalue())
self.store_buffer(STREAM_WRITE)
self.clear_buffer(STREAM_WRITE)
self.close_file(STREAM_WRITE)
self.close_socket()
def close_socket(self):
if (not(self.alive)):
return
self.alive = False
self.stream_print("Closing Socket")
self.socket_obj.close()
def close_file(self, mode):
if (mode & STREAM_READ == STREAM_READ):
if (not(self.mode & STREAM_READ)):
self.stream_print("READ ACCESS DENIED")
return
if(not(self.read_file)):
return
self.stream_print("Closing READ File")
self.read_file_handle.close()
self.read_file = False
if (mode & STREAM_WRITE == STREAM_WRITE):
if (not(self.mode & STREAM_WRITE)):
self.stream_print("WRITE ACCESS DENIED")
return
if(not(self.write_file)):
return
self.stream_print("Closing WRITE File")
self.write_file_handle.close()
self.write_file = False
def add_to_buffer(self, msg, mode):
if (mode & STREAM_READ == STREAM_READ):
if (not(self.mode & STREAM_READ)):
self.stream_print("NO READ BUFFER")
return
self.stream_print("Adding to READ Buffer")
self.read_buffer.write(msg)
if (mode & STREAM_WRITE == STREAM_WRITE):
if (not(self.mode & STREAM_WRITE)):
self.stream_print("NO WRITE BUFFER")
return
self.stream_print("Adding to WRITE Buffer")
self.write_buffer.write(msg)
def send_packet(self, msg):
if (not(self.alive)):
return
transmitted = False
self.stream_print("Running SEND_PACKET")
if (not(self.mode & STREAM_WRITE)):
self.stream_print("WRITE ACCESS DENIED")
else:
msg = self.force_bytes(msg)
if msg is None:
self.stream_print("ERROR BAD DATA TYPE: NOT SERIALIZABLE -> Packet not sent")
return
try:
self.send_packet_cnt += 1
self.send_total_bytes += len(msg)
self.socket_obj.sendall(msg)
self.stream_print("Packet %d | Size (%d) Sent successfully"%(self.send_packet_cnt, len(msg)))
transmitted = True
except:
self.stream_print("ERROR Sending Message, closing socket")
self.log_print("ERROR Sending Message, closing socket")
self.close_socket()
def recv_new_packet(self):
if (not(self.alive)):
return None
if (not(self.mode & STREAM_READ)):
self.stream_print("READ ACCESS DENIED")
else:
packet = self.socket_obj.recv(4096)
if (not(packet)):
self.stream_print("Stream ended, storing, then closing connection and file")
self.log_print("Stream ended, storing, then closing connection and file")
self.close_socket()
return None
search = "KILL STREAM"
if packet.find(search.encode('utf-8')) != -1:
self.stream_print("KILL statement heard, doing full close")
self.log_print("KILL statement heard, doing full close")
return "kill"
self.stream_print("%s: New Packet | Size: %d Bytes"%(self.name, len(packet)))
self.read_buffer.write(packet)
self.recv_packet_cnt += 1
self.recv_total_bytes += len(packet)
return None
def wait_for_start(self, time_out):
start = time.time()
time_diff = time.time()-start
sel = selectors.DefaultSelector()
events = selectors.EVENT_READ|selectors.EVENT_WRITE
sel.register(self.socket_obj, events, data="onlyDamnSocket")
last_time = int(time_out)
while(time_diff < time_out):
events = sel.select(timeout = 1.0)
for key, mask in events:
if key.data == "onlyDamnSocket" and mask == selectors.EVENT_READ|selectors.EVENT_WRITE:
self.stream_print("EVENT FOUND")
packet = self.socket_obj.recv(4096)
if (not(packet)):
self.stream_print("Stream ended, storing, then closing connection and file")
self.log_print("Stream ended, storing, then closing connection and file")
self.close_socket()
return
search = "turn you on"
self.stream_print("Packet Has data")
if packet.find(search.encode('utf-8')) != -1:
self.stream_print("STARTUP FOUND!")
return
time_diff = time.time()-start
if int(time_diff) != last_time:
msg = "Time left till automatic start; %.2f"%(time_out-time_diff)
self.stream_print(msg)
self.send_packet(msg.encode('utf-8'))
last_time = int(time_diff)
return
#================================== Serial Class =========================
class teensy_handle:
    """Serial link to the Teensy flight controller.

    Handles the 'startup' handshake, a watchdog-limited wait for the
    'initialized' message, and the line-oriented JSON telemetry dump.
    """
    def __init__(self):
        self.baudrate = 115200
        #self.serial_port = '/dev/ttyACM0' #USB serial
        self.serial_port = '/dev/ttyAMA0' #Serial pins TX/RX -> 14/15
        self.ser = None #serial.Serial handle, created by connect()
        self.status = [0,0,0] #teensy, imu, gps, alt
        self.connected = False #serial port successfully opened
        self.alive = False #handshake completed and stream believed healthy
        self.print_output = True
        self.log_output = True
    def __bool__(self):
        return self.alive
    def stream_print(self, msg):
        """Console print gated by self.print_output."""
        if (not(self.print_output)):
            return
        print("TEENSY: %s"%(msg))
    def log_print(self, msg):
        """Log-file print gated by self.log_output."""
        if (not(self.log_output)):
            return
        log_start("TEENSY: %s"%(msg))
    def connect(self):
        """Open the serial port, retrying up to 11 times.

        Returns True on success, False after exhausting attempts.
        """
        connect_cnt = 0
        while (not(self.connected)):
            try:
                self.ser = serial.Serial(self.serial_port, self.baudrate, timeout=0.1)
                self.connected = True
                self.stream_print("Connected!")
                self.log_print("Connected!")
                return True
            except Exception:
                connect_cnt += 1
                self.stream_print("Trying to Connect: Attempt #%d"%(connect_cnt))
                if connect_cnt > 10:
                    self.stream_print("Not found, Unable to Connect")
                    self.log_print("Not found, Unable to Connect")
                    return False
    def start_up(self, time_out):
        """Run the 'startup' handshake, then wait up to time_out seconds for
        the Teensy to report 'initialized'. Sets self.alive on success."""
        if (not(self.connected)):
            return
        starting = False
        self.stream_print("Starting Startup")
        resp = b''
        for i in range(0,5):
            self.ser.write(b'startup')
            resp = self.ser.readline()
            if resp.find(b'starting') != -1:
                starting = True
                break
        if (not(starting)):
            self.stream_print("ERROR Teensy never heard startup")
            self.log_print("ERROR Teensy never heard startup")
            return
        resp = b''
        self.stream_print("Started and waiting for initialized from teensy")
        start = time.time()
        #BUGFIX: the original looped on the truthiness of find(), which only
        #terminates when 'initialized' sits at position 0 of the line; any
        #prefix (e.g. "system initialized") spun until the watchdog fired.
        while resp.find(b'initialized') == -1:
            #do error analysis of output in here
            resp = self.ser.readline()
            if resp:
                self.stream_print(resp)
            if time.time()-start > time_out:
                self.log_print("ERROR Teensy Watch Dog Timed out")
                self.stream_print("ERROR Teensy Watch Dog Timed out")
                return
        self.stream_print("Teensy successfully started")
        self.log_print("Teensy successfully started")
        self.alive = True
    def start_stream(self):
        """Ask the Teensy to begin dumping JSON telemetry lines."""
        if (not(self.connected)):
            return
        self.stream_print("Beginning JSON Stream")
        self.log_print("Beginning JSON Stream")
        self.ser.write(b'dump')
    def read_in_json(self):
        """Read one line from the Teensy and parse it as JSON.

        Returns the parsed object, False on empty or unparsable input, or
        None when not connected. In-band 'dump_init'/'initialized' control
        lines re-run the handshake as a side effect.
        """
        if (not(self.connected)):
            return
        JSON_packet = b''
        try:
            JSON_packet = self.ser.readline()
        except Exception:
            self.alive = False
        if (not(JSON_packet)):
            #read timeout with no data: nothing to parse (the original fed
            #b'' to json.loads and took the exception path on every idle cycle)
            return False
        self.stream_print("New Packet: %s"%(JSON_packet))
        if (JSON_packet.find(b'dump_init') != -1):
            #Teensy rebooted back to its pre-handshake state; redo startup
            self.alive = False
            for i in range(0,3):
                self.ser.write(b'startup')
        if (JSON_packet.find(b'initialized') != -1):
            self.ser.write(b'dump')
            self.alive = True
        try:
            return json.loads(JSON_packet)
        except Exception:
            self.stream_print("Error Creating JSON Obj; Invalid String")
            return False
#TODO: Create a rocket-state class to record flight information and inform system operation.
#Keep track of:
#   max speed (resultant and per-DOF)
#   max acceleration (resultant and per-DOF)
#   max_height, height
#   whether it is back on the ground again
#   time since landing
#======================= Global Variables and Objects =================
vid_record_file = store_dir + '/video_stream.h264' #on-board file video is stored to
telem_record_file = store_dir + '/telemtry_stream.txt' #NOTE(review): 'telemtry' typo kept -- renaming would break tooling that reads this path
telem_cmd_file = store_dir + '/telemetry_cmds.txt'
bitrate_max = 200000 # bits per second
record_time = 60 # Time in seconds that the recording runs for
record_chunk = 0.12 #chunk size in seconds video object is broken into and sent
frame_rate = 15 #camera frame rate
interrupt_bool = False #global interrupt flag that ends recording/program
store_and_send_bool = False #global interrupt flag that initiates sending and storing of camera data
#ensures chunk size is not smaller than one frame
if record_chunk < 1/frame_rate:
    record_chunk = 1/frame_rate
#Camera Settings
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = frame_rate
#Network Settings
SERVER_IP = '192.168.3.11' #BUGFIX: duplicate assignment of the same value removed
SERVER_VIDEO_PORT = 5000
SERVER_TELEM_PORT = 5001
#Create stream objects for video and telemetry
#client_stream(name, server_IP, server_port, read_store_file, write_store_file, mode)
video_stream = client_stream("VIDEO", SERVER_IP, SERVER_VIDEO_PORT, None, vid_record_file, STREAM_WRITE)
telem_stream = client_stream("TELEMETRY", SERVER_IP, SERVER_TELEM_PORT, telem_cmd_file, telem_record_file, STREAM_WRITE|STREAM_READ)
#Create Selector object to allow for non-blocking read of telemetry port
main_sel = selectors.DefaultSelector()
telem_events = selectors.EVENT_READ|selectors.EVENT_WRITE
main_sel.register(telem_stream.socket_obj, telem_events, data="Telem_upstream")
#========================= Functions =================================
def interrupt_func():
    """Timer callback: raise the global flag that ends recording and the program."""
    global interrupt_bool
    print("Program Timer up")
    interrupt_bool = True
def store_interrupt_func():
    """Timer callback: raise the global flag that triggers a camera
    store-and-send cycle. The timer is re-armed by the main loop, not here."""
    global store_and_send_bool
    store_and_send_bool = True
def get_new_state(current_state, JSON_packet, previous_millis):
    """Propagate the navigation state with one telemetry sample.

    Returns the updated state dict from nav.NavMain.main, or current_state
    unchanged when the sample gap exceeds 500 ms (protects against bogus
    deltas from the first packet or a packet after a long dropout).
    """
    elapsed_ms = JSON_packet["hdr"][1] - previous_millis
    #samples normally arrive every 100ms; anything over 500ms is stale
    if elapsed_ms > 500:
        return current_state
    imu_data = JSON_packet["imu"]
    #repackage the raw JSON fields into the dict the nav filter expects
    sensor_readings = {
        "time": float(elapsed_ms)/1000.0,
        "altitude": float(JSON_packet["tpa"][2]),
        "gps": array(JSON_packet["gps"]),
        "accel_nc": array(imu_data[7:10]),
        "accel_c": array(imu_data[0:3]),
        "angular_velocity": array(imu_data[13:16]),
        "q_inert_to_body": array(imu_data[3:7])
    }
    return nav.NavMain.main(current_state, sensor_readings)
def form_bin_packet(current_state, packet_cnt, status_list):
    """Serialize the nav state into the fixed-layout big-endian telemetry frame.

    Layout: 0xC0DE | i32 packet count | f32 MET | 7 status bools |
    3x f32 position | 3x f32 velocity | 4x f32 attitude quaternion | 0xED0C.
    Returns a bytearray.
    """
    frame = bytearray([192, 222]) #0xC0DE (BEGINNING OF PACKET)
    frame += struct.pack('>if', packet_cnt, current_state["time"])
    frame += struct.pack('>7?', *status_list[:7])
    frame += struct.pack('>3f', *current_state["position"][:3])
    frame += struct.pack('>3f', *current_state["velocity"][:3])
    frame += struct.pack('>4f', *current_state["attitude"][:4])
    frame += bytearray([237, 12]) #0xED0C (END OF PACKET)
    return frame
def populate_status_list(status_list, JSON_packet, telem_stream_alive, video_stream_alive, teensy_alive):
    """Refresh the shared status flags in place.

    status_list layout: IMU, GPS, ALT, Teensy, Raspi, LTE, Serial.
    Sensor flags come from the packet header (skipped when JSON_packet is
    None); the LTE flag is up when either network stream is alive.
    """
    if JSON_packet is not None:
        hdr = JSON_packet['hdr']
        status_list[1] = hdr[2] #GPS
        status_list[2] = hdr[3] #ALT
        status_list[0] = hdr[4] #IMU
    status_list[5] = bool(telem_stream_alive or video_stream_alive) #LTE link
    status_list[3] = bool(teensy_alive) #Teensy
#======================== Video/Telemetry Streaming and Recording ============
loop_cnt = 0.0
cnt = 0
#Navigation Variables
#current_state is the propagated nav solution: mission elapsed time, position
#and velocity vectors, and an attitude quaternion (initialized to identity).
#NOTE(review): frame and unit conventions are defined by nav.NavMain -- confirm there.
current_state = {
    "time": 0.0,
    "position":array([0.0, 0.0, 0.0]),
    "velocity":array([0.0, 0.0, 0.0]),
    "attitude":array([1.0, 0.0, 0.0, 0.0])
}
status_list = [False, False, False, False, False, False, False] #IMU, GPS, ALT, Teensy, Raspi, LTE, Serial
previous_millis = 0 #Teensy millis() of the last accepted packet
#Connect to Server
video_stream.connect_to_server()
telem_stream.connect_to_server()
#Connect to Teensy and do hand shake
teensy = teensy_handle()
status_list[6] = teensy.connect() #Serial flag
teensy.start_up(31) #watchdog: give the Teensy up to 31s to initialize
#Wait for startup signal from server
if (telem_stream.alive):
    telem_stream.wait_for_start(40) #value here is the timeout
status_list[4] = True #Raspi flag: this script itself is running
populate_status_list(status_list, None, telem_stream.alive, video_stream.alive, teensy.alive)
print("STARTING STREAM")
#=================================== Official Beginning of Stream -> Do all setup before this =============
#Begin Pi Cam recording straight into the video stream's in-memory write buffer
camera.start_recording(video_stream.write_buffer, format='h264', bitrate=bitrate_max)
#Start timer threads
#threading.Timer(record_time, interrupt_func).start()
threading.Timer(record_chunk, store_interrupt_func).start()
#Start dump of data from Teensy:
teensy.start_stream()
program_start = time.time()
#Main Program Loop
while not(interrupt_bool): #TODO while (telem_stream or video_stream or {rocket has been up and back down to the ground for a significant amount of time} maybe use a class to carry this out
    #Look at selector for any events then move on to rest:
    #(0.05s timeout keeps the loop from blocking when the server is quiet)
    events = main_sel.select(timeout=0.05)
    for key, mask in events:
        if ((key.data == "Telem_upstream") and (mask == selectors.EVENT_READ|selectors.EVENT_WRITE)):
            #recv_new_packet returns the string "kill" on an in-band kill command
            res = telem_stream.recv_new_packet()
            if isinstance(res, str):
                if res == "kill":
                    print("KILL SWITCH RECIEVED -> CLOSING STREAMS AND ENDING PROGRAM")
                    interrupt_bool = True
    #pull in new packet from teensy (TIMEOUT IS EVERY 0.1 SECONDS SO THIS WILL BLOCK FOR AT LEAST 0.1s)
    new_JSON = teensy.read_in_json()
    packet_Bytes = False
    if (not(new_JSON)):
        pass
    else:
        #New JSON from teensy -> use information to propagate state and any other essential values
        current_state = get_new_state(current_state, new_JSON, previous_millis)
        previous_millis = new_JSON["hdr"][1]
        populate_status_list(status_list, new_JSON, telem_stream.alive, video_stream.alive, teensy.alive)
        packet_Bytes = form_bin_packet(current_state, new_JSON["hdr"][0], status_list)
    if (packet_Bytes):
        #New Telemetry Data: Add to the buffer
        telem_stream.add_to_buffer(packet_Bytes, STREAM_WRITE)
        #If buffer gets to a certain size (10 packets' worth), send and store
        packet_size = len(packet_Bytes)
        if (telem_stream.get_buffer_size(STREAM_WRITE)/packet_size > 10):
            #Send Buffer over Network
            if (telem_stream):
                telem_stream.send_packet(telem_stream.write_buffer.getvalue())
            else:
                #stream died at some point: try to re-establish the link
                telem_stream.connect_to_server()
            #Store Buffer to File
            telem_stream.store_buffer(STREAM_WRITE)
            #Clear Buffer
            telem_stream.clear_buffer(STREAM_WRITE)
    #Camera Store and Send
    # -> operates on global bool which is flipped by timer
    # -> timer goes off every {record_chunk}s
    # -> should be a little less than the serial timeout + selector timeout
    if (store_and_send_bool):
        #Reset Timer
        threading.Timer(record_chunk, store_interrupt_func).start()
        #Reset global interrupt flag
        store_and_send_bool = False
        #Send Video Data over Network
        if (video_stream):
            video_stream.send_packet(video_stream.write_buffer.getvalue())
        else:
            video_stream.connect_to_server()
        #Store Data to File
        video_stream.store_buffer(STREAM_WRITE)
        #Clear Buffer
        video_stream.clear_buffer(STREAM_WRITE)
#======================================================================================
#Stop the camera encoder before tearing the streams down (was missing: the
#encoder kept writing into video_stream.write_buffer after the close below)
try:
    camera.stop_recording()
except Exception:
    pass
#check to see if telem stream or video stream are still open, and if so close/echo kill
if (video_stream):
    video_stream.send_packet(b'KILL STREAM')
    video_stream.close()
if (telem_stream):
    #BUGFIX: this branch previously operated on video_stream, so the
    #telemetry stream was never killed or closed
    telem_stream.send_packet(b'KILL STREAM')
    telem_stream.close()
#End Recording and Tidy Up
total_time = time.time() - program_start #total stream duration in seconds
log_start("Stream Ended, Closing sockets and files")
video_stream.print_statistics()
telem_stream.print_statistics()
absolute_tm = time.localtime()
time_str = "\nScript Ended at " + str(absolute_tm[3]) + ":" + str(absolute_tm[4]) + ":" + str(absolute_tm[5])
time_str += " on " + str(absolute_tm[1]) + "/" + str(absolute_tm[2]) + "/" + str(absolute_tm[0])
log_handle.write(time_str)
log_handle.close()
#TODO: UPDATE THIS HEADER -- the description below is out of date (the script now also streams Teensy telemetry, not just video)
#This script does the following:
# 1) Record H264 Video using PiCam at a maximum bitrate of {bitrate_max} kbps
# 2) Record video data to a local BytesIO object
# 3) Send raw data over a TCP socket to a ground server
# 4) Store raw data to an onboard file
# 5) Clears BytesIO object after network stream and file store
# 6) Interrupts and ends recording after 'record_time' seconds
# Client: RASPI
# Server: COMPUTER
# Author: <NAME>
# Last Edited: 11/3/19
# Libraries
# -> picamera -> PiCamera: Enables pi cam interfacing and settings manipulation
# -> picamera -> PiCameraCircularIO: Allows for a circular buffer (if we want one)
# -> threading: enables timer interrupt
# -> io -> BytesIO : local file-like object that camera streams to
# -> socket: allows for UDP socket and message sending
# -> time: used to measure timing aspects of the system
# -> os: runs terminal commands from python
# -> sys: used exclusively for exiting the program
from picamera import PiCamera
from picamera import CircularIO
from io import BytesIO
import threading
import socket
import time
import os
import sys
import json
import serial
import struct
from numpy import array
import selectors
# add nav directory to sys.path
sys.path.append("../../")
import nav.NavMain
home = '/home/pi'
store_dir = home + "/rocket_data" #all on-board recordings and logs live here
#BUGFIX: use os.makedirs instead of shelling out to `mkdir`; exist_ok avoids
#the error the old os.system("mkdir ...") printed on every rerun
os.makedirs(store_dir, exist_ok=True)
log_file = store_dir + "/system_log.txt"
log_handle = open(log_file, 'w') #module-level log handle, closed at script end
def get_time():
    """Return the wall-clock time as 'H:M:S:0.mmm| ' for prefixing log lines."""
    now = time.localtime()
    fractional = time.time() % 1.0 #sub-second remainder
    return "%d:%d:%d:%.3f| " % (now[3], now[4], now[5], fractional)
def log_start(msg):
    """Append a timestamped entry to the system log on its own line."""
    entry = "\n" + get_time() + msg
    log_handle.write(entry)
#Write the session-start timestamp (H:M:S on M/D/Y) as the log file's first line
absolute_tm = time.localtime()
time_str = "Script Started at " + str(absolute_tm[3]) + ":" + str(absolute_tm[4]) + ":" + str(absolute_tm[5])
time_str += " on " + str(absolute_tm[1]) + "/" + str(absolute_tm[2]) + "/" + str(absolute_tm[0])
log_handle.write(time_str)
#============== MACROS ===============================
#Constant Macros
PI = 3.1415926535 # mathematical constant; unused in this chunk -- TODO confirm it is needed downstream
#Network Macros
STREAM_READ = 1   # bit flag: stream maintains a read buffer and read store file
STREAM_WRITE = 2  # bit flag: stream maintains a write buffer and write store file
#================================== Networking Class =========================
class client_stream:
    """Buffered TCP client stream to the ground server.

    Data moves through in-memory BytesIO buffers that are periodically sent
    over the socket and mirrored to on-board files. `mode` is a bitmask of
    STREAM_READ and/or STREAM_WRITE; a buffer and its backing file handle
    are created only for the enabled directions.
    """
    def __init__(self, name, server_IP, server_port, read_store_file, write_store_file, mode):
        self.name = name
        self.server_IP = server_IP
        self.server_port = server_port
        self.socket_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.read_store_file = read_store_file
        self.write_store_file = write_store_file
        self.mode = mode #can be STREAM_READ, STREAM_WRITE, or STREAM_READ|STREAM_WRITE
        self.alive = False #True only while the server connection is believed up
        self.read_file = False
        self.write_file = False
        self.print_output = True
        self.log_output = True
        if (mode & STREAM_READ == STREAM_READ):
            self.read_buffer = BytesIO()
            self.read_file_handle = open(read_store_file, 'wb')
            self.read_file = True
        if (mode & STREAM_WRITE == STREAM_WRITE):
            self.write_buffer = BytesIO()
            self.write_file_handle = open(write_store_file, 'wb')
            self.write_file = True
        #Statistics [OPTIONAL]
        self.recv_packet_cnt = 0
        self.recv_total_bytes = 0
        self.send_packet_cnt = 0
        self.send_total_bytes = 0
    def __bool__(self):
        #truthiness of the stream mirrors connection liveness
        return self.alive
    def print_statistics(self):
        """Print packet/byte totals for both directions."""
        mode_name = ['STREAM_READ', 'STREAM_WRITE', 'STREAM_READ|STREAM_WRITE']
        print("Stream Name: %s"%(self.name))
        print("Stream Mode: %s"%(mode_name[self.mode-1]))
        print("Read:\n\tPackets: %d\n\tTotal Bytes: %d"%(self.recv_packet_cnt, self.recv_total_bytes))
        print("Write:\n\tPackets: %d\n\tTotal Bytes: %d"%(self.send_packet_cnt, self.send_total_bytes))
    def stream_print(self, msg):
        """Console print gated by self.print_output."""
        if (not(self.print_output)):
            return
        print("%s: %s"%(self.name, msg))
    def log_print(self, msg):
        """Log-file print gated by self.log_output."""
        if (not(self.log_output)):
            return
        log_start("%s: %s"%(self.name, msg))
    def force_bytes(self, msg):
        """Coerce msg to bytes (numbers big-endian); returns None if unsupported.

        BUGFIX: bool is tested before int because bool is an int subclass --
        in the original ordering the bool branch was unreachable and True/False
        were packed as 4-byte ints.
        """
        if isinstance(msg, bytes):
            return msg
        elif isinstance(msg, str):
            return msg.encode('utf-8')
        elif isinstance(msg, bool):
            return struct.pack('>?', msg)
        elif isinstance(msg, int):
            return struct.pack('>i', msg)
        elif isinstance(msg, float):
            return struct.pack('>f', msg)
        else:
            return None
    def register_with_server(self):
        """Identify this stream to the server right after connecting."""
        if not(self.alive):
            return
        self.stream_print("Registering with server")
        self.log_print("Registering with server")
        if (self.name == 'VIDEO'):
            self.send_packet(b'vid src')
        if (self.name == 'TELEMETRY'):
            self.send_packet(b'telem src')
        self.send_packet(b'im alive')
    def connect_to_server(self):
        """Attempt the TCP connection up to 5 times, then register.

        BUGFIX: socket.connect_ex() reports failure via a non-zero errno
        instead of raising, so the original marked the stream alive on the
        first attempt even when the server was unreachable. The return code
        is now checked explicitly.
        """
        connect_cnt = 0
        self.log_print("Attempting to connect to server")
        while connect_cnt < 5:
            self.stream_print("Server connection attempt #%d"%(connect_cnt+1))
            try:
                err = self.socket_obj.connect_ex((self.server_IP, self.server_port))
            except Exception:
                err = -1 #e.g. hostname resolution failure still raises
            if err == 0:
                self.stream_print("Connection Successful")
                self.log_print("Connection Successful")
                self.alive = True
                break
            connect_cnt += 1
        self.register_with_server()
    def store_buffer(self, mode):
        """Mirror the selected buffer(s) to their on-board store file."""
        if (mode & STREAM_READ == STREAM_READ):
            if (not(self.mode & STREAM_READ)):
                self.stream_print("READ ACCESS DENIED")
                return
            if(not(self.read_file)):
                return
            self.stream_print("Storing READ Buffer: %d Bytes"%(self.get_buffer_size(STREAM_READ)))
            self.read_file_handle.write(self.read_buffer.getvalue())
        if (mode & STREAM_WRITE == STREAM_WRITE):
            if (not(self.mode & STREAM_WRITE)):
                self.stream_print("WRITE ACCESS DENIED")
                return
            if(not(self.write_file)):
                return
            self.stream_print("Storing WRITE Buffer: %d Bytes"%(self.get_buffer_size(STREAM_WRITE)))
            self.write_file_handle.write(self.write_buffer.getvalue())
    def get_buffer_size(self, mode):
        """Return the byte count of the selected buffer (None on bad mode)."""
        if (mode & STREAM_READ == STREAM_READ):
            if (not(self.mode & STREAM_READ)):
                self.stream_print("NO READ BUFFER")
                return
            return self.read_buffer.getbuffer().nbytes
        if (mode & STREAM_WRITE == STREAM_WRITE):
            if (not(self.mode & STREAM_WRITE)):
                self.stream_print("NO WRITE BUFFER")
                return
            return self.write_buffer.getbuffer().nbytes
    def clear_buffer(self, mode):
        """Empty the selected buffer(s) and rewind to position 0."""
        if (mode & STREAM_READ == STREAM_READ):
            if (not(self.mode & STREAM_READ)):
                self.stream_print("NO READ BUFFER")
                return
            if (self.get_buffer_size(STREAM_READ) == 0):
                return
            self.stream_print("Clearing READ Buffer")
            self.read_buffer.truncate(0)
            self.read_buffer.seek(0)
        if (mode & STREAM_WRITE == STREAM_WRITE):
            if (not(self.mode & STREAM_WRITE)):
                self.stream_print("NO WRITE BUFFER")
                return
            if (self.get_buffer_size(STREAM_WRITE) == 0):
                return
            self.stream_print("Clearing WRITE Buffer")
            self.write_buffer.truncate(0)
            self.write_buffer.seek(0)
    def close(self):
        """Flush all buffers to disk (and any pending writes to the server),
        close the store files, then close the socket."""
        self.stream_print("RUNNING FULL CLOSE")
        if (not(self.mode & STREAM_READ)):
            pass
        else:
            self.store_buffer(STREAM_READ)
            self.clear_buffer(STREAM_READ)
            self.close_file(STREAM_READ)
        if (not(self.mode & STREAM_WRITE)):
            pass
        else:
            if (self.get_buffer_size(STREAM_WRITE) > 0):
                #last chance to get unsent data to the server
                self.send_packet(self.write_buffer.getvalue())
            self.store_buffer(STREAM_WRITE)
            self.clear_buffer(STREAM_WRITE)
            self.close_file(STREAM_WRITE)
        self.close_socket()
    def close_socket(self):
        """Close the socket and mark the stream dead (idempotent)."""
        if (not(self.alive)):
            return
        self.alive = False
        self.stream_print("Closing Socket")
        self.socket_obj.close()
    def close_file(self, mode):
        """Close the selected store file handle(s) (idempotent)."""
        if (mode & STREAM_READ == STREAM_READ):
            if (not(self.mode & STREAM_READ)):
                self.stream_print("READ ACCESS DENIED")
                return
            if(not(self.read_file)):
                return
            self.stream_print("Closing READ File")
            self.read_file_handle.close()
            self.read_file = False
        if (mode & STREAM_WRITE == STREAM_WRITE):
            if (not(self.mode & STREAM_WRITE)):
                self.stream_print("WRITE ACCESS DENIED")
                return
            if(not(self.write_file)):
                return
            self.stream_print("Closing WRITE File")
            self.write_file_handle.close()
            self.write_file = False
    def add_to_buffer(self, msg, mode):
        """Append raw bytes to the selected buffer."""
        if (mode & STREAM_READ == STREAM_READ):
            if (not(self.mode & STREAM_READ)):
                self.stream_print("NO READ BUFFER")
                return
            self.stream_print("Adding to READ Buffer")
            self.read_buffer.write(msg)
        if (mode & STREAM_WRITE == STREAM_WRITE):
            if (not(self.mode & STREAM_WRITE)):
                self.stream_print("NO WRITE BUFFER")
                return
            self.stream_print("Adding to WRITE Buffer")
            self.write_buffer.write(msg)
    def send_packet(self, msg):
        """Serialize msg via force_bytes and sendall it; closes the socket
        on a send failure. No-op when the stream is not alive."""
        if (not(self.alive)):
            return
        self.stream_print("Running SEND_PACKET")
        if (not(self.mode & STREAM_WRITE)):
            self.stream_print("WRITE ACCESS DENIED")
        else:
            msg = self.force_bytes(msg)
            if msg is None:
                self.stream_print("ERROR BAD DATA TYPE: NOT SERIALIZABLE -> Packet not sent")
                return
            try:
                self.send_packet_cnt += 1
                self.send_total_bytes += len(msg)
                self.socket_obj.sendall(msg)
                self.stream_print("Packet %d | Size (%d) Sent successfully"%(self.send_packet_cnt, len(msg)))
            except Exception:
                self.stream_print("ERROR Sending Message, closing socket")
                self.log_print("ERROR Sending Message, closing socket")
                self.close_socket()
    def recv_new_packet(self):
        """Blocking recv of up to 4096 bytes into the read buffer.

        Returns "kill" when the server sent the in-band KILL STREAM command,
        otherwise None. An empty recv means the peer closed; the socket is
        closed in that case.
        """
        if (not(self.alive)):
            return None
        if (not(self.mode & STREAM_READ)):
            self.stream_print("READ ACCESS DENIED")
        else:
            packet = self.socket_obj.recv(4096)
            if (not(packet)):
                self.stream_print("Stream ended, storing, then closing connection and file")
                self.log_print("Stream ended, storing, then closing connection and file")
                self.close_socket()
                return None
            search = "KILL STREAM"
            if packet.find(search.encode('utf-8')) != -1:
                self.stream_print("KILL statement heard, doing full close")
                self.log_print("KILL statement heard, doing full close")
                return "kill"
            self.stream_print("%s: New Packet | Size: %d Bytes"%(self.name, len(packet)))
            self.read_buffer.write(packet)
            self.recv_packet_cnt += 1
            self.recv_total_bytes += len(packet)
        return None
    def wait_for_start(self, time_out):
        """Block up to time_out seconds for the server's start command.

        Polls the socket through a private selector; returns early when the
        in-band "turn you on" token arrives or the peer closes. Sends a
        countdown message back to the server roughly once per second.
        """
        start = time.time()
        time_diff = time.time()-start
        sel = selectors.DefaultSelector()
        events = selectors.EVENT_READ|selectors.EVENT_WRITE
        sel.register(self.socket_obj, events, data="onlyDamnSocket")
        last_time = int(time_out)
        while(time_diff < time_out):
            events = sel.select(timeout = 1.0)
            for key, mask in events:
                if key.data == "onlyDamnSocket" and mask == selectors.EVENT_READ|selectors.EVENT_WRITE:
                    self.stream_print("EVENT FOUND")
                    packet = self.socket_obj.recv(4096)
                    if (not(packet)):
                        self.stream_print("Stream ended, storing, then closing connection and file")
                        self.log_print("Stream ended, storing, then closing connection and file")
                        self.close_socket()
                        return
                    search = "turn you on"
                    self.stream_print("Packet Has data")
                    if packet.find(search.encode('utf-8')) != -1:
                        self.stream_print("STARTUP FOUND!")
                        return
            time_diff = time.time()-start
            if int(time_diff) != last_time:
                msg = "Time left till automatic start; %.2f"%(time_out-time_diff)
                self.stream_print(msg)
                self.send_packet(msg.encode('utf-8'))
                last_time = int(time_diff)
        return
#================================== Serial Class =========================
class teensy_handle:
    """Serial link to the Teensy flight controller.

    Handles the 'startup' handshake, a watchdog-limited wait for the
    'initialized' message, and the line-oriented JSON telemetry dump.
    """
    def __init__(self):
        self.baudrate = 115200
        #self.serial_port = '/dev/ttyACM0' #USB serial
        self.serial_port = '/dev/ttyAMA0' #Serial pins TX/RX -> 14/15
        self.ser = None #serial.Serial handle, created by connect()
        self.status = [0,0,0] #teensy, imu, gps, alt
        self.connected = False #serial port successfully opened
        self.alive = False #handshake completed and stream believed healthy
        self.print_output = True
        self.log_output = True
    def __bool__(self):
        return self.alive
    def stream_print(self, msg):
        """Console print gated by self.print_output."""
        if (not(self.print_output)):
            return
        print("TEENSY: %s"%(msg))
    def log_print(self, msg):
        """Log-file print gated by self.log_output."""
        if (not(self.log_output)):
            return
        log_start("TEENSY: %s"%(msg))
    def connect(self):
        """Open the serial port, retrying up to 11 times.

        Returns True on success, False after exhausting attempts.
        """
        connect_cnt = 0
        while (not(self.connected)):
            try:
                self.ser = serial.Serial(self.serial_port, self.baudrate, timeout=0.1)
                self.connected = True
                self.stream_print("Connected!")
                self.log_print("Connected!")
                return True
            except Exception:
                connect_cnt += 1
                self.stream_print("Trying to Connect: Attempt #%d"%(connect_cnt))
                if connect_cnt > 10:
                    self.stream_print("Not found, Unable to Connect")
                    self.log_print("Not found, Unable to Connect")
                    return False
    def start_up(self, time_out):
        """Run the 'startup' handshake, then wait up to time_out seconds for
        the Teensy to report 'initialized'. Sets self.alive on success."""
        if (not(self.connected)):
            return
        starting = False
        self.stream_print("Starting Startup")
        resp = b''
        for i in range(0,5):
            self.ser.write(b'startup')
            resp = self.ser.readline()
            if resp.find(b'starting') != -1:
                starting = True
                break
        if (not(starting)):
            self.stream_print("ERROR Teensy never heard startup")
            self.log_print("ERROR Teensy never heard startup")
            return
        resp = b''
        self.stream_print("Started and waiting for initialized from teensy")
        start = time.time()
        #BUGFIX: the original looped on the truthiness of find(), which only
        #terminates when 'initialized' sits at position 0 of the line; any
        #prefix (e.g. "system initialized") spun until the watchdog fired.
        while resp.find(b'initialized') == -1:
            #do error analysis of output in here
            resp = self.ser.readline()
            if resp:
                self.stream_print(resp)
            if time.time()-start > time_out:
                self.log_print("ERROR Teensy Watch Dog Timed out")
                self.stream_print("ERROR Teensy Watch Dog Timed out")
                return
        self.stream_print("Teensy successfully started")
        self.log_print("Teensy successfully started")
        self.alive = True
    def start_stream(self):
        """Ask the Teensy to begin dumping JSON telemetry lines."""
        if (not(self.connected)):
            return
        self.stream_print("Beginning JSON Stream")
        self.log_print("Beginning JSON Stream")
        self.ser.write(b'dump')
    def read_in_json(self):
        """Read one line from the Teensy and parse it as JSON.

        Returns the parsed object, False on empty or unparsable input, or
        None when not connected. In-band 'dump_init'/'initialized' control
        lines re-run the handshake as a side effect.
        """
        if (not(self.connected)):
            return
        JSON_packet = b''
        try:
            JSON_packet = self.ser.readline()
        except Exception:
            self.alive = False
        if (not(JSON_packet)):
            #read timeout with no data: nothing to parse (the original fed
            #b'' to json.loads and took the exception path on every idle cycle)
            return False
        self.stream_print("New Packet: %s"%(JSON_packet))
        if (JSON_packet.find(b'dump_init') != -1):
            #Teensy rebooted back to its pre-handshake state; redo startup
            self.alive = False
            for i in range(0,3):
                self.ser.write(b'startup')
        if (JSON_packet.find(b'initialized') != -1):
            self.ser.write(b'dump')
            self.alive = True
        try:
            return json.loads(JSON_packet)
        except Exception:
            self.stream_print("Error Creating JSON Obj; Invalid String")
            return False
#TODO: Create a rocket-state class to record flight information and inform system operation.
#Keep track of:
#   max speed (resultant and per-DOF)
#   max acceleration (resultant and per-DOF)
#   max_height, height
#   whether it is back on the ground again
#   time since landing
#======================= Global Variables and Objects =================
vid_record_file = store_dir + '/video_stream.h264' #on-board file video is stored to
telem_record_file = store_dir + '/telemtry_stream.txt' #NOTE(review): 'telemtry' typo kept -- renaming would break tooling that reads this path
telem_cmd_file = store_dir + '/telemetry_cmds.txt'
bitrate_max = 200000 # bits per second
record_time = 60 # Time in seconds that the recording runs for
record_chunk = 0.12 #chunk size in seconds video object is broken into and sent
frame_rate = 15 #camera frame rate
interrupt_bool = False #global interrupt flag that ends recording/program
store_and_send_bool = False #global interrupt flag that initiates sending and storing of camera data
#ensures chunk size is not smaller than one frame
if record_chunk < 1/frame_rate:
    record_chunk = 1/frame_rate
#Camera Settings
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = frame_rate
#Network Settings
SERVER_IP = '192.168.3.11' #BUGFIX: duplicate assignment of the same value removed
SERVER_VIDEO_PORT = 5000
SERVER_TELEM_PORT = 5001
#Create stream objects for video and telemetry
#client_stream(name, server_IP, server_port, read_store_file, write_store_file, mode)
video_stream = client_stream("VIDEO", SERVER_IP, SERVER_VIDEO_PORT, None, vid_record_file, STREAM_WRITE)
telem_stream = client_stream("TELEMETRY", SERVER_IP, SERVER_TELEM_PORT, telem_cmd_file, telem_record_file, STREAM_WRITE|STREAM_READ)
#Create Selector object to allow for non-blocking read of telemetry port
main_sel = selectors.DefaultSelector()
telem_events = selectors.EVENT_READ|selectors.EVENT_WRITE
main_sel.register(telem_stream.socket_obj, telem_events, data="Telem_upstream")
#========================= Functions =================================
def interrupt_func():
    """Timer callback: raise the global flag that ends recording/program."""
    global interrupt_bool
    interrupt_bool = True
    print("Program Timer up")
def store_interrupt_func():
    """Timer callback: raise the flag that triggers a camera store-and-send."""
    global store_and_send_bool
    store_and_send_bool = True
def get_new_state(current_state, JSON_packet, previous_millis):
    """Propagate the navigation state using one new telemetry packet.

    Packets arriving more than 500 ms after the previous one are treated
    as untrustworthy (first packet, or first after a long dropout) and the
    current state is returned unchanged.
    """
    elapsed_ms = JSON_packet["hdr"][1] - previous_millis
    if elapsed_ms > 500:
        # New samples should come every ~100ms; a larger gap means this
        # packet must not be used for state propagation.
        return current_state
    imu_data = JSON_packet["imu"]
    # Build the sensor-reading dict the navigation filter expects.
    sensor_readings = {
        "time": float(elapsed_ms) / 1000.0,
        "altitude": float(JSON_packet["tpa"][2]),
        "gps": array(JSON_packet["gps"]),
        "accel_nc": array(imu_data[7:10]),
        "accel_c": array(imu_data[0:3]),
        "angular_velocity": array(imu_data[13:16]),
        "q_inert_to_body": array(imu_data[3:7]),
    }
    # Hand the readings to the nav filter and return the updated state.
    return nav.NavMain.main(current_state, sensor_readings)
def form_bin_packet(current_state, packet_cnt, status_list):
    """Serialize the current nav state into the fixed binary downlink packet.

    Layout (big-endian): 0xC0DE | int32 count | float MET | 7 status bools |
    3 floats position | 3 floats velocity | 4 floats attitude | 0xED0C.
    """
    pkt = bytearray(b'\xc0\xde')  # 0xC0DE magic: beginning of packet
    pkt += struct.pack('>if', packet_cnt, current_state["time"])
    pkt += struct.pack('>7?', *status_list[:7])
    pkt += struct.pack('>3f', *current_state["position"][:3])
    pkt += struct.pack('>3f', *current_state["velocity"][:3])
    pkt += struct.pack('>4f', *current_state["attitude"][:4])
    pkt += bytearray(b'\xed\x0c')  # 0xED0C magic: end of packet
    return pkt
def populate_status_list(status_list, JSON_packet, telem_stream_alive, video_stream_alive, teensy_alive):
    """Refresh the health flags in *status_list* (mutated in place).

    Index layout: 0=IMU, 1=GPS, 2=ALT, 3=Teensy, 4=Raspi, 5=LTE, 6=Serial.
    Indices 4 (Raspi) and 6 (Serial) are maintained by the caller and are
    left untouched here.
    """
    if JSON_packet is not None:
        # Sensor health bits ride in the packet header.
        status_list[1] = JSON_packet['hdr'][2] #GPS
        status_list[2] = JSON_packet['hdr'][3] #ALT
        status_list[0] = JSON_packet['hdr'][4] #IMU
    # LTE link counts as up if either stream is still alive (replaces the
    # redundant if/else True/False assignments).
    status_list[5] = bool(telem_stream_alive or video_stream_alive)
    status_list[3] = bool(teensy_alive)
#======================== Video/Telemetry Streaming and Recording ============
loop_cnt = 0.0
cnt = 0
#Navigation Variables
# Initial nav state: origin position/velocity, identity quaternion.
# NOTE(review): attitude appears to be a w-first quaternion -- confirm
# against nav.NavMain's convention.
current_state = {
    "time": 0.0,
    "position":array([0.0, 0.0, 0.0]),
    "velocity":array([0.0, 0.0, 0.0]),
    "attitude":array([1.0, 0.0, 0.0, 0.0])
}
status_list = [False, False, False, False, False, False, False] #IMU, GPS, ALT, Teensy, Raspi, LTE, Serial
previous_millis = 0
#Connect to Server
video_stream.connect_to_server()
telem_stream.connect_to_server()
#Connect to Teensy and do hand shake
teensy = teensy_handle()
status_list[6] = teensy.connect()
teensy.start_up(31)
#Wait for startup signal from server
if (telem_stream.alive):
    telem_stream.wait_for_start(40) #value here is the timeout
    status_list[4] = True
populate_status_list(status_list, None, telem_stream.alive, video_stream.alive, teensy.alive)
print("STARTING STREAM")
#=================================== Offical Beginning of Stream -> Do all setup before this =============
#Begin Pi Cam recording
# Camera writes H264 directly into the video stream's in-memory buffer.
camera.start_recording(video_stream.write_buffer, format='h264', bitrate=bitrate_max)
#Start timer threads
#threading.Timer(record_time, interrupt_func).start()
# One-shot timer; it is re-armed inside the main loop after every chunk.
threading.Timer(record_chunk, store_interrupt_func).start()
#Start dump of data from Teensy:
teensy.start_stream()
program_start = time.time()
#Main Program Loop
while not(interrupt_bool): #TODO while (telem_stream or video_stream or {rocket has been up and back down to the ground for a significant amount of time} maybe use a class to carry this out
    #Look at selector for any events then move on to rest:
    # Non-blocking check (50ms) for commands arriving on the telemetry socket.
    events = main_sel.select(timeout=0.05)
    for key, mask in events:
        if ((key.data == "Telem_upstream") and (mask == selectors.EVENT_READ|selectors.EVENT_WRITE)):
            res = telem_stream.recv_new_packet()
            if isinstance(res, str):
                if res == "kill":
                    # Ground station requested shutdown.
                    print("KILL SWITCH RECIEVED -> CLOSING STREAMS AND ENDING PROGRAM")
                    interrupt_bool = True
    #pull in new packet from teensy (TIMEOUT IS EVERY 0.1 SECONDS SO THIS WILL BLOCK FOR AT LEAST 0.1s)
    new_JSON = teensy.read_in_json()
    packet_Bytes = False
    if (not(new_JSON)):
        # read_in_json returned None (not connected) or False (empty/corrupt).
        pass
    else:
        #New JSON from teensy -> use information to propagate state and any other essential values
        current_state = get_new_state(current_state, new_JSON, previous_millis)
        previous_millis = new_JSON["hdr"][1]
        populate_status_list(status_list, new_JSON, telem_stream.alive, video_stream.alive, teensy.alive)
        packet_Bytes = form_bin_packet(current_state, new_JSON["hdr"][0], status_list)
    if (packet_Bytes):
        #New Telemetry Data: Add to the buffer
        telem_stream.add_to_buffer(packet_Bytes, STREAM_WRITE)
        #If buffer gets to a certain size, send and store
        # Flush once ~10 packets have accumulated.
        packet_size = len(packet_Bytes)
        if (telem_stream.get_buffer_size(STREAM_WRITE)/packet_size > 10):
            #Send Buffer over Network
            # NOTE(review): ``if (telem_stream):`` tests object truthiness --
            # unless client_stream defines __bool__ this is always True; the
            # reconnect branch may be unreachable.  Confirm.
            if (telem_stream):
                telem_stream.send_packet(telem_stream.write_buffer.getvalue())
            else:
                telem_stream.connect_to_server()
            #Store Buffer to File
            telem_stream.store_buffer(STREAM_WRITE)
            #Clear Buffer
            telem_stream.clear_buffer(STREAM_WRITE)
    #Camera Store and Send
    # -> operates on global bool which is flipped by timer
    # -> timer goes off every {record_chunk}s
    # -> should be a little less than the serial timeout + selector timeout
    if (store_and_send_bool):
        #Reset Timer
        threading.Timer(record_chunk, store_interrupt_func).start()
        #Reset global interrupt flag
        store_and_send_bool = False
        #Send Video Data over Network
        # NOTE(review): same always-truthy concern as for telem_stream above.
        if (video_stream):
            video_stream.send_packet(video_stream.write_buffer.getvalue())
        else:
            video_stream.connect_to_server()
        #Store Data to File
        video_stream.store_buffer(STREAM_WRITE)
        #Clear Buffer
        video_stream.clear_buffer(STREAM_WRITE)
#======================================================================================
#check to see if telem stream or video stream are still open, and if so close/echo kill
if (video_stream):
    video_stream.send_packet(b'KILL STREAM')
    video_stream.close()
if (telem_stream):
    # BUG FIX: this branch previously sent the kill and close to
    # video_stream a second time, leaving the telemetry socket open.
    telem_stream.send_packet(b'KILL STREAM')
    telem_stream.close()
#End Recording and Tidy Up
total_time = time.time() - program_start  # total run time (informational)
log_start("Stream Ended, Closing sockets and files")  # NOTE(review): log_start is defined outside this chunk -- confirm it exists (log_print is used elsewhere)
video_stream.print_statistics()
telem_stream.print_statistics()
absolute_tm = time.localtime()
# Append a human-readable end-of-run timestamp to the log file.
time_str = "\nScript Ended at " + str(absolute_tm[3]) + ":" + str(absolute_tm[4]) + ":" + str(absolute_tm[5])
time_str += " on " + str(absolute_tm[1]) + "/" + str(absolute_tm[2]) + "/" + str(absolute_tm[0])
log_handle.write(time_str)
log_handle.close()
video object is broken into and sent #camera frame rate #global interrupt flag that ends recording/program #global interrupt flag that initiates sending and storing of camera data #ensures chunk size is not smaller than one frame #Camera Settings #Network Settings #Create stream objects for video and telemetry #def __init__(self, name, server_IP, server_port, read_store_file, write_store_file, mode): #Create Selector object to allow for non-blocking read of telemetry port #========================= Functions ================================= #Interrupt function that ends camera streaming and program #interrupt function that initiates sending and storing camera data #threading.Timer(record_chunk, store_interrupt_func).start() #integer value is number of milliseconds since the last observation: #New samples should come every 100ms #buffer of 500ms is there to protect from bogus values (and hence state propogation) from first packet, or first packet after long drop out #therefore, if it is greater, ignore this packet and return the same state #convert JSON_packet to python dict #recieve updated state #return updated state #take in current state dict #form binary packet #0xC0DE in hex (BEGINNING OF PACKET) #0xED0C in hex (END OF PACKET) #return binary packet #IMU, GPS, ALT, Teensy, Raspi, LTE, Serial #GPS #ALT #IMU #======================== Video/Telemetry Streaming and Recording ============ #Navigation Variables #IMU, GPS, ALT, Teensy, Raspi, LTE, Serial #Connect to Server #Connect to Teensy and do hand shake #Wait for startup signal from server #value here is the timeout #=================================== Offical Beginning of Stream -> Do all setup before this ============= #Begin Pi Cam recording #Start timer threads #threading.Timer(record_time, interrupt_func).start() #Start dump of data from Teensy: #Main Program Loop #TODO while (telem_stream or video_stream or {rocket has been up and back down to the ground for a significant amount of time} maybe use a class 
to carry this out #Look at selector for any events then move on to rest: #pull in new packet from teensy (TIMEOUT IS EVERY 0.1 SECONDS SO THIS WILL BLOCK FOR AT LEAST 0.1s) #New JSON from teensy -> use information to propagate state and any other essential values #New Telemetry Data: Add to the buffer #If buffer gets to a certain size, send and store #Send Buffer over Network #Store Buffer to File #Clear Buffer #Camera Store and Send # -> operates on global bool which is flipped by timer # -> timer goes off every {record_chunk}s # -> should be a little less than the serial timeout + selector timeout #Reset Timer #Reset global interrupt flag #Send Video Data over Network #Store Data to File #Clear Buffer #====================================================================================== #check to see if telem stream or video stream are still open, and if so close/echo kill #End Recording and Tidy Up | 2.833643 | 3 |
var/spack/repos/builtin.mock/packages/installed-deps-d/package.py | jeanbez/spack | 0 | 6624910 | <filename>var/spack/repos/builtin.mock/packages/installed-deps-d/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class InstalledDepsD(Package):
    """Used by test_installed_deps test case."""
    # Mock dependency DAG exercised by the test:
    #      a
    #     / \
    #    b   c        b --> d build/link
    #    |\ /|        b --> e build/link
    #    |/ \|        c --> d build
    #    d   e        c --> e build/link
    homepage = "http://www.example.com"
    url = "http://www.example.com/d-1.0.tar.gz"
    # Dummy md5 checksums -- this is a mock package used only by tests.
    version("1", "0123456789abcdef0123456789abcdef")
    version("2", "abcdef0123456789abcdef0123456789")
    version("3", "def0123456789abcdef0123456789abc")
| <filename>var/spack/repos/builtin.mock/packages/installed-deps-d/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class InstalledDepsD(Package):
    """Used by test_installed_deps test case."""
    # Mock dependency DAG exercised by the test:
    #      a
    #     / \
    #    b   c        b --> d build/link
    #    |\ /|        b --> e build/link
    #    |/ \|        c --> d build
    #    d   e        c --> e build/link
    homepage = "http://www.example.com"
    url = "http://www.example.com/d-1.0.tar.gz"
    # Dummy md5 checksums -- this is a mock package used only by tests.
    version("1", "0123456789abcdef0123456789abcdef")
    version("2", "abcdef0123456789abcdef0123456789")
    version("3", "def0123456789abcdef0123456789abc")
| en | 0.611842 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Used by test_installed_deps test case. # a # / \ # b c b --> d build/link # |\ /| b --> e build/link # |/ \| c --> d build # d e c --> e build/link | 1.478592 | 1 |
indico/core/db/sqlalchemy/links.py | bpedersen2/indico | 1 | 6624911 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import partial
from itertools import chain
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.decorators import strict_classproperty
from indico.util.enum import RichIntEnum
from indico.util.i18n import _
class LinkType(RichIntEnum):
    """Enumerates the kinds of objects a :class:`LinkMixin` row can point to."""
    #: Human-readable titles indexed by enum value (index 0 unused).
    __titles__ = (None, _('Category'), _('Event'), _('Contribution'), _('Subcontribution'), _('Session'),
                  _('Session block'))
    category = 1
    event = 2
    contribution = 3
    subcontribution = 4
    session = 5
    session_block = 6
#: Names of all link-target FK columns a LinkMixin model may define.
_all_columns = {'category_id', 'linked_event_id', 'contribution_id', 'subcontribution_id', 'session_id',
                'session_block_id'}
#: Maps each link type to the FK column(s) that must be non-NULL for it.
_columns_for_types = {
    LinkType.category: {'category_id'},
    LinkType.event: {'linked_event_id'},
    LinkType.contribution: {'contribution_id'},
    LinkType.subcontribution: {'subcontribution_id'},
    LinkType.session: {'session_id'},
    LinkType.session_block: {'session_block_id'},
}
def _make_checks(allowed_link_types):
    """Yield the CHECK constraints keeping ``link_type`` and the FK columns consistent.

    For each allowed link type: exactly its own FK column(s) are non-NULL,
    every other available FK column is NULL.  ``event_id`` must be NULL
    precisely for category links.
    """
    available_columns = set()
    for type_, cols in _columns_for_types.items():
        if type_ in allowed_link_types:
            available_columns.update(cols)
    yield db.CheckConstraint(f'(event_id IS NULL) = (link_type = {LinkType.category})', 'valid_event_id')
    for link_type in allowed_link_types:
        required = available_columns & _columns_for_types[link_type]
        forbidden = available_columns - required
        clauses = [f'{col} IS NULL' for col in sorted(forbidden)]
        clauses += [f'{col} IS NOT NULL' for col in sorted(required)]
        condition = 'link_type != {} OR ({})'.format(link_type, ' AND '.join(clauses))
        yield db.CheckConstraint(condition, f'valid_{link_type.name}_link')
def _make_uniques(allowed_link_types, extra_criteria=None):
    """Yield one partial unique index per allowed link type.

    Each index covers the FK column(s) of the type and is restricted (via
    ``postgresql_where``) to rows of that link type, optionally AND-ed with
    *extra_criteria*.
    """
    for link_type in allowed_link_types:
        conditions = [f'link_type = {link_type.value}']
        if extra_criteria is not None:
            conditions.extend(extra_criteria)
        where_clause = db.text(' AND '.join(conditions))
        yield db.Index(None, *_columns_for_types[link_type], unique=True,
                       postgresql_where=where_clause)
class LinkMixin:
    """Mixin for models that can be linked to one event-structure object
    (category, event, session, session block, contribution or
    subcontribution).

    ``link_type`` discriminates which of the FK columns is set; the
    combination is enforced by the CHECK constraints from ``_make_checks``.
    """
    #: The link types that are supported. Can be overridden in the
    #: model using the mixin. Affects the table structure, so any
    #: changes to it should go along with a migration step!
    allowed_link_types = frozenset(LinkType)
    #: If only one link per object should be allowed. This may also
    #: be a string containing an SQL string to specify the criterion
    #: for the unique index to be applied, e.g. ``'is_foo = true'``.
    unique_links = False
    #: The name of the backref that's added to the Event model to
    #: access *all* linked objects
    events_backref_name = None
    #: The name of the backref that's added to the linked objects
    link_backref_name = None
    #: The laziness of the backref that's added to the linked objects
    link_backref_lazy = True
    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # CHECK constraints tying link_type to its FK columns, plus the
        # optional partial unique indexes when ``unique_links`` is set.
        args = tuple(_make_checks(cls.allowed_link_types))
        if cls.unique_links:
            extra_criteria = [cls.unique_links] if isinstance(cls.unique_links, str) else None
            args = args + tuple(_make_uniques(cls.allowed_link_types, extra_criteria))
        return args
    @classmethod
    def register_link_events(cls):
        """Register sqlalchemy events needed by this mixin.

        Call this method after the definition of a model which uses
        this mixin class.
        """
        # Maps each relationship attribute to a function extracting the
        # Event the assigned object belongs to.  Attributes of link types
        # not in ``allowed_link_types`` are None (their declared_attr
        # returned nothing); the ``if rel is not None`` guards below skip
        # them.
        event_mapping = {cls.session: lambda x: x.event,
                         cls.session_block: lambda x: x.event,
                         cls.contribution: lambda x: x.event,
                         cls.subcontribution: lambda x: x.contribution.event,
                         cls.linked_event: lambda x: x}
        type_mapping = {cls.category: LinkType.category,
                        cls.linked_event: LinkType.event,
                        cls.session: LinkType.session,
                        cls.session_block: LinkType.session_block,
                        cls.contribution: LinkType.contribution,
                        cls.subcontribution: LinkType.subcontribution}
        def _set_link_type(link_type, target, value, *unused):
            # Assigning a linked object also sets the matching link_type.
            if value is not None:
                target.link_type = link_type
        def _set_event_obj(fn, target, value, *unused):
            # Assigning a linked object keeps the denormalized ``event``
            # relationship in sync with the object's event.
            if value is not None:
                event = fn(value)
                assert event is not None
                target.event = event
        for rel, fn in event_mapping.items():
            if rel is not None:
                listen(rel, 'set', partial(_set_event_obj, fn))
        for rel, link_type in type_mapping.items():
            if rel is not None:
                listen(rel, 'set', partial(_set_link_type, link_type))
    @declared_attr
    def link_type(cls):
        # Discriminator column; values outside the allowed link types are
        # rejected at the type level.
        return db.Column(
            PyIntEnum(LinkType, exclude_values=set(LinkType) - cls.allowed_link_types),
            nullable=False
        )
    @declared_attr
    def category_id(cls):
        if LinkType.category in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('categories.categories.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def event_id(cls):
        # Denormalized event association; NULL iff the object is linked to
        # a category (enforced by the 'valid_event_id' CHECK constraint).
        return db.Column(
            db.Integer,
            db.ForeignKey('events.events.id'),
            nullable=True,
            index=True
        )
    @declared_attr
    def linked_event_id(cls):
        if LinkType.event in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('events.events.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def session_id(cls):
        if LinkType.session in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('events.sessions.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def session_block_id(cls):
        if LinkType.session_block in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('events.session_blocks.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def contribution_id(cls):
        if LinkType.contribution in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('events.contributions.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def subcontribution_id(cls):
        if LinkType.subcontribution in cls.allowed_link_types:
            return db.Column(
                db.Integer,
                db.ForeignKey('events.subcontributions.id'),
                nullable=True,
                index=True
            )
    @declared_attr
    def category(cls):
        if LinkType.category in cls.allowed_link_types:
            return db.relationship(
                'Category',
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @declared_attr
    def event(cls):
        # Always present: the event the linked object belongs to (kept in
        # sync by ``register_link_events``).
        return db.relationship(
            'Event',
            foreign_keys=cls.event_id,
            lazy=True,
            backref=db.backref(
                cls.events_backref_name,
                lazy='dynamic'
            )
        )
    @declared_attr
    def linked_event(cls):
        if LinkType.event in cls.allowed_link_types:
            return db.relationship(
                'Event',
                foreign_keys=cls.linked_event_id,
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @declared_attr
    def session(cls):
        if LinkType.session in cls.allowed_link_types:
            return db.relationship(
                'Session',
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @declared_attr
    def session_block(cls):
        if LinkType.session_block in cls.allowed_link_types:
            return db.relationship(
                'SessionBlock',
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @declared_attr
    def contribution(cls):
        if LinkType.contribution in cls.allowed_link_types:
            return db.relationship(
                'Contribution',
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @declared_attr
    def subcontribution(cls):
        if LinkType.subcontribution in cls.allowed_link_types:
            return db.relationship(
                'SubContribution',
                lazy=True,
                backref=db.backref(
                    cls.link_backref_name,
                    cascade='all, delete-orphan',
                    uselist=(cls.unique_links != True),  # noqa
                    lazy=cls.link_backref_lazy
                )
            )
    @hybrid_property
    def object(self):
        """The object this item is linked to, according to ``link_type``."""
        if self.link_type == LinkType.category:
            return self.category
        elif self.link_type == LinkType.event:
            # For event links the denormalized ``event`` equals
            # ``linked_event`` (see register_link_events' identity lambda).
            return self.event
        elif self.link_type == LinkType.session:
            return self.session
        elif self.link_type == LinkType.session_block:
            return self.session_block
        elif self.link_type == LinkType.contribution:
            return self.contribution
        elif self.link_type == LinkType.subcontribution:
            return self.subcontribution
    @object.setter
    def object(self, obj):
        # Clear every link first so only the new target's column is set;
        # the 'set' listeners then update link_type and event.
        self.category = None
        self.linked_event = self.event = self.session = self.session_block = None
        self.contribution = self.subcontribution = None
        if isinstance(obj, db.m.Category):
            self.category = obj
        elif isinstance(obj, db.m.Event):
            self.linked_event = obj
        elif isinstance(obj, db.m.Session):
            self.session = obj
        elif isinstance(obj, db.m.SessionBlock):
            self.session_block = obj
        elif isinstance(obj, db.m.Contribution):
            self.contribution = obj
        elif isinstance(obj, db.m.SubContribution):
            self.subcontribution = obj
        else:
            raise TypeError(f'Unexpected object: {obj}')
    @object.comparator
    def object(cls):
        # Enables ``Model.object == obj`` in queries.
        return LinkedObjectComparator(cls)
    @property
    def link_repr(self):
        """A kwargs-style string suitable for the object's repr."""
        info = [('link_type', self.link_type.name if self.link_type is not None else 'None')]
        info.extend((key, getattr(self, key)) for key in _all_columns if getattr(self, key) is not None)
        return ', '.join(f'{key}={value}' for key, value in info)
    @property
    def link_event_log_data(self):
        """
        Return a dict containing information about the linked object
        suitable for the event log.

        It does not return any information for an object linked to a
        category or the event itself.
        """
        data = {}
        if self.link_type == LinkType.session:
            data['Session'] = self.session.title
        # NOTE(review): this is an ``if`` (not ``elif``), starting a new
        # chain.  Harmless since link_type is single-valued, but likely
        # meant to be ``elif`` for consistency -- confirm.
        if self.link_type == LinkType.session_block:
            data['Session Block'] = self.session_block.title
        elif self.link_type == LinkType.contribution:
            data['Contribution'] = self.contribution.title
        elif self.link_type == LinkType.subcontribution:
            data['Contribution'] = self.subcontribution.contribution.title
            data['Subcontribution'] = self.subcontribution.title
        return data
class LinkedObjectComparator(Comparator):
    """Comparator turning ``Model.object == obj`` into the proper SQL filter."""
    def __init__(self, cls):
        self.cls = cls
    def __clause_element__(self):
        # just in case
        raise NotImplementedError
    def __eq__(self, other):
        # Ordered (model, link type, FK attribute) dispatch table replacing
        # the original if/elif chain; order matches the original checks.
        dispatch = (
            (db.m.Category, LinkType.category, 'category_id'),
            (db.m.Event, LinkType.event, 'linked_event_id'),
            (db.m.Session, LinkType.session, 'session_id'),
            (db.m.SessionBlock, LinkType.session_block, 'session_block_id'),
            (db.m.Contribution, LinkType.contribution, 'contribution_id'),
            (db.m.SubContribution, LinkType.subcontribution, 'subcontribution_id'),
        )
        for model, link_type, fk_name in dispatch:
            if isinstance(other, model):
                return db.and_(self.cls.link_type == link_type,
                               getattr(self.cls, fk_name) == other.id)
        raise ValueError(f'Unexpected object type {type(other)}: {other}')
| # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import partial
from itertools import chain
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.decorators import strict_classproperty
from indico.util.enum import RichIntEnum
from indico.util.i18n import _
class LinkType(RichIntEnum):
    """Enumerates the kinds of objects a :class:`LinkMixin` row can point to."""
    #: Human-readable titles indexed by enum value (index 0 unused).
    __titles__ = (None, _('Category'), _('Event'), _('Contribution'), _('Subcontribution'), _('Session'),
                  _('Session block'))
    category = 1
    event = 2
    contribution = 3
    subcontribution = 4
    session = 5
    session_block = 6
#: Names of all link-target FK columns a LinkMixin model may define.
_all_columns = {'category_id', 'linked_event_id', 'contribution_id', 'subcontribution_id', 'session_id',
                'session_block_id'}
#: Maps each link type to the FK column(s) that must be non-NULL for it.
_columns_for_types = {
    LinkType.category: {'category_id'},
    LinkType.event: {'linked_event_id'},
    LinkType.contribution: {'contribution_id'},
    LinkType.subcontribution: {'subcontribution_id'},
    LinkType.session: {'session_id'},
    LinkType.session_block: {'session_block_id'},
}
def _make_checks(allowed_link_types):
    """Yield the CHECK constraints keeping ``link_type`` and the FK columns consistent.

    For each allowed link type: exactly its own FK column(s) are non-NULL,
    every other available FK column is NULL.  ``event_id`` must be NULL
    precisely for category links.
    """
    available_columns = set()
    for type_, cols in _columns_for_types.items():
        if type_ in allowed_link_types:
            available_columns.update(cols)
    yield db.CheckConstraint(f'(event_id IS NULL) = (link_type = {LinkType.category})', 'valid_event_id')
    for link_type in allowed_link_types:
        required = available_columns & _columns_for_types[link_type]
        forbidden = available_columns - required
        clauses = [f'{col} IS NULL' for col in sorted(forbidden)]
        clauses += [f'{col} IS NOT NULL' for col in sorted(required)]
        condition = 'link_type != {} OR ({})'.format(link_type, ' AND '.join(clauses))
        yield db.CheckConstraint(condition, f'valid_{link_type.name}_link')
def _make_uniques(allowed_link_types, extra_criteria=None):
    """Yield one partial unique index per allowed link type.

    Each index covers the FK column(s) of the type and is restricted (via
    ``postgresql_where``) to rows of that link type, optionally AND-ed with
    *extra_criteria*.
    """
    for link_type in allowed_link_types:
        conditions = [f'link_type = {link_type.value}']
        if extra_criteria is not None:
            conditions.extend(extra_criteria)
        where_clause = db.text(' AND '.join(conditions))
        yield db.Index(None, *_columns_for_types[link_type], unique=True,
                       postgresql_where=where_clause)
class LinkMixin:
#: The link types that are supported. Can be overridden in the
#: model using the mixin. Affects the table structure, so any
#: changes to it should go along with a migration step!
allowed_link_types = frozenset(LinkType)
#: If only one link per object should be allowed. This may also
#: be a string containing an SQL string to specify the criterion
#: for the unique index to be applied, e.g. ``'is_foo = true'``.
unique_links = False
#: The name of the backref that's added to the Event model to
#: access *all* linked objects
events_backref_name = None
#: The name of the backref that's added to the linked objects
link_backref_name = None
#: The laziness of the backref that's added to the linked objects
link_backref_lazy = True
@strict_classproperty
@classmethod
def __auto_table_args(cls):
args = tuple(_make_checks(cls.allowed_link_types))
if cls.unique_links:
extra_criteria = [cls.unique_links] if isinstance(cls.unique_links, str) else None
args = args + tuple(_make_uniques(cls.allowed_link_types, extra_criteria))
return args
@classmethod
def register_link_events(cls):
"""Register sqlalchemy events needed by this mixin.
Call this method after the definition of a model which uses
this mixin class.
"""
event_mapping = {cls.session: lambda x: x.event,
cls.session_block: lambda x: x.event,
cls.contribution: lambda x: x.event,
cls.subcontribution: lambda x: x.contribution.event,
cls.linked_event: lambda x: x}
type_mapping = {cls.category: LinkType.category,
cls.linked_event: LinkType.event,
cls.session: LinkType.session,
cls.session_block: LinkType.session_block,
cls.contribution: LinkType.contribution,
cls.subcontribution: LinkType.subcontribution}
def _set_link_type(link_type, target, value, *unused):
if value is not None:
target.link_type = link_type
def _set_event_obj(fn, target, value, *unused):
if value is not None:
event = fn(value)
assert event is not None
target.event = event
for rel, fn in event_mapping.items():
if rel is not None:
listen(rel, 'set', partial(_set_event_obj, fn))
for rel, link_type in type_mapping.items():
if rel is not None:
listen(rel, 'set', partial(_set_link_type, link_type))
@declared_attr
def link_type(cls):
return db.Column(
PyIntEnum(LinkType, exclude_values=set(LinkType) - cls.allowed_link_types),
nullable=False
)
@declared_attr
def category_id(cls):
if LinkType.category in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('categories.categories.id'),
nullable=True,
index=True
)
@declared_attr
def event_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
nullable=True,
index=True
)
@declared_attr
def linked_event_id(cls):
if LinkType.event in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
nullable=True,
index=True
)
@declared_attr
def session_id(cls):
if LinkType.session in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('events.sessions.id'),
nullable=True,
index=True
)
@declared_attr
def session_block_id(cls):
if LinkType.session_block in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('events.session_blocks.id'),
nullable=True,
index=True
)
@declared_attr
def contribution_id(cls):
if LinkType.contribution in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
nullable=True,
index=True
)
@declared_attr
def subcontribution_id(cls):
if LinkType.subcontribution in cls.allowed_link_types:
return db.Column(
db.Integer,
db.ForeignKey('events.subcontributions.id'),
nullable=True,
index=True
)
@declared_attr
def category(cls):
    """Relationship to the linked category (if allowed)."""
    if LinkType.category not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'Category',
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@declared_attr
def event(cls):
    """Relationship to the owning event; always defined."""
    owning_backref = db.backref(cls.events_backref_name, lazy='dynamic')
    return db.relationship(
        'Event',
        foreign_keys=cls.event_id,
        lazy=True,
        backref=owning_backref
    )
@declared_attr
def linked_event(cls):
    """Relationship to the event linked as the target object (if allowed)."""
    if LinkType.event not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'Event',
        foreign_keys=cls.linked_event_id,
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@declared_attr
def session(cls):
    """Relationship to the linked session (if allowed)."""
    if LinkType.session not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'Session',
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@declared_attr
def session_block(cls):
    """Relationship to the linked session block (if allowed)."""
    if LinkType.session_block not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'SessionBlock',
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@declared_attr
def contribution(cls):
    """Relationship to the linked contribution (if allowed)."""
    if LinkType.contribution not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'Contribution',
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@declared_attr
def subcontribution(cls):
    """Relationship to the linked subcontribution (if allowed)."""
    if LinkType.subcontribution not in cls.allowed_link_types:
        return None
    # `unique_links` may be True or an SQL criterion string, so the
    # `!= True` comparison is deliberate (hence the noqa).
    return db.relationship(
        'SubContribution',
        lazy=True,
        backref=db.backref(
            cls.link_backref_name,
            cascade='all, delete-orphan',
            uselist=(cls.unique_links != True),  # noqa
            lazy=cls.link_backref_lazy
        )
    )
@hybrid_property
def object(self):
    """The object this record is linked to, selected by ``link_type``.

    Returns None when ``link_type`` matches none of the known types.
    """
    attrs = {
        LinkType.category: 'category',
        LinkType.event: 'event',
        LinkType.session: 'session',
        LinkType.session_block: 'session_block',
        LinkType.contribution: 'contribution',
        LinkType.subcontribution: 'subcontribution',
    }
    attr = attrs.get(self.link_type)
    return getattr(self, attr) if attr is not None else None
@object.setter
def object(self, obj):
    """Link this record to *obj*, clearing any previous link first."""
    self.category = None
    self.linked_event = self.event = self.session = self.session_block = None
    self.contribution = self.subcontribution = None
    dispatch = (
        (db.m.Category, 'category'),
        (db.m.Event, 'linked_event'),
        (db.m.Session, 'session'),
        (db.m.SessionBlock, 'session_block'),
        (db.m.Contribution, 'contribution'),
        (db.m.SubContribution, 'subcontribution'),
    )
    for model, attr in dispatch:
        if isinstance(obj, model):
            setattr(self, attr, obj)
            return
    raise TypeError(f'Unexpected object: {obj}')
@object.comparator
def object(cls):
    # Enables query-level comparisons such as ``Model.object == some_obj``
    # by expanding them into (link_type, *_id) filter criteria.
    return LinkedObjectComparator(cls)
@property
def link_repr(self):
    """A kwargs-style string suitable for the object's repr."""
    link_type_name = self.link_type.name if self.link_type is not None else 'None'
    parts = [('link_type', link_type_name)]
    for column in _all_columns:
        value = getattr(self, column)
        if value is not None:
            parts.append((column, value))
    return ', '.join(f'{key}={value}' for key, value in parts)
@property
def link_event_log_data(self):
    """Return a dict describing the linked object for the event log.

    Nothing is returned for objects linked to a category or to the
    event itself.
    """
    data = {}
    if self.link_type == LinkType.session:
        data['Session'] = self.session.title
    # Consistency fix: this was a bare `if` while every sibling branch
    # uses `elif`; the link types are mutually exclusive.
    elif self.link_type == LinkType.session_block:
        data['Session Block'] = self.session_block.title
    elif self.link_type == LinkType.contribution:
        data['Contribution'] = self.contribution.title
    elif self.link_type == LinkType.subcontribution:
        data['Contribution'] = self.subcontribution.contribution.title
        data['Subcontribution'] = self.subcontribution.title
    return data
class LinkedObjectComparator(Comparator):
    """Comparator turning ``Model.object == obj`` into SQL criteria."""

    def __init__(self, cls):
        self.cls = cls

    def __clause_element__(self):
        # This comparator only supports explicit ``==`` comparisons.
        raise NotImplementedError

    def __eq__(self, other):
        dispatch = (
            (db.m.Category, LinkType.category, 'category_id'),
            (db.m.Event, LinkType.event, 'linked_event_id'),
            (db.m.Session, LinkType.session, 'session_id'),
            (db.m.SessionBlock, LinkType.session_block, 'session_block_id'),
            (db.m.Contribution, LinkType.contribution, 'contribution_id'),
            (db.m.SubContribution, LinkType.subcontribution, 'subcontribution_id'),
        )
        for model, link_type, column in dispatch:
            if isinstance(other, model):
                return db.and_(self.cls.link_type == link_type,
                               getattr(self.cls, column) == other.id)
        raise ValueError(f'Unexpected object type {type(other)}: {other}')
| en | 0.854646 | # This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. #: The link types that are supported. Can be overridden in the #: model using the mixin. Affects the table structure, so any #: changes to it should go along with a migration step! #: If only one link per object should be allowed. This may also #: be a string containing an SQL string to specify the criterion #: for the unique index to be applied, e.g. ``'is_foo = true'``. #: The name of the backref that's added to the Event model to #: access *all* linked objects #: The name of the backref that's added to the linked objects #: The laziness of the backref that's added to the linked objects Register sqlalchemy events needed by this mixin. Call this method after the definition of a model which uses this mixin class. # noqa # noqa # noqa # noqa # noqa # noqa A kwargs-style string suitable for the object's repr. Return a dict containing information about the linked object suitable for the event log. It does not return any information for an object linked to a category or the event itself. # just in case | 1.880593 | 2 |
zenduty/api/members_api.py | Zenduty/zenduty-python-sdk | 0 | 6624912 | from zenduty.api_client import ApiClient
class MembersApi(object):
    """Thin wrapper around the Zenduty team-members REST endpoints."""

    def __init__(self, api_client=None):
        # Fall back to a default client so the class works out of the box.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def add_members_to_team(self, team_id, body):
        """Add a member to the team identified by *team_id*.

        ``body`` carries the user/team details, e.g.::

            {"team": "d4a777db-5bce-419c-a725-420ebb505c54", "user": "af9eeb6<PASSWORD>"}
        """
        url = '/api/account/teams/{}/members/'.format(team_id)
        return self.api_client.call_api('POST', url, body=body)

    def delete_members_from_team(self, team_id, member_id):
        """Remove member *member_id* from team *team_id*."""
        url = '/api/account/teams/{}/members/{}/'.format(team_id, member_id)
        return self.api_client.call_api('DELETE', url)
| from zenduty.api_client import ApiClient
class MembersApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def add_members_to_team(self,team_id,body):
#Adds a member to a given team, identified by id
#params str team_id: unique id of team
#params dict body: contains the details of the user being added and the team to add to
#Sample body:
# {"team":"d4a777db-5bce-419c-a725-420ebb505c54","user":"af9eeb6<PASSWORD>"}
return self.api_client.call_api('POST','/api/account/teams/{}/members/'.format(team_id),body=body)
def delete_members_from_team(self,team_id,member_id):
#Removes a member from a particular team
#params str team_id: unique id of a team
#params str member_id: unique id of member to be deleted
return self.api_client.call_api('DELETE','/api/account/teams/{}/members/{}/'.format(team_id,member_id))
| en | 0.888139 | #Adds a member to a given team, identified by id #params str team_id: unique id of team #params dict body: contains the details of the user being added and the team to add to #Sample body: # {"team":"d4a777db-5bce-419c-a725-420ebb505c54","user":"af9eeb6<PASSWORD>"} #Removes a member from a particular team #params str team_id: unique id of a team #params str member_id: unique id of member to be deleted | 2.77564 | 3 |
telegrambotapiwrapper/response.py | pynista/telegrambotapiwrapper | 1 | 6624913 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 <NAME>; MIT License
"""Response functionality from Telegram Bot Api."""
import jsonpickle
import telegrambotapiwrapper.typelib as types_module
from telegrambotapiwrapper.annotation import AnnotationWrapper
from telegrambotapiwrapper.errors import UnsuccessfulRequest
from telegrambotapiwrapper.request import json_payload
from telegrambotapiwrapper.typelib import ResponseParameters
def is_str_int_float_bool(value):
    """Return True if *value* is a str, int, float or bool."""
    # bool is a subclass of int, so it is covered either way; listing it
    # explicitly documents the intent stated in the function name.
    return isinstance(value, (bool, int, float, str))
def dataclass_fields_to_jdict(fields):
    """Get a json-like dict from the dataclass fields."""
    # Round-trip through the JSON payload serializer to normalize values.
    return jsonpickle.decode(json_payload(fields))
def replace__from__by__from_(d):
    """Recursively rename every ``from`` key to ``from_``.

    Lists are processed element-wise; non-container values are returned
    unchanged.
    """
    if isinstance(d, list):
        return [replace__from__by__from_(item) for item in d]
    if not isinstance(d, dict):
        return d
    return {('from_' if key == 'from' else key): replace__from__by__from_(value)
            for key, value in d.items()}
def to_api_type(obj, anno: AnnotationWrapper):
    """Convert a decoded Bot API result *obj* into the type named by *anno*.

    *anno* is an :class:`AnnotationWrapper` describing the expected type
    (e.g. ``Message``, ``List[Update]``, ``Union[Message, bool]``).
    Primitive values are returned unchanged; dicts are mapped recursively
    onto the types defined in :mod:`telegrambotapiwrapper.typelib`.
    """
    def list_to_api_type(obj: list, anno: AnnotationWrapper) -> list:
        """Convert a list of dicts to a list of api-type instances."""
        inner_part = anno.inner_part_of_list.sanitized.data
        api_type = getattr(types_module, inner_part)
        res = []
        for item in obj:
            to_type = {}
            for field_name, field_type in api_type._annotations().items():
                try:
                    to_type[field_name] = to_api_type(
                        item[field_name], AnnotationWrapper(field_type))
                except KeyError:
                    # Optional field absent from the response; skip it.
                    continue
            res.append(api_type(**to_type))
        return res

    def list_of_list_to_api_type(obj: list, anno: AnnotationWrapper):
        """Convert a list of lists to api types."""
        return [list_to_api_type(lst, anno.inner_part_of_list) for lst in obj]

    def union_to_api_type(obj, anno: AnnotationWrapper):
        """Convert a union-annotated value to an api type."""
        if anno == 'Union[Message, bool]':
            return to_api_type(obj, AnnotationWrapper('Message'))
        # BUG FIX: the comparison string previously contained a stray ']'
        # ('Union[InputFile, str]]'), so this branch could never match.
        elif anno == 'Union[InputFile, str]':
            return to_api_type(obj, AnnotationWrapper('InputFile'))

    if is_str_int_float_bool(obj):
        return obj
    if anno.is_optional:
        # Unwrap Optional[...] before dispatching on the inner annotation.
        anno = anno.inner_part_of_optional
    if anno.is_list:
        return list_to_api_type(obj, anno)
    elif anno.is_list_of_list:
        return list_of_list_to_api_type(obj, anno)
    elif anno.is_union:
        return union_to_api_type(obj, anno)
    if isinstance(obj, dict):
        to_type = {}
        api_type = getattr(types_module, anno.sanitized.data)
        for field_name, field_type in api_type._annotations().items():
            try:
                to_type[field_name] = to_api_type(
                    obj[field_name], AnnotationWrapper(field_type))
            except KeyError:
                continue
        return api_type(**to_type)
def get_result(raw_response: str):
    """Extract the result payload from a raw Telegram Bot API response.

    Args:
        raw_response (str): raw JSON response from the Telegram Bot API.

    Returns:
        The value of the ``result`` field when the request succeeded.

    Raises:
        UnsuccessfulRequest: if ``ok`` is falsy; ``parameters`` are
            attached when the API supplied them.

    Note:
        A response without an ``ok`` field raises KeyError here —
        presumably callers only pass full API envelopes; verify before
        feeding pre-extracted results in.
    """
    response = jsonpickle.loads(raw_response)
    ok_field = response['ok']
    if ok_field:  # TODO: check the type of ok_field, i.e. that it is a boolean value rather than a string
        return response['result']
    else:
        description = response['description']
        error_code = response['error_code']
        parameters = response.get('parameters', None)
        if not parameters:
            raise UnsuccessfulRequest(description=description, error_code=error_code)
        else:
            # Copy over only the two documented ResponseParameters fields.
            to_rp = {}
            if 'migrate_to_chat_id' in parameters:
                to_rp['migrate_to_chat_id'] = parameters['migrate_to_chat_id']
            if 'retry_after' in parameters:
                to_rp['retry_after'] = parameters['retry_after']
            raise UnsuccessfulRequest\
                (description=description, error_code=error_code, parameters=ResponseParameters(**to_rp))
def handle_response(raw_response: str,
method_response_type: AnnotationWrapper):
"""Parse a string that is a response from the Telegram Bot API.
Args:
raw_response (str): response from Telegram Bot API
method_response_type (AnnotationWrapper): annotation of the expected
response
Raises:
RequestResultIsNotOk: if the answer contains no result
"""
res = get_result(raw_response)
res = replace__from__by__from_(res)
return to_api_type(res, method_response_type) | # -*- coding: utf-8 -*-
# Copyright (c) 2019 <NAME>; MIT License
"""Response functionality from Telegram Bot Api."""
import jsonpickle
import telegrambotapiwrapper.typelib as types_module
from telegrambotapiwrapper.annotation import AnnotationWrapper
from telegrambotapiwrapper.errors import UnsuccessfulRequest
from telegrambotapiwrapper.request import json_payload
from telegrambotapiwrapper.typelib import ResponseParameters
def is_str_int_float_bool(value):
    """Return True if *value* is a str, int, float or bool."""
    # bool is a subclass of int, so it is covered either way; listing it
    # explicitly documents the intent stated in the function name.
    return isinstance(value, (bool, int, float, str))
def dataclass_fields_to_jdict(fields):
    """Get a json-like dict from the dataclass fields."""
    # Round-trip through the JSON payload serializer to normalize values.
    return jsonpickle.decode(json_payload(fields))
def replace__from__by__from_(d):
    """Recursively rename every ``from`` key to ``from_``.

    Lists are processed element-wise; non-container values are returned
    unchanged.
    """
    if isinstance(d, list):
        return [replace__from__by__from_(item) for item in d]
    if not isinstance(d, dict):
        return d
    return {('from_' if key == 'from' else key): replace__from__by__from_(value)
            for key, value in d.items()}
def to_api_type(obj, anno: AnnotationWrapper):
"""Convert object to api type
Convert the result of the request to the Telegram Bot API into the
appropriate type.
Notes:
1)
In the current versions of Telegram Bot Api (4.2), the following
values can be returned:
'Chat',
'ChatMember',
'File',
'List[ChatMember]',
'List[GameHighScore]',
'List[Update]',
'Message',
'Pool',
'StickerSet',
'Union[Message, bool]',
'User',
'UserProfilePhotos',
'WebhookInfo',
'bool',
'int',
'str'
For the current version of Telegram Bot Api (4.2), the following
union-annotations are used to create types.:
'Optional[Union[InputFile, str]]'
For the current version of Telegram Bot Api (4.2), the following
`List`-annotations are used to create types:
'List[EncryptedPassportElement]',
'List[LabeledPrice]',
'List[PhotoSize]',
'List[PollOption]',
'List[Sticker]',
'List[str]',
For the current version of Telegram Bot Api (4.2), the following
`List [List [` - annotations are used to create types:
'List[List[InlineKeyboardButton]]',
'List[List[KeyboardButton]]',
'List[List[PhotoSize]]',
For the current version of Telegram Bot Api (4.2), return values
may have the following `union`- annotations:
'Union [Message, bool]'
2)
"""
def list_to_api_type(obj: list, anno: AnnotationWrapper) -> list:
"""Convert list to api type."""
inner_part = anno.inner_part_of_list.sanitized.data
api_type = getattr(types_module, inner_part)
res = []
for item in obj:
to_type = {}
for field_name, field_type in api_type._annotations().items():
try:
to_type[field_name] = to_api_type(
item[field_name], AnnotationWrapper(field_type))
except KeyError:
continue
res.append(api_type(**to_type))
return res
def list_of_list_to_api_type(obj: list, anno: AnnotationWrapper):
"""Convert list of list to api type."""
res = []
for lst in obj:
res.append(list_to_api_type(lst, anno.inner_part_of_list))
return res
def union_to_api_type(obj, anno: AnnotationWrapper):
"""Convert union to api type."""
if anno == 'Union[Message, bool]':
return to_api_type(obj, AnnotationWrapper('Message'))
elif anno == 'Union[InputFile, str]]':
return to_api_type(obj, AnnotationWrapper('InputFile'))
if is_str_int_float_bool(obj):
return obj
if anno.is_optional:
anno = anno.inner_part_of_optional
if anno.is_list:
return list_to_api_type(obj, anno)
elif anno.is_list_of_list:
return list_of_list_to_api_type(obj, anno)
elif anno.is_union:
return union_to_api_type(obj, anno)
if isinstance(obj, dict):
to_type = {}
api_type = getattr(types_module, anno.sanitized.data)
for field_name, field_type in api_type._annotations().items():
try:
to_type[field_name] = to_api_type(
obj[field_name], AnnotationWrapper(field_type))
except KeyError:
continue
return api_type(**to_type)
def get_result(raw_response: str):
"""Extract the result from the raw response from the Bot API telegram.
Args:
raw_response (str): `raw` response from Bot API telegram
Raises:
RequestResultIsNotOk: if the answer contains no result
Note:
If raw_response does not contain an `ok` field, then we assume that this
is an extracted result, for example, for testing purposes.
"""
response = jsonpickle.loads(raw_response)
ok_field = response['ok']
if ok_field: # TODO: проверить тип ok_field т.е. что это булевское значение, а не строка
return response['result']
else:
description = response['description']
error_code = response['error_code']
parameters = response.get('parameters', None)
if not parameters:
raise UnsuccessfulRequest(description=description, error_code=error_code)
else:
to_rp = {}
if 'migrate_to_chat_id' in parameters:
to_rp['migrate_to_chat_id'] = parameters['migrate_to_chat_id']
if 'retry_after' in parameters:
to_rp['retry_after'] = parameters['retry_after']
raise UnsuccessfulRequest\
(description=description, error_code=error_code, parameters=ResponseParameters(**to_rp))
def handle_response(raw_response: str,
method_response_type: AnnotationWrapper):
"""Parse a string that is a response from the Telegram Bot API.
Args:
raw_response (str): response from Telegram Bot API
method_response_type (AnnotationWrapper): annotation of the expected
response
Raises:
RequestResultIsNotOk: if the answer contains no result
"""
res = get_result(raw_response)
res = replace__from__by__from_(res)
return to_api_type(res, method_response_type) | en | 0.523162 | # -*- coding: utf-8 -*- # Copyright (c) 2019 <NAME>; MIT License Response functionality from Telegram Bot Api. Is value str, int, float, bool. Get a json-like dict from the dataclass fields. Replace recursive keys in the object from to from_. Convert object to api type Convert the result of the request to the Telegram Bot API into the appropriate type. Notes: 1) In the current versions of Telegram Bot Api (4.2), the following values can be returned: 'Chat', 'ChatMember', 'File', 'List[ChatMember]', 'List[GameHighScore]', 'List[Update]', 'Message', 'Pool', 'StickerSet', 'Union[Message, bool]', 'User', 'UserProfilePhotos', 'WebhookInfo', 'bool', 'int', 'str' For the current version of Telegram Bot Api (4.2), the following union-annotations are used to create types.: 'Optional[Union[InputFile, str]]' For the current version of Telegram Bot Api (4.2), the following `List`-annotations are used to create types: 'List[EncryptedPassportElement]', 'List[LabeledPrice]', 'List[PhotoSize]', 'List[PollOption]', 'List[Sticker]', 'List[str]', For the current version of Telegram Bot Api (4.2), the following `List [List [` - annotations are used to create types: 'List[List[InlineKeyboardButton]]', 'List[List[KeyboardButton]]', 'List[List[PhotoSize]]', For the current version of Telegram Bot Api (4.2), return values may have the following `union`- annotations: 'Union [Message, bool]' 2) Convert list to api type. Convert list of list to api type. Convert union to api type. Extract the result from the raw response from the Bot API telegram. Args: raw_response (str): `raw` response from Bot API telegram Raises: RequestResultIsNotOk: if the answer contains no result Note: If raw_response does not contain an `ok` field, then we assume that this is an extracted result, for example, for testing purposes. # TODO: проверить тип ok_field т.е. 
что это булевское значение, а не строка Parse a string that is a response from the Telegram Bot API. Args: raw_response (str): response from Telegram Bot API method_response_type (AnnotationWrapper): annotation of the expected response Raises: RequestResultIsNotOk: if the answer contains no result | 2.799906 | 3 |
circuits/tools/__init__.py | spaceone/circuits | 0 | 6624914 | <gh_stars>0
"""Circuits Tools
circuits.tools contains a standard set of tools for circuits. These
tools are installed as executables with a prefix of "circuits."
"""
import inspect as _inspect
from functools import wraps
from warnings import warn, warn_explicit
from circuits.six import _func_code
def tryimport(modules, obj=None, message=None):
    """Import the first available module from *modules*.

    Returns the module (or its attribute *obj*); if every candidate
    fails, optionally warns with *message* and returns None.
    """
    candidates = (modules,) if isinstance(modules, str) else modules
    for name in candidates:
        try:
            imported = __import__(name, globals(), locals())
            return getattr(imported, obj) if obj is not None else imported
        except Exception:
            continue
    if message is not None:
        warn(message)
def getargspec(func):
    """Return ``(args, varargs, varkw, defaults)`` for *func*.

    Prefers :func:`inspect.getfullargspec` (Python 3); falls back to the
    legacy ``getargspec`` where the former is unavailable.
    """
    if hasattr(_inspect, 'getfullargspec'):
        spec = _inspect.getfullargspec(func)
    else:
        spec = _inspect.getargspec(func)
    return spec[:4]
def walk(x, f, d=0, v=None):
if not v:
v = set()
yield f(d, x)
for c in x.components.copy():
if c not in v:
v.add(c)
for r in walk(c, f, d + 1, v):
yield r
def edges(x, e=None, v=None, d=0):
if not e:
e = set()
if not v:
v = []
d += 1
for c in x.components.copy():
e.add((x, c, d))
edges(c, e, v, d)
return e
def findroot(x):
if x.parent == x:
return x
else:
return findroot(x.parent)
def kill(x):
for c in x.components.copy():
kill(c)
if x.parent is not x:
x.unregister()
def graph(x, name=None):
    """Display a directed graph of the Component structure of x

    :param x: A Component or Manager to graph
    :type x: Component or Manager

    :param name: A name for the graph (defaults to x's name)
    :type name: str

    @return: A directed graph representing x's Component structure.
    @rtype: str
    """
    # All three libraries are optional; the image is only rendered when
    # every one of them imports successfully.
    networkx = tryimport("networkx")
    pygraphviz = tryimport("pygraphviz")
    plt = tryimport("matplotlib.pyplot", "pyplot")
    if networkx is not None and pygraphviz is not None and plt is not None:
        # Collect (parent, child, depth) edges; depth becomes the weight.
        graph_edges = []
        for (u, v, d) in edges(x):
            graph_edges.append((u.name, v.name, float(d)))
        g = networkx.DiGraph()
        g.add_weighted_edges_from(graph_edges)
        # Deep edges (weight > 3) are drawn solid; shallow ones dashed blue.
        elarge = [(u, v) for (u, v, d) in g.edges(data=True)
                  if d["weight"] > 3.0]
        esmall = [(u, v) for (u, v, d) in g.edges(data=True)
                  if d["weight"] <= 3.0]
        pos = networkx.spring_layout(g)  # positions for all nodes
        # nodes
        networkx.draw_networkx_nodes(g, pos, node_size=700)
        # edges
        networkx.draw_networkx_edges(g, pos, edgelist=elarge, width=1)
        networkx.draw_networkx_edges(
            g, pos, edgelist=esmall, width=1,
            alpha=0.5, edge_color="b", style="dashed"
        )
        # labels
        networkx.draw_networkx_labels(
            g, pos, font_size=10, font_family="sans-serif"
        )
        plt.axis("off")
        # Write <name>.png and <name>.dot into the current directory.
        plt.savefig("{0:s}.png".format(name or x.name))
        networkx.drawing.nx_agraph.write_dot(g, "{0:s}.dot".format(name or x.name))
        plt.clf()

    def printer(d, x):
        # Indent one space per nesting level, bullet each component.
        return "%s* %s" % (" " * d, x)

    # The ASCII tree is always returned, even when an image was rendered.
    return "\n".join(walk(x, printer))
def inspect(x):
    """Return a textual inspection report of the Component or Manager *x*.

    Lists direct child components followed by all registered event
    handlers.
    """
    lines = []

    lines.append(" Components: %d\n" % len(x.components))
    for component in x.components:
        lines.append(" %s\n" % component)
    lines.append("\n")

    # Imported lazily to avoid a circular import at module load time.
    from circuits import reprhandler
    lines.append(" Event Handlers: %d\n" % len(x._handlers.values()))
    for event, handlers in x._handlers.items():
        lines.append(" %s; %d\n" % (event, len(x._handlers[event])))
        for handler in x._handlers[event]:
            lines.append(" %s\n" % reprhandler(handler))
    return "".join(lines)
def deprecated(f):
    """Decorator that emits a DeprecationWarning each time *f* is called."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # _func_code abstracts the py2/py3 name of the code object attribute.
        code = getattr(f, _func_code)
        warn_explicit(
            "Call to deprecated function {0:s}".format(f.__name__),
            category=DeprecationWarning,
            filename=code.co_filename,
            lineno=code.co_firstlineno + 1
        )
        return f(*args, **kwargs)
    return wrapper
| """Circuits Tools
circuits.tools contains a standard set of tools for circuits. These
tools are installed as executables with a prefix of "circuits."
"""
import inspect as _inspect
from functools import wraps
from warnings import warn, warn_explicit
from circuits.six import _func_code
def tryimport(modules, obj=None, message=None):
    """Import the first available module from *modules*.

    Returns the module (or its attribute *obj*); if every candidate
    fails, optionally warns with *message* and returns None.
    """
    candidates = (modules,) if isinstance(modules, str) else modules
    for name in candidates:
        try:
            imported = __import__(name, globals(), locals())
            return getattr(imported, obj) if obj is not None else imported
        except Exception:
            continue
    if message is not None:
        warn(message)
def getargspec(func):
    """Return ``(args, varargs, varkw, defaults)`` for *func*.

    Prefers :func:`inspect.getfullargspec` (Python 3); falls back to the
    legacy ``getargspec`` where the former is unavailable.
    """
    if hasattr(_inspect, 'getfullargspec'):
        spec = _inspect.getfullargspec(func)
    else:
        spec = _inspect.getargspec(func)
    return spec[:4]
def walk(x, f, d=0, v=None):
if not v:
v = set()
yield f(d, x)
for c in x.components.copy():
if c not in v:
v.add(c)
for r in walk(c, f, d + 1, v):
yield r
def edges(x, e=None, v=None, d=0):
if not e:
e = set()
if not v:
v = []
d += 1
for c in x.components.copy():
e.add((x, c, d))
edges(c, e, v, d)
return e
def findroot(x):
if x.parent == x:
return x
else:
return findroot(x.parent)
def kill(x):
for c in x.components.copy():
kill(c)
if x.parent is not x:
x.unregister()
def graph(x, name=None):
"""Display a directed graph of the Component structure of x
:param x: A Component or Manager to graph
:type x: Component or Manager
:param name: A name for the graph (defaults to x's name)
:type name: str
@return: A directed graph representing x's Component structure.
@rtype: str
"""
networkx = tryimport("networkx")
pygraphviz = tryimport("pygraphviz")
plt = tryimport("matplotlib.pyplot", "pyplot")
if networkx is not None and pygraphviz is not None and plt is not None:
graph_edges = []
for (u, v, d) in edges(x):
graph_edges.append((u.name, v.name, float(d)))
g = networkx.DiGraph()
g.add_weighted_edges_from(graph_edges)
elarge = [(u, v) for (u, v, d) in g.edges(data=True)
if d["weight"] > 3.0]
esmall = [(u, v) for (u, v, d) in g.edges(data=True)
if d["weight"] <= 3.0]
pos = networkx.spring_layout(g) # positions for all nodes
# nodes
networkx.draw_networkx_nodes(g, pos, node_size=700)
# edges
networkx.draw_networkx_edges(g, pos, edgelist=elarge, width=1)
networkx.draw_networkx_edges(
g, pos, edgelist=esmall, width=1,
alpha=0.5, edge_color="b", style="dashed"
)
# labels
networkx.draw_networkx_labels(
g, pos, font_size=10, font_family="sans-serif"
)
plt.axis("off")
plt.savefig("{0:s}.png".format(name or x.name))
networkx.drawing.nx_agraph.write_dot(g, "{0:s}.dot".format(name or x.name))
plt.clf()
def printer(d, x):
return "%s* %s" % (" " * d, x)
return "\n".join(walk(x, printer))
def inspect(x):
"""Display an inspection report of the Component or Manager x
:param x: A Component or Manager to graph
:type x: Component or Manager
@return: A detailed inspection report of x
@rtype: str
"""
s = []
write = s.append
write(" Components: %d\n" % len(x.components))
for component in x.components:
write(" %s\n" % component)
write("\n")
from circuits import reprhandler
write(" Event Handlers: %d\n" % len(x._handlers.values()))
for event, handlers in x._handlers.items():
write(" %s; %d\n" % (event, len(x._handlers[event])))
for handler in x._handlers[event]:
write(" %s\n" % reprhandler(handler))
return "".join(s)
def deprecated(f):
@wraps(f)
def wrapper(*args, **kwargs):
warn_explicit(
"Call to deprecated function {0:s}".format(f.__name__),
category=DeprecationWarning,
filename=getattr(f, _func_code).co_filename,
lineno=getattr(f, _func_code).co_firstlineno + 1
)
return f(*args, **kwargs)
return wrapper | en | 0.81707 | Circuits Tools circuits.tools contains a standard set of tools for circuits. These tools are installed as executables with a prefix of "circuits." Display a directed graph of the Component structure of x :param x: A Component or Manager to graph :type x: Component or Manager :param name: A name for the graph (defaults to x's name) :type name: str @return: A directed graph representing x's Component structure. @rtype: str # positions for all nodes # nodes # edges # labels Display an inspection report of the Component or Manager x :param x: A Component or Manager to graph :type x: Component or Manager @return: A detailed inspection report of x @rtype: str | 2.573513 | 3 |
docusign_esign/models/contact.py | pivotal-energy-solutions/docusign-python-client | 0 | 6624915 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Contact(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, contact_id=None, contact_phone_numbers=None, contact_uri=None, emails=None, error_details=None, name=None, organization=None, shared=None, signing_group=None, signing_group_name=None):
    """Contact - a model defined in Swagger.

    ``swagger_types`` maps attribute names to their declared types;
    ``attribute_map`` maps them to the JSON keys used on the wire.
    """
    self.swagger_types = {
        'contact_id': 'str',
        'contact_phone_numbers': 'list[ContactPhoneNumber]',
        'contact_uri': 'str',
        'emails': 'list[str]',
        'error_details': 'ErrorDetails',
        'name': 'str',
        'organization': 'str',
        'shared': 'str',
        'signing_group': 'str',
        'signing_group_name': 'str'
    }

    self.attribute_map = {
        'contact_id': 'contactId',
        'contact_phone_numbers': 'contactPhoneNumbers',
        'contact_uri': 'contactUri',
        'emails': 'emails',
        'error_details': 'errorDetails',
        'name': 'name',
        'organization': 'organization',
        'shared': 'shared',
        'signing_group': 'signingGroup',
        'signing_group_name': 'signingGroupName'
    }

    # Store each value on its private backing attribute; the public
    # properties provide access.
    initial_values = (
        ('contact_id', contact_id),
        ('contact_phone_numbers', contact_phone_numbers),
        ('contact_uri', contact_uri),
        ('emails', emails),
        ('error_details', error_details),
        ('name', name),
        ('organization', organization),
        ('shared', shared),
        ('signing_group', signing_group),
        ('signing_group_name', signing_group_name),
    )
    for attr, value in initial_values:
        setattr(self, '_' + attr, value)
@property
def contact_id(self):
    """str: The contact_id of this Contact."""
    return self._contact_id
@contact_id.setter
def contact_id(self, contact_id):
    """Sets the contact_id (str) of this Contact."""
    self._contact_id = contact_id
@property
def contact_phone_numbers(self):
    """list[ContactPhoneNumber]: The contact_phone_numbers of this Contact."""
    return self._contact_phone_numbers
@contact_phone_numbers.setter
def contact_phone_numbers(self, contact_phone_numbers):
    """Sets the contact_phone_numbers (list[ContactPhoneNumber]) of this Contact."""
    self._contact_phone_numbers = contact_phone_numbers
@property
def contact_uri(self):
"""
Gets the contact_uri of this Contact.
:return: The contact_uri of this Contact.
:rtype: str
"""
return self._contact_uri
@contact_uri.setter
def contact_uri(self, contact_uri):
"""
Sets the contact_uri of this Contact.
:param contact_uri: The contact_uri of this Contact.
:type: str
"""
self._contact_uri = contact_uri
@property
def emails(self):
"""
Gets the emails of this Contact.
:return: The emails of this Contact.
:rtype: list[str]
"""
return self._emails
@emails.setter
def emails(self, emails):
"""
Sets the emails of this Contact.
:param emails: The emails of this Contact.
:type: list[str]
"""
self._emails = emails
@property
def error_details(self):
"""
Gets the error_details of this Contact.
:return: The error_details of this Contact.
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this Contact.
:param error_details: The error_details of this Contact.
:type: ErrorDetails
"""
self._error_details = error_details
@property
def name(self):
"""
Gets the name of this Contact.
:return: The name of this Contact.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Contact.
:param name: The name of this Contact.
:type: str
"""
self._name = name
@property
def organization(self):
"""
Gets the organization of this Contact.
:return: The organization of this Contact.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""
Sets the organization of this Contact.
:param organization: The organization of this Contact.
:type: str
"""
self._organization = organization
@property
def shared(self):
"""
Gets the shared of this Contact.
When set to **true**, this custom tab is shared.
:return: The shared of this Contact.
:rtype: str
"""
return self._shared
@shared.setter
def shared(self, shared):
"""
Sets the shared of this Contact.
When set to **true**, this custom tab is shared.
:param shared: The shared of this Contact.
:type: str
"""
self._shared = shared
@property
def signing_group(self):
"""
Gets the signing_group of this Contact.
:return: The signing_group of this Contact.
:rtype: str
"""
return self._signing_group
@signing_group.setter
def signing_group(self, signing_group):
"""
Sets the signing_group of this Contact.
:param signing_group: The signing_group of this Contact.
:type: str
"""
self._signing_group = signing_group
@property
def signing_group_name(self):
"""
Gets the signing_group_name of this Contact.
The display name for the signing group. Maximum Length: 100 characters.
:return: The signing_group_name of this Contact.
:rtype: str
"""
return self._signing_group_name
@signing_group_name.setter
def signing_group_name(self, signing_group_name):
"""
Sets the signing_group_name of this Contact.
The display name for the signing group. Maximum Length: 100 characters.
:param signing_group_name: The signing_group_name of this Contact.
:type: str
"""
self._signing_group_name = signing_group_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Contact(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, contact_id=None, contact_phone_numbers=None, contact_uri=None, emails=None, error_details=None, name=None, organization=None, shared=None, signing_group=None, signing_group_name=None):
"""
Contact - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'contact_id': 'str',
'contact_phone_numbers': 'list[ContactPhoneNumber]',
'contact_uri': 'str',
'emails': 'list[str]',
'error_details': 'ErrorDetails',
'name': 'str',
'organization': 'str',
'shared': 'str',
'signing_group': 'str',
'signing_group_name': 'str'
}
self.attribute_map = {
'contact_id': 'contactId',
'contact_phone_numbers': 'contactPhoneNumbers',
'contact_uri': 'contactUri',
'emails': 'emails',
'error_details': 'errorDetails',
'name': 'name',
'organization': 'organization',
'shared': 'shared',
'signing_group': 'signingGroup',
'signing_group_name': 'signingGroupName'
}
self._contact_id = contact_id
self._contact_phone_numbers = contact_phone_numbers
self._contact_uri = contact_uri
self._emails = emails
self._error_details = error_details
self._name = name
self._organization = organization
self._shared = shared
self._signing_group = signing_group
self._signing_group_name = signing_group_name
@property
def contact_id(self):
"""
Gets the contact_id of this Contact.
:return: The contact_id of this Contact.
:rtype: str
"""
return self._contact_id
@contact_id.setter
def contact_id(self, contact_id):
"""
Sets the contact_id of this Contact.
:param contact_id: The contact_id of this Contact.
:type: str
"""
self._contact_id = contact_id
@property
def contact_phone_numbers(self):
"""
Gets the contact_phone_numbers of this Contact.
:return: The contact_phone_numbers of this Contact.
:rtype: list[ContactPhoneNumber]
"""
return self._contact_phone_numbers
@contact_phone_numbers.setter
def contact_phone_numbers(self, contact_phone_numbers):
"""
Sets the contact_phone_numbers of this Contact.
:param contact_phone_numbers: The contact_phone_numbers of this Contact.
:type: list[ContactPhoneNumber]
"""
self._contact_phone_numbers = contact_phone_numbers
@property
def contact_uri(self):
"""
Gets the contact_uri of this Contact.
:return: The contact_uri of this Contact.
:rtype: str
"""
return self._contact_uri
@contact_uri.setter
def contact_uri(self, contact_uri):
"""
Sets the contact_uri of this Contact.
:param contact_uri: The contact_uri of this Contact.
:type: str
"""
self._contact_uri = contact_uri
@property
def emails(self):
"""
Gets the emails of this Contact.
:return: The emails of this Contact.
:rtype: list[str]
"""
return self._emails
@emails.setter
def emails(self, emails):
"""
Sets the emails of this Contact.
:param emails: The emails of this Contact.
:type: list[str]
"""
self._emails = emails
@property
def error_details(self):
"""
Gets the error_details of this Contact.
:return: The error_details of this Contact.
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this Contact.
:param error_details: The error_details of this Contact.
:type: ErrorDetails
"""
self._error_details = error_details
@property
def name(self):
"""
Gets the name of this Contact.
:return: The name of this Contact.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Contact.
:param name: The name of this Contact.
:type: str
"""
self._name = name
@property
def organization(self):
"""
Gets the organization of this Contact.
:return: The organization of this Contact.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""
Sets the organization of this Contact.
:param organization: The organization of this Contact.
:type: str
"""
self._organization = organization
@property
def shared(self):
"""
Gets the shared of this Contact.
When set to **true**, this custom tab is shared.
:return: The shared of this Contact.
:rtype: str
"""
return self._shared
@shared.setter
def shared(self, shared):
"""
Sets the shared of this Contact.
When set to **true**, this custom tab is shared.
:param shared: The shared of this Contact.
:type: str
"""
self._shared = shared
@property
def signing_group(self):
"""
Gets the signing_group of this Contact.
:return: The signing_group of this Contact.
:rtype: str
"""
return self._signing_group
@signing_group.setter
def signing_group(self, signing_group):
"""
Sets the signing_group of this Contact.
:param signing_group: The signing_group of this Contact.
:type: str
"""
self._signing_group = signing_group
@property
def signing_group_name(self):
"""
Gets the signing_group_name of this Contact.
The display name for the signing group. Maximum Length: 100 characters.
:return: The signing_group_name of this Contact.
:rtype: str
"""
return self._signing_group_name
@signing_group_name.setter
def signing_group_name(self, signing_group_name):
"""
Sets the signing_group_name of this Contact.
The display name for the signing group. Maximum Length: 100 characters.
:param signing_group_name: The signing_group_name of this Contact.
:type: str
"""
self._signing_group_name = signing_group_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| en | 0.733636 | # coding: utf-8 DocuSign REST API The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. OpenAPI spec version: v2 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Contact - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. Gets the contact_id of this Contact. :return: The contact_id of this Contact. :rtype: str Sets the contact_id of this Contact. :param contact_id: The contact_id of this Contact. :type: str Gets the contact_phone_numbers of this Contact. :return: The contact_phone_numbers of this Contact. :rtype: list[ContactPhoneNumber] Sets the contact_phone_numbers of this Contact. :param contact_phone_numbers: The contact_phone_numbers of this Contact. :type: list[ContactPhoneNumber] Gets the contact_uri of this Contact. :return: The contact_uri of this Contact. :rtype: str Sets the contact_uri of this Contact. :param contact_uri: The contact_uri of this Contact. :type: str Gets the emails of this Contact. :return: The emails of this Contact. :rtype: list[str] Sets the emails of this Contact. :param emails: The emails of this Contact. :type: list[str] Gets the error_details of this Contact. :return: The error_details of this Contact. :rtype: ErrorDetails Sets the error_details of this Contact. :param error_details: The error_details of this Contact. :type: ErrorDetails Gets the name of this Contact. :return: The name of this Contact. :rtype: str Sets the name of this Contact. :param name: The name of this Contact. :type: str Gets the organization of this Contact. :return: The organization of this Contact. :rtype: str Sets the organization of this Contact. 
:param organization: The organization of this Contact. :type: str Gets the shared of this Contact. When set to **true**, this custom tab is shared. :return: The shared of this Contact. :rtype: str Sets the shared of this Contact. When set to **true**, this custom tab is shared. :param shared: The shared of this Contact. :type: str Gets the signing_group of this Contact. :return: The signing_group of this Contact. :rtype: str Sets the signing_group of this Contact. :param signing_group: The signing_group of this Contact. :type: str Gets the signing_group_name of this Contact. The display name for the signing group. Maximum Length: 100 characters. :return: The signing_group_name of this Contact. :rtype: str Sets the signing_group_name of this Contact. The display name for the signing group. Maximum Length: 100 characters. :param signing_group_name: The signing_group_name of this Contact. :type: str Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.958006 | 2 |
autogl/module/model/decoders/__init__.py | dedsec-9/AutoGL | 0 | 6624916 | <reponame>dedsec-9/AutoGL
from .base_decoder import BaseDecoderMaintainer
from .decoder_registry import DecoderUniversalRegistry
from autogl.backend import DependentBackend
if DependentBackend.is_pyg():
from ._pyg import (
LogSoftmaxDecoderMaintainer,
SumPoolMLPDecoderMaintainer,
DiffPoolDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
else:
from ._dgl import (
LogSoftmaxDecoderMaintainer,
JKSumPoolDecoderMaintainer,
TopKDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
__all__ = [
"BaseDecoderMaintainer",
"DecoderUniversalRegistry",
"LogSoftmaxDecoderMaintainer",
"DotProductLinkPredictionDecoderMaintainer"
]
if DependentBackend.is_pyg():
__all__.extend([
"DiffPoolDecoderMaintainer",
"SumPoolMLPDecoderMaintainer"
])
else:
__all__.extend([
"JKSumPoolDecoderMaintainer",
"TopKDecoderMaintainer"
])
| from .base_decoder import BaseDecoderMaintainer
from .decoder_registry import DecoderUniversalRegistry
from autogl.backend import DependentBackend
if DependentBackend.is_pyg():
from ._pyg import (
LogSoftmaxDecoderMaintainer,
SumPoolMLPDecoderMaintainer,
DiffPoolDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
else:
from ._dgl import (
LogSoftmaxDecoderMaintainer,
JKSumPoolDecoderMaintainer,
TopKDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
__all__ = [
"BaseDecoderMaintainer",
"DecoderUniversalRegistry",
"LogSoftmaxDecoderMaintainer",
"DotProductLinkPredictionDecoderMaintainer"
]
if DependentBackend.is_pyg():
__all__.extend([
"DiffPoolDecoderMaintainer",
"SumPoolMLPDecoderMaintainer"
])
else:
__all__.extend([
"JKSumPoolDecoderMaintainer",
"TopKDecoderMaintainer"
]) | none | 1 | 1.797511 | 2 | |
.venv/lib/python3.8/site-packages/sympy/polys/domains/gmpyrationalfield.py | RivtLib/replit01 | 603 | 6624917 | <filename>.venv/lib/python3.8/site-packages/sympy/polys/domains/gmpyrationalfield.py
"""Implementation of :class:`GMPYRationalField` class. """
from sympy.polys.domains.groundtypes import (
GMPYRational, SymPyRational,
gmpy_numer, gmpy_denom, gmpy_factorial,
)
from sympy.polys.domains.rationalfield import RationalField
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class GMPYRationalField(RationalField):
"""Rational field based on GMPY's ``mpq`` type.
This will be the implementation of :ref:`QQ` if ``gmpy`` or ``gmpy2`` is
installed. Elements will be of type ``gmpy.mpq``.
"""
dtype = GMPYRational
zero = dtype(0)
one = dtype(1)
tp = type(one)
alias = 'QQ_gmpy'
def __init__(self):
pass
def get_ring(self):
"""Returns ring associated with ``self``. """
from sympy.polys.domains import GMPYIntegerRing
return GMPYIntegerRing()
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyRational(int(gmpy_numer(a)),
int(gmpy_denom(a)))
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Rational:
return GMPYRational(a.p, a.q)
elif a.is_Float:
from sympy.polys.domains import RR
return GMPYRational(*map(int, RR.to_rational(a)))
else:
raise CoercionFailed("expected ``Rational`` object, got %s" % a)
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return GMPYRational(a.numerator, a.denominator)
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return a
def from_GaussianRationalField(K1, a, K0):
"""Convert a ``GaussianElement`` object to ``dtype``. """
if a.y == 0:
return GMPYRational(a.x)
def from_RealField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return GMPYRational(*map(int, K0.to_rational(a)))
def exquo(self, a, b):
"""Exact quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies nothing. """
return self.zero
def div(self, a, b):
"""Division of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b), self.zero
def numer(self, a):
"""Returns numerator of ``a``. """
return a.numerator
def denom(self, a):
"""Returns denominator of ``a``. """
return a.denominator
def factorial(self, a):
"""Returns factorial of ``a``. """
return GMPYRational(gmpy_factorial(int(a)))
| <filename>.venv/lib/python3.8/site-packages/sympy/polys/domains/gmpyrationalfield.py
"""Implementation of :class:`GMPYRationalField` class. """
from sympy.polys.domains.groundtypes import (
GMPYRational, SymPyRational,
gmpy_numer, gmpy_denom, gmpy_factorial,
)
from sympy.polys.domains.rationalfield import RationalField
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class GMPYRationalField(RationalField):
"""Rational field based on GMPY's ``mpq`` type.
This will be the implementation of :ref:`QQ` if ``gmpy`` or ``gmpy2`` is
installed. Elements will be of type ``gmpy.mpq``.
"""
dtype = GMPYRational
zero = dtype(0)
one = dtype(1)
tp = type(one)
alias = 'QQ_gmpy'
def __init__(self):
pass
def get_ring(self):
"""Returns ring associated with ``self``. """
from sympy.polys.domains import GMPYIntegerRing
return GMPYIntegerRing()
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyRational(int(gmpy_numer(a)),
int(gmpy_denom(a)))
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Rational:
return GMPYRational(a.p, a.q)
elif a.is_Float:
from sympy.polys.domains import RR
return GMPYRational(*map(int, RR.to_rational(a)))
else:
raise CoercionFailed("expected ``Rational`` object, got %s" % a)
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return GMPYRational(a.numerator, a.denominator)
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return GMPYRational(a)
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return a
def from_GaussianRationalField(K1, a, K0):
"""Convert a ``GaussianElement`` object to ``dtype``. """
if a.y == 0:
return GMPYRational(a.x)
def from_RealField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return GMPYRational(*map(int, K0.to_rational(a)))
def exquo(self, a, b):
"""Exact quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b)
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies nothing. """
return self.zero
def div(self, a, b):
"""Division of ``a`` and ``b``, implies ``__truediv__``. """
return GMPYRational(a) / GMPYRational(b), self.zero
def numer(self, a):
"""Returns numerator of ``a``. """
return a.numerator
def denom(self, a):
"""Returns denominator of ``a``. """
return a.denominator
def factorial(self, a):
"""Returns factorial of ``a``. """
return GMPYRational(gmpy_factorial(int(a)))
| en | 0.585749 | Implementation of :class:`GMPYRationalField` class. Rational field based on GMPY's ``mpq`` type. This will be the implementation of :ref:`QQ` if ``gmpy`` or ``gmpy2`` is installed. Elements will be of type ``gmpy.mpq``. Returns ring associated with ``self``. Convert ``a`` to a SymPy object. Convert SymPy's Integer to ``dtype``. Convert a Python ``int`` object to ``dtype``. Convert a Python ``Fraction`` object to ``dtype``. Convert a GMPY ``mpz`` object to ``dtype``. Convert a GMPY ``mpq`` object to ``dtype``. Convert a ``GaussianElement`` object to ``dtype``. Convert a mpmath ``mpf`` object to ``dtype``. Exact quotient of ``a`` and ``b``, implies ``__truediv__``. Quotient of ``a`` and ``b``, implies ``__truediv__``. Remainder of ``a`` and ``b``, implies nothing. Division of ``a`` and ``b``, implies ``__truediv__``. Returns numerator of ``a``. Returns denominator of ``a``. Returns factorial of ``a``. | 2.348285 | 2 |
gamebeater/profiles/forms.py | GTmmiller/gamebeater | 1 | 6624918 | from django import forms
from django.contrib.auth.models import User
from .statuses import CompletionStatus
from .models import GameOwnership, Goal
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class CompletionStatusUpdateForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('completion_status',)
class GameOwnershipForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('owner', 'game', 'platform', 'ownership_status', 'completion_status')
widgets = {'owner': forms.HiddenInput(), 'game': forms.HiddenInput}
class GameOwnershipUpdateForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('platform', 'ownership_status', 'completion_status')
class GoalForm(forms.ModelForm):
class Meta:
model = Goal
fields = ('ownership', 'text', 'start_time', 'complete_time', 'status')
widgets = {'ownership': forms.HiddenInput()}
class GoalCompletionStatusUpdateForm(forms.ModelForm):
class Meta:
model = Goal
fields = ('status',) | from django import forms
from django.contrib.auth.models import User
from .statuses import CompletionStatus
from .models import GameOwnership, Goal
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class CompletionStatusUpdateForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('completion_status',)
class GameOwnershipForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('owner', 'game', 'platform', 'ownership_status', 'completion_status')
widgets = {'owner': forms.HiddenInput(), 'game': forms.HiddenInput}
class GameOwnershipUpdateForm(forms.ModelForm):
class Meta:
model = GameOwnership
fields = ('platform', 'ownership_status', 'completion_status')
class GoalForm(forms.ModelForm):
class Meta:
model = Goal
fields = ('ownership', 'text', 'start_time', 'complete_time', 'status')
widgets = {'ownership': forms.HiddenInput()}
class GoalCompletionStatusUpdateForm(forms.ModelForm):
class Meta:
model = Goal
fields = ('status',) | none | 1 | 2.042032 | 2 | |
test/sql/test_defaults.py | lxl0928/timi_sqlalchemy | 1 | 6624919 | <reponame>lxl0928/timi_sqlalchemy<filename>test/sql/test_defaults.py<gh_stars>1-10
import datetime
import itertools
import sqlalchemy as sa
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import DateTime
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.dialects import sqlite
from sqlalchemy.schema import CreateSequence
from sqlalchemy.schema import CreateTable
from sqlalchemy.schema import DropSequence
from sqlalchemy.sql import literal_column
from sqlalchemy.sql import select
from sqlalchemy.sql import text
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.types import TypeDecorator
from sqlalchemy.types import TypeEngine
from sqlalchemy.util import b
from sqlalchemy.util import u
t = f = f2 = ts = currenttime = metadata = default_generator = None
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_string(self):
# note: that the datatype is an Integer here doesn't matter,
# the server_default is interpreted independently of the
# column's datatype.
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default="5"))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5')"
)
def test_string_w_quotes(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default="5'6"))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT '5''6')"
)
def test_text(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default=text("5 + 8")))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 + 8)"
)
def test_text_w_quotes(self):
m = MetaData()
t = Table("t", m, Column("x", Integer, server_default=text("5 ' 8")))
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 5 ' 8)"
)
def test_literal_binds_w_quotes(self):
m = MetaData()
t = Table(
"t", m, Column("x", Integer, server_default=literal("5 ' 8"))
)
self.assert_compile(
CreateTable(t), """CREATE TABLE t (x INTEGER DEFAULT '5 '' 8')"""
)
def test_text_literal_binds(self):
m = MetaData()
t = Table(
"t",
m,
Column(
"x", Integer, server_default=text("q + :x1").bindparams(x1=7)
),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT q + 7)"
)
def test_sqlexpr(self):
m = MetaData()
t = Table(
"t",
m,
Column(
"x",
Integer,
server_default=literal_column("a") + literal_column("b"),
),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT a + b)"
)
def test_literal_binds_plain(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, server_default=literal("a") + literal("b")),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT 'a' || 'b')"
)
def test_literal_binds_pgarray(self):
from sqlalchemy.dialects.postgresql import ARRAY, array
m = MetaData()
t = Table(
"t",
m,
Column("x", ARRAY(Integer), server_default=array([1, 2, 3])),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
dialect="postgresql",
)
class DefaultTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global t, f, f2, ts, currenttime, metadata, default_generator
db = testing.db
metadata = MetaData(db)
default_generator = {"x": 50}
def mydefault():
default_generator["x"] += 1
return default_generator["x"]
def myupdate_with_ctx(ctx):
conn = ctx.connection
return conn.execute(sa.select([sa.text("13")])).scalar()
def mydefault_using_connection(ctx):
conn = ctx.connection
try:
return conn.execute(sa.select([sa.text("12")])).scalar()
finally:
# ensure a "close()" on this connection does nothing,
# since its a "branched" connection
conn.close()
use_function_defaults = testing.against("postgresql", "mssql")
is_oracle = testing.against("oracle")
class MyClass(object):
@classmethod
def gen_default(cls, ctx):
return "hi"
class MyType(TypeDecorator):
impl = String(50)
def process_bind_param(self, value, dialect):
if value is not None:
value = "BIND" + value
return value
# select "count(1)" returns different results on different DBs also
# correct for "current_date" compatible as column default, value
# differences
currenttime = func.current_date(type_=sa.Date, bind=db)
if is_oracle:
ts = db.scalar(
sa.select(
[
func.trunc(
func.current_timestamp(),
sa.literal_column("'DAY'"),
type_=sa.Date,
)
]
)
)
assert isinstance(ts, datetime.date) and not isinstance(
ts, datetime.datetime
)
f = sa.select([func.length("abcdef")], bind=db).scalar()
f2 = sa.select([func.length("abcdefghijk")], bind=db).scalar()
# TODO: engine propagation across nested functions not working
currenttime = func.trunc(
currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date
)
def1 = currenttime
def2 = func.trunc(
sa.text("current_timestamp"),
sa.literal_column("'DAY'"),
type_=sa.Date,
)
deftype = sa.Date
elif use_function_defaults:
f = sa.select([func.length("abcdef")], bind=db).scalar()
f2 = sa.select([func.length("abcdefghijk")], bind=db).scalar()
def1 = currenttime
deftype = sa.Date
if testing.against("mssql"):
def2 = sa.text("getdate()")
else:
def2 = sa.text("current_date")
ts = db.scalar(func.current_date())
else:
f = len("abcdef")
f2 = len("abcdefghijk")
def1 = def2 = "3"
ts = 3
deftype = Integer
t = Table(
"default_test1",
metadata,
# python function
Column("col1", Integer, primary_key=True, default=mydefault),
# python literal
Column(
"col2",
String(20),
default="imthedefault",
onupdate="im the update",
),
# preexecute expression
Column(
"col3",
Integer,
default=func.length("abcdef"),
onupdate=func.length("abcdefghijk"),
),
# SQL-side default from sql expression
Column("col4", deftype, server_default=def1),
# SQL-side default from literal expression
Column("col5", deftype, server_default=def2),
# preexecute + update timestamp
Column("col6", sa.Date, default=currenttime, onupdate=currenttime),
Column("boolcol1", sa.Boolean, default=True),
Column("boolcol2", sa.Boolean, default=False),
# python function which uses ExecutionContext
Column(
"col7",
Integer,
default=mydefault_using_connection,
onupdate=myupdate_with_ctx,
),
# python builtin
Column(
"col8",
sa.Date,
default=datetime.date.today,
onupdate=datetime.date.today,
),
# combo
Column("col9", String(20), default="py", server_default="ddl"),
# python method w/ context
Column("col10", String(20), default=MyClass.gen_default),
# fixed default w/ type that has bound processor
Column("col11", MyType(), default="foo"),
)
t.create()
@classmethod
def teardown_class(cls):
t.drop()
def teardown(self):
default_generator["x"] = 50
t.delete().execute()
def test_bad_arg_signature(self):
ex_msg = (
"ColumnDefault Python function takes zero "
"or one positional arguments"
)
def fn1(x, y):
pass
def fn2(x, y, z=3):
pass
class fn3(object):
def __init__(self, x, y):
pass
class FN4(object):
def __call__(self, x, y):
pass
fn4 = FN4()
for fn in fn1, fn2, fn3, fn4:
assert_raises_message(
sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn
)
def test_arg_signature(self):
def fn1():
pass
def fn2():
pass
def fn3(x=1):
eq_(x, 1)
def fn4(x=1, y=2, z=3):
eq_(x, 1)
fn5 = list
class fn6a(object):
def __init__(self, x):
eq_(x, "context")
class fn6b(object):
def __init__(self, x, y=3):
eq_(x, "context")
class FN7(object):
def __call__(self, x):
eq_(x, "context")
fn7 = FN7()
class FN8(object):
def __call__(self, x, y=3):
eq_(x, "context")
fn8 = FN8()
for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
c = sa.ColumnDefault(fn)
c.arg("context")
@testing.fails_on("firebird", "Data type unknown")
def test_standalone(self):
c = testing.db.engine.connect()
x = c.execute(t.c.col1.default)
y = t.c.col2.default.execute()
z = c.execute(t.c.col3.default)
assert 50 <= x <= 57
eq_(y, "imthedefault")
eq_(z, f)
eq_(f2, 11)
    def test_py_vs_server_default_detection(self):
        """Verify each Column ends up with exactly the expected subset of
        the four default slots (default / onupdate / server_default /
        server_onupdate) populated, for both the module-level table ``t``
        and a locally constructed table ``t2``."""

        def has_(name, *wanted):
            # assert that exactly the ``wanted`` slots are non-None on
            # column ``name`` of the enclosing ``tbl``
            slots = [
                "default",
                "onupdate",
                "server_default",
                "server_onupdate",
            ]
            col = tbl.c[name]
            for slot in wanted:
                slots.remove(slot)
                assert getattr(col, slot) is not None, getattr(col, slot)
            for slot in slots:
                assert getattr(col, slot) is None, getattr(col, slot)

        tbl = t
        has_("col1", "default")
        has_("col2", "default", "onupdate")
        has_("col3", "default", "onupdate")
        has_("col4", "server_default")
        has_("col5", "server_default")
        has_("col6", "default", "onupdate")
        has_("boolcol1", "default")
        has_("boolcol2", "default")
        has_("col7", "default", "onupdate")
        has_("col8", "default", "onupdate")
        has_("col9", "default", "server_default")
        ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
        # NOTE(review): "col4" is declared twice below; in SQLAlchemy the
        # later Column replaces the earlier one in t2.c — presumably this
        # is intentional to exercise replacement, but worth confirming.
        t2 = Table(
            "t2",
            MetaData(),
            Column("col1", Integer, Sequence("foo")),
            Column(
                "col2", Integer, default=Sequence("foo"), server_default="y"
            ),
            Column("col3", Integer, Sequence("foo"), server_default="x"),
            Column("col4", Integer, ColumnDefault("x"), DefaultClause("y")),
            Column(
                "col4",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                DefaultClause("y", for_update=True),
            ),
            Column(
                "col5",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                onupdate="z",
            ),
            Column(
                "col6",
                Integer,
                ColumnDefault("x"),
                server_default="y",
                onupdate="z",
            ),
            Column(
                "col7", Integer, default="x", server_default="y", onupdate="z"
            ),
            Column(
                "col8",
                Integer,
                server_onupdate="u",
                default="x",
                server_default="y",
                onupdate="z",
            ),
        )
        tbl = t2
        has_("col1", "default")
        has_("col2", "default", "server_default")
        has_("col3", "default", "server_default")
        has_("col4", "default", "server_default", "server_onupdate")
        has_("col5", "default", "server_default", "onupdate")
        has_("col6", "default", "server_default", "onupdate")
        has_("col7", "default", "server_default", "onupdate")
        has_(
            "col8", "default", "server_default", "onupdate", "server_onupdate"
        )
@testing.fails_on("firebird", "Data type unknown")
def test_insert(self):
r = t.insert().execute()
assert r.lastrow_has_defaults()
eq_(
set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
)
r = t.insert(inline=True).execute()
assert r.lastrow_has_defaults()
eq_(
set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
)
t.insert().execute()
ctexec = sa.select(
[currenttime.label("now")], bind=testing.db
).scalar()
result = t.select().order_by(t.c.col1).execute()
today = datetime.date.today()
eq_(
result.fetchall(),
[
(
x,
"imthedefault",
f,
ts,
ts,
ctexec,
True,
False,
12,
today,
"py",
"hi",
"BINDfoo",
)
for x in range(51, 54)
],
)
t.insert().execute(col9=None)
assert r.lastrow_has_defaults()
eq_(
set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
)
eq_(
t.select(t.c.col1 == 54).execute().fetchall(),
[
(
54,
"imthedefault",
f,
ts,
ts,
ctexec,
True,
False,
12,
today,
None,
"hi",
"BINDfoo",
)
],
)
    def test_insertmany(self):
        """executemany-style insert of three empty param dicts applies
        column defaults to every row."""
        t.insert().execute({}, {}, {})
        ctexec = currenttime.scalar()
        result = t.select().order_by(t.c.col1).execute()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    51,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    @testing.requires.multivalues_inserts
    def test_insert_multivalues(self):
        """Multi-VALUES insert of three empty dicts applies column
        defaults to every row (same expectation as test_insertmany)."""
        t.insert().values([{}, {}, {}]).execute()
        ctexec = currenttime.scalar()
        result = t.select().order_by(t.c.col1).execute()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    51,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    def test_no_embed_in_sql(self):
        """Using a DefaultGenerator, Sequence, DefaultClause
        in the columns, where clause of a select, or in the values
        clause of insert, update, raises an informative error"""
        for const in (
            sa.Sequence("y"),
            sa.ColumnDefault("y"),
            sa.DefaultClause("y"),
        ):
            # as a select() column list
            assert_raises_message(
                sa.exc.ArgumentError,
                "SQL expression object expected, got object of type "
                "<.* 'list'> instead",
                t.select,
                [const],
            )
            # in an insert values() clause
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.insert().values(col4=const),
            )
            # in an update values() clause
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.update().values(col4=const),
            )
    def test_missing_many_param(self):
        """executemany with inconsistent keys raises, naming the missing
        bind parameter and the offending parameter group."""
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'col7', in parameter "
            "group 1",
            t.insert().execute,
            {"col4": 7, "col7": 12, "col8": 19},
            {"col4": 7, "col8": 19},
            {"col4": 7, "col7": 12, "col8": 19},
        )
def test_insert_values(self):
t.insert(values={"col3": 50}).execute()
result = t.select().execute()
eq_(50, result.first()["col3"])
@testing.fails_on("firebird", "Data type unknown")
def test_updatemany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if testing.against(
"mysql+mysqldb"
) and testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2):
return
t.insert().execute({}, {}, {})
t.update(t.c.col1 == sa.bindparam("pkval")).execute(
{"pkval": 51, "col7": None, "col8": None, "boolcol1": False}
)
t.update(t.c.col1 == sa.bindparam("pkval")).execute(
{"pkval": 51}, {"pkval": 52}, {"pkval": 53}
)
result = t.select().execute()
ctexec = currenttime.scalar()
today = datetime.date.today()
eq_(
result.fetchall(),
[
(
51,
"im the update",
f2,
ts,
ts,
ctexec,
False,
False,
13,
today,
"py",
"hi",
"BINDfoo",
),
(
52,
"im the update",
f2,
ts,
ts,
ctexec,
True,
False,
13,
today,
"py",
"hi",
"BINDfoo",
),
(
53,
"im the update",
f2,
ts,
ts,
ctexec,
True,
False,
13,
today,
"py",
"hi",
"BINDfoo",
),
],
)
@testing.fails_on("firebird", "Data type unknown")
def test_update(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1 == pk).execute(col4=None, col5=None)
ctexec = currenttime.scalar()
result = t.select(t.c.col1 == pk).execute()
result = result.first()
eq_(
result,
(
pk,
"im the update",
f2,
None,
None,
ctexec,
True,
False,
13,
datetime.date.today(),
"py",
"hi",
"BINDfoo",
),
)
eq_(11, f2)
@testing.fails_on("firebird", "Data type unknown")
def test_update_values(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1 == pk, values={"col3": 55}).execute()
result = t.select(t.c.col1 == pk).execute()
result = result.first()
eq_(55, result["col3"])
class CTEDefaultTest(fixtures.TablesTest):
    """Test that python-side defaults and onupdates fire when a DML
    statement participates in a CTE, either as the CTE itself or as the
    enclosing statement."""

    __requires__ = ("ctes", "returning", "ctes_on_dml")
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # "q": python-side default on x, onupdate on y
        Table(
            "q",
            metadata,
            Column("x", Integer, default=2),
            Column("y", Integer, onupdate=5),
            Column("z", Integer),
        )
        # "p": onupdate on u
        Table(
            "p",
            metadata,
            Column("s", Integer),
            Column("t", Integer),
            Column("u", Integer, onupdate=1),
        )

    def _test_a_in_b(self, a, b):
        """Run statement kind ``a`` ("delete" / "insert" / "update" /
        "select") as a CTE consumed by statement kind ``b``, then assert
        the statement's rows and the final state of table "q".
        """
        q = self.tables.q
        p = self.tables.p

        with testing.db.connect() as conn:
            if a == "delete":
                conn.execute(q.insert().values(y=10, z=1))
                cte = q.delete().where(q.c.z == 1).returning(q.c.z).cte("c")
                # table "q" ends up empty
                expected = None
            elif a == "insert":
                cte = q.insert().values(z=1, y=10).returning(q.c.z).cte("c")
                # x takes its python-side default of 2
                expected = (2, 10)
            elif a == "update":
                conn.execute(q.insert().values(x=5, y=10, z=1))
                cte = (
                    q.update()
                    .where(q.c.z == 1)
                    .values(x=7)
                    .returning(q.c.z)
                    .cte("c")
                )
                # y's onupdate of 5 fires
                expected = (7, 5)
            elif a == "select":
                conn.execute(q.insert().values(x=5, y=10, z=1))
                cte = sa.select([q.c.z]).cte("c")
                expected = (5, 10)

            if b == "select":
                conn.execute(p.insert().values(s=1))
                stmt = select([p.c.s, cte.c.z])
            elif b == "insert":
                sel = select([1, cte.c.z])
                stmt = (
                    p.insert()
                    .from_select(["s", "t"], sel)
                    .returning(p.c.s, p.c.t)
                )
            elif b == "delete":
                stmt = (
                    p.insert().values(s=1, t=cte.c.z).returning(p.c.s, cte.c.z)
                )
            elif b == "update":
                conn.execute(p.insert().values(s=1))
                stmt = (
                    p.update()
                    .values(t=5)
                    .where(p.c.s == cte.c.z)
                    .returning(p.c.u, cte.c.z)
                )
            eq_(conn.execute(stmt).fetchall(), [(1, 1)])
            eq_(conn.execute(select([q.c.x, q.c.y])).fetchone(), expected)

    @testing.requires.ctes_on_dml
    def test_update_in_select(self):
        self._test_a_in_b("update", "select")

    @testing.requires.ctes_on_dml
    def test_delete_in_select(self):
        # fixed copy-paste: was "update", leaving the "delete" branch
        # of _test_a_in_b unexercised
        self._test_a_in_b("delete", "select")

    @testing.requires.ctes_on_dml
    def test_insert_in_select(self):
        # fixed copy-paste: was "update", leaving the "insert" branch
        # of _test_a_in_b unexercised
        self._test_a_in_b("insert", "select")

    def test_select_in_update(self):
        self._test_a_in_b("select", "update")

    def test_select_in_insert(self):
        self._test_a_in_b("select", "insert")

    # TODO: updates / inserts can be run in one statement w/ CTE ?
    # deletes?
class PKDefaultTest(fixtures.TablesTest):
    """Primary key columns whose value comes from a SQL-expression
    default (scalar subquery or SQL function)."""

    __requires__ = ("subqueries",)
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        t2 = Table("t2", metadata, Column("nextid", Integer))
        # t1.id defaults to SELECT max(t2.nextid)
        Table(
            "t1",
            metadata,
            Column(
                "id",
                Integer,
                primary_key=True,
                default=sa.select([func.max(t2.c.nextid)]).as_scalar(),
            ),
            Column("data", String(30)),
        )
        # date_table.date_id defaults to the server's current_timestamp
        Table(
            "date_table",
            metadata,
            Column(
                "date_id",
                DateTime,
                default=text("current_timestamp"),
                primary_key=True,
            ),
        )

    @testing.requires.returning
    def test_with_implicit_returning(self):
        self._test(True)

    def test_regular(self):
        self._test(False)

    def _test(self, returning):
        # exercise PK defaults with and without implicit RETURNING
        t2, t1, date_table = (
            self.tables.t2,
            self.tables.t1,
            self.tables.date_table,
        )
        if not returning and not testing.db.dialect.implicit_returning:
            engine = testing.db
        else:
            engine = engines.testing_engine(
                options={"implicit_returning": returning}
            )
        with engine.begin() as conn:
            conn.execute(t2.insert(), nextid=1)
            r = conn.execute(t1.insert(), data="hi")
            eq_([1], r.inserted_primary_key)
            conn.execute(t2.insert(), nextid=2)
            r = conn.execute(t1.insert(), data="there")
            eq_([2], r.inserted_primary_key)
            r = conn.execute(date_table.insert())
            assert isinstance(r.inserted_primary_key[0], datetime.datetime)
class PKIncrementTest(fixtures.TablesTest):
    """Autoincrementing primary keys produce distinct, sequential ids,
    both in autocommit and in an explicit transaction."""

    run_define_tables = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "aitable",
            metadata,
            Column(
                "id",
                Integer,
                Sequence("ai_id_seq", optional=True),
                primary_key=True,
            ),
            Column("int1", Integer),
            Column("str1", String(20)),
        )

    # TODO: add coverage for increment on a secondary column in a key
    @testing.fails_on("firebird", "Data type unknown")
    def _test_autoincrement(self, bind):
        aitable = self.tables.aitable
        ids = set()
        rs = bind.execute(aitable.insert(), int1=1)
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), str1="row 2")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), int1=3, str1="row 3")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        # SQL-expression value for int1: length("four") == 4
        rs = bind.execute(aitable.insert(values={"int1": func.length("four")}))
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        eq_(ids, set([1, 2, 3, 4]))
        eq_(
            list(bind.execute(aitable.select().order_by(aitable.c.id))),
            [(1, 1, None), (2, None, "row 2"), (3, 3, "row 3"), (4, 4, None)],
        )

    def test_autoincrement_autocommit(self):
        self._test_autoincrement(testing.db)

    def test_autoincrement_transaction(self):
        # same assertions, inside an explicit transaction; roll back on
        # failure, commit on success, always close the connection
        con = testing.db.connect()
        tx = con.begin()
        try:
            try:
                self._test_autoincrement(con)
            except Exception:
                try:
                    tx.rollback()
                except Exception:
                    pass
                raise
            else:
                tx.commit()
        finally:
            con.close()
class EmptyInsertTest(fixtures.TestBase):
    """An INSERT with no values still applies server defaults."""

    __backend__ = True

    @testing.exclude("sqlite", "<", (3, 3, 8), "no empty insert support")
    @testing.fails_on("oracle", "FIXME: unknown")
    @testing.provide_metadata
    def test_empty_insert(self):
        t1 = Table(
            "t1",
            self.metadata,
            Column("is_true", Boolean, server_default=("1")),
        )
        self.metadata.create_all()
        t1.insert().execute()
        # exactly one row, with the server default applied
        eq_(1, select([func.count(text("*"))], from_obj=t1).scalar())
        eq_(True, t1.select().scalar())
class AutoIncrementTest(fixtures.TablesTest):
    """Detection and behavior of the autoincrement column on a Table."""

    __requires__ = ("identity",)
    run_define_tables = "each"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        """Each test manipulates self.metadata individually."""

    @testing.exclude("sqlite", "<", (3, 4), "no database support")
    def test_autoincrement_single_col(self):
        single = Table(
            "single", self.metadata, Column("id", Integer, primary_key=True)
        )
        single.create()
        r = single.insert().execute()
        id_ = r.inserted_primary_key[0]
        eq_(id_, 1)
        eq_(1, sa.select([func.count(sa.text("*"))], from_obj=single).scalar())

    def test_autoincrement_fk(self):
        # self-referential FK does not interfere with autoincrement
        nodes = Table(
            "nodes",
            self.metadata,
            Column("id", Integer, primary_key=True),
            Column("parent_id", Integer, ForeignKey("nodes.id")),
            Column("data", String(30)),
        )
        nodes.create()
        r = nodes.insert().execute(data="foo")
        id_ = r.inserted_primary_key[0]
        nodes.insert().execute(data="bar", parent_id=id_)

    def test_autoinc_detection_no_affinity(self):
        # a type with no _type_affinity disqualifies the column from
        # autoincrement detection
        class MyType(TypeDecorator):
            impl = TypeEngine

        assert MyType()._type_affinity is None
        t = Table("x", MetaData(), Column("id", MyType(), primary_key=True))
        assert t._autoincrement_column is None

    def test_autoincrement_ignore_fk(self):
        # autoincrement="ignore_fk" opts a FK-bearing PK column back in
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column(
                "id",
                Integer(),
                ForeignKey("y.id"),
                autoincrement="ignore_fk",
                primary_key=True,
            ),
        )
        assert x._autoincrement_column is x.c.id

    def test_autoincrement_fk_disqualifies(self):
        # by default a ForeignKey disqualifies the PK from autoincrement
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column("id", Integer(), ForeignKey("y.id"), primary_key=True),
        )
        assert x._autoincrement_column is None

    @testing.only_on("sqlite")
    def test_non_autoincrement(self):
        # sqlite INT primary keys can be non-unique! (only for ints)
        nonai = Table(
            "nonaitest",
            self.metadata,
            Column("id", Integer, autoincrement=False, primary_key=True),
            Column("data", String(20)),
        )
        nonai.create()

        def go():
            # postgresql + mysql strict will fail on first row,
            # mysql in legacy mode fails on second row
            nonai.insert().execute(data="row 1")
            nonai.insert().execute(data="row 2")

        # just testing SQLite for now, it passes
        with expect_warnings(".*has no Python-side or server-side default.*"):
            go()

    def test_col_w_sequence_non_autoinc_no_firing(self):
        metadata = self.metadata
        # plain autoincrement/PK table in the actual schema
        Table("x", metadata, Column("set_id", Integer, primary_key=True))
        metadata.create_all()
        # for the INSERT use a table with a Sequence
        # and autoincrement=False. Using a ForeignKey
        # would have the same effect
        dataset_no_autoinc = Table(
            "x",
            MetaData(),
            Column(
                "set_id",
                Integer,
                Sequence("some_seq"),
                primary_key=True,
                autoincrement=False,
            ),
        )
        testing.db.execute(dataset_no_autoinc.insert())
        eq_(
            testing.db.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )
class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """CREATE SEQUENCE / DROP SEQUENCE DDL rendering for the various
    Sequence options."""

    __dialect__ = "default"
    __backend__ = True

    def test_create_drop_ddl(self):
        # (Sequence construct, expected CREATE SEQUENCE DDL) pairs
        create_cases = [
            (Sequence("foo_seq"), "CREATE SEQUENCE foo_seq"),
            (
                Sequence("foo_seq", start=5),
                "CREATE SEQUENCE foo_seq START WITH 5",
            ),
            (
                Sequence("foo_seq", increment=2),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2",
            ),
            (
                Sequence("foo_seq", increment=2, start=5),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
            ),
            (
                Sequence("foo_seq", increment=2, start=0, minvalue=0),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 MINVALUE 0",
            ),
            (
                Sequence("foo_seq", increment=2, start=1, maxvalue=5),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 MAXVALUE 5",
            ),
            (
                Sequence("foo_seq", increment=2, start=1, nomaxvalue=True),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 NO MAXVALUE",
            ),
            (
                Sequence("foo_seq", increment=2, start=0, nominvalue=True),
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 NO MINVALUE",
            ),
            (
                Sequence("foo_seq", start=1, maxvalue=10, cycle=True),
                "CREATE SEQUENCE foo_seq START WITH 1 MAXVALUE 10 CYCLE",
            ),
            (
                Sequence("foo_seq", cache=1000, order=True),
                "CREATE SEQUENCE foo_seq CACHE 1000 ORDER",
            ),
            (
                Sequence("foo_seq", order=True),
                "CREATE SEQUENCE foo_seq ORDER",
            ),
        ]
        for seq, expected_sql in create_cases:
            self.assert_compile(CreateSequence(seq), expected_sql)
        self.assert_compile(
            DropSequence(Sequence("foo_seq")), "DROP SEQUENCE foo_seq"
        )
class SequenceExecTest(fixtures.TestBase):
    """Executing a Sequence directly and via func.next_value(), through
    the various connection / connectionless code paths."""

    __requires__ = ("sequences",)
    __backend__ = True

    @classmethod
    def setup_class(cls):
        cls.seq = Sequence("my_sequence")
        cls.seq.create(testing.db)

    @classmethod
    def teardown_class(cls):
        cls.seq.drop(testing.db)

    def _assert_seq_result(self, ret):
        """asserts return of next_value is an int"""
        assert isinstance(ret, util.int_types)
        assert ret > 0

    def test_implicit_connectionless(self):
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.execute())

    def test_explicit(self):
        s = Sequence("my_sequence")
        self._assert_seq_result(s.execute(testing.db))

    def test_explicit_optional(self):
        """test dialect executes a Sequence, returns nextval, whether
        or not "optional" is set """
        s = Sequence("my_sequence", optional=True)
        self._assert_seq_result(s.execute(testing.db))

    def test_func_implicit_connectionless_execute(self):
        """test func.next_value().execute()/.scalar() works
        with connectionless execution. """
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.next_value().execute().scalar())

    def test_func_explicit(self):
        s = Sequence("my_sequence")
        self._assert_seq_result(testing.db.scalar(s.next_value()))

    def test_func_implicit_connectionless_scalar(self):
        """test func.next_value().execute()/.scalar() works. """
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.next_value().scalar())

    def test_func_embedded_select(self):
        """test can use next_value() in select column expr"""
        s = Sequence("my_sequence")
        self._assert_seq_result(testing.db.scalar(select([s.next_value()])))

    @testing.fails_on("oracle", "ORA-02287: sequence number not allowed here")
    @testing.provide_metadata
    def test_func_embedded_whereclause(self):
        """test can use next_value() in whereclause"""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer))
        t1.create(testing.db)
        testing.db.execute(t1.insert(), [{"x": 1}, {"x": 300}, {"x": 301}])
        s = Sequence("my_sequence")
        eq_(
            testing.db.execute(
                t1.select().where(t1.c.x > s.next_value())
            ).fetchall(),
            [(300,), (301,)],
        )

    @testing.provide_metadata
    def test_func_embedded_valuesbase(self):
        """test can use next_value() in values() of _ValuesBase"""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer))
        t1.create(testing.db)
        s = Sequence("my_sequence")
        testing.db.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(testing.db.scalar(t1.select()))

    @testing.provide_metadata
    def test_inserted_pk_no_returning(self):
        """test inserted_primary_key contains [None] when
        pk_col=next_value(), implicit returning is not used."""
        metadata = self.metadata
        e = engines.testing_engine(options={"implicit_returning": False})
        s = Sequence("my_sequence")
        metadata.bind = e
        t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
        t1.create()
        r = e.execute(t1.insert().values(x=s.next_value()))
        eq_(r.inserted_primary_key, [None])

    @testing.requires.returning
    @testing.provide_metadata
    def test_inserted_pk_implicit_returning(self):
        """test inserted_primary_key contains the result when
        pk_col=next_value(), when implicit returning is used."""
        metadata = self.metadata
        e = engines.testing_engine(options={"implicit_returning": True})
        s = Sequence("my_sequence")
        metadata.bind = e
        t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
        t1.create()
        r = e.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(r.inserted_primary_key[0])
class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Sequence creation/drop lifecycle, checkfirst behavior, and
    rendering of the nextval construct."""

    __requires__ = ("sequences",)
    __backend__ = True

    @testing.fails_on("firebird", "no FB support for start/increment")
    def test_start_increment(self):
        # values drawn from the sequence follow start/increment
        for seq in (
            Sequence("foo_seq"),
            Sequence("foo_seq", start=8),
            Sequence("foo_seq", increment=5),
        ):
            seq.create(testing.db)
            try:
                values = [testing.db.execute(seq) for i in range(3)]
                start = seq.start or 1
                inc = seq.increment or 1
                assert values == list(range(start, start + inc * 3, inc))
            finally:
                seq.drop(testing.db)

    def _has_sequence(self, name):
        return testing.db.dialect.has_sequence(testing.db, name)

    def test_nextval_render(self):
        """test dialect renders the "nextval" construct,
        whether or not "optional" is set """
        for s in (Sequence("my_seq"), Sequence("my_seq", optional=True)):
            # accepted renderings across postgresql / firebird / oracle
            assert str(s.next_value().compile(dialect=testing.db.dialect)) in (
                "nextval('my_seq')",
                "gen_id(my_seq, 1)",
                "my_seq.nextval",
            )

    def test_nextval_unsupported(self):
        """test next_value() used on non-sequence platform
        raises NotImplementedError."""
        s = Sequence("my_seq")
        d = sqlite.dialect()
        assert_raises_message(
            NotImplementedError,
            "Dialect 'sqlite' does not support sequence increments.",
            s.next_value().compile,
            dialect=d,
        )

    def test_checkfirst_sequence(self):
        s = Sequence("my_sequence")
        s.create(testing.db, checkfirst=False)
        assert self._has_sequence("my_sequence")
        # checkfirst=True is a no-op when the sequence already exists
        s.create(testing.db, checkfirst=True)
        s.drop(testing.db, checkfirst=False)
        assert not self._has_sequence("my_sequence")
        s.drop(testing.db, checkfirst=True)

    def test_checkfirst_metadata(self):
        m = MetaData()
        Sequence("my_sequence", metadata=m)
        m.create_all(testing.db, checkfirst=False)
        assert self._has_sequence("my_sequence")
        m.create_all(testing.db, checkfirst=True)
        m.drop_all(testing.db, checkfirst=False)
        assert not self._has_sequence("my_sequence")
        m.drop_all(testing.db, checkfirst=True)

    def test_checkfirst_table(self):
        m = MetaData()
        s = Sequence("my_sequence")
        t = Table("t", m, Column("c", Integer, s, primary_key=True))
        t.create(testing.db, checkfirst=False)
        assert self._has_sequence("my_sequence")
        t.create(testing.db, checkfirst=True)
        t.drop(testing.db, checkfirst=False)
        assert not self._has_sequence("my_sequence")
        t.drop(testing.db, checkfirst=True)

    @testing.provide_metadata
    def test_table_overrides_metadata_create(self):
        metadata = self.metadata
        Sequence("s1", metadata=metadata)
        s2 = Sequence("s2", metadata=metadata)
        s3 = Sequence("s3")
        t = Table("t", metadata, Column("c", Integer, s3, primary_key=True))
        assert s3.metadata is metadata
        t.create(testing.db, checkfirst=True)
        s3.drop(testing.db)
        # 't' is created, and 's3' won't be
        # re-created since it's linked to 't'.
        # 's1' and 's2' are, however.
        metadata.create_all(testing.db)
        assert self._has_sequence("s1")
        assert self._has_sequence("s2")
        assert not self._has_sequence("s3")
        s2.drop(testing.db)
        assert self._has_sequence("s1")
        assert not self._has_sequence("s2")
        metadata.drop_all(testing.db)
        assert not self._has_sequence("s1")
        assert not self._has_sequence("s2")

    @testing.requires.returning
    @testing.provide_metadata
    def test_freestanding_sequence_via_autoinc(self):
        # a standalone Sequence used as the python-side default of an
        # autoincrement PK column
        t = Table(
            "some_table",
            self.metadata,
            Column(
                "id",
                Integer,
                autoincrement=True,
                primary_key=True,
                default=Sequence(
                    "my_sequence", metadata=self.metadata
                ).next_value(),
            ),
        )
        self.metadata.create_all(testing.db)
        result = testing.db.execute(t.insert())
        eq_(result.inserted_primary_key, [1])
# Module-level fixtures populated by TableBoundSequenceTest.setup_class.
cartitems = sometable = metadata = None
class TableBoundSequenceTest(fixtures.TestBase):
    """Sequences attached to table columns fire as defaults on insert,
    for both primary-key and non-primary-key columns."""

    __requires__ = ("sequences",)
    __backend__ = True

    @classmethod
    def setup_class(cls):
        # populates the module-level cartitems / sometable / metadata
        global cartitems, sometable, metadata
        metadata = MetaData(testing.db)
        cartitems = Table(
            "cartitems",
            metadata,
            Column(
                "cart_id", Integer, Sequence("cart_id_seq"), primary_key=True
            ),
            Column("description", String(40)),
            Column("createdate", sa.DateTime()),
        )
        sometable = Table(
            "Manager",
            metadata,
            Column("obj_id", Integer, Sequence("obj_id_seq")),
            Column("name", String(128)),
            Column(
                "id",
                Integer,
                Sequence("Manager_id_seq", optional=True),
                primary_key=True,
            ),
        )
        metadata.create_all()

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_insert_via_seq(self):
        cartitems.insert().execute(description="hi")
        cartitems.insert().execute(description="there")
        r = cartitems.insert().execute(description="lala")
        assert r.inserted_primary_key and r.inserted_primary_key[0] is not None
        id_ = r.inserted_primary_key[0]
        eq_(
            1,
            sa.select(
                [func.count(cartitems.c.cart_id)],
                sa.and_(
                    cartitems.c.description == "lala",
                    cartitems.c.cart_id == id_,
                ),
            ).scalar(),
        )
        cartitems.select().execute().fetchall()

    def test_seq_nonpk(self):
        """test sequences fire off as defaults on non-pk columns"""
        engine = engines.testing_engine(options={"implicit_returning": False})
        result = engine.execute(sometable.insert(), name="somename")
        # non-PK sequence column must be post-fetched
        assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
        result = engine.execute(sometable.insert(), name="someother")
        assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
        sometable.insert().execute({"name": "name3"}, {"name": "name4"})
        eq_(
            sometable.select().order_by(sometable.c.id).execute().fetchall(),
            [
                (1, "somename", 1),
                (2, "someother", 2),
                (3, "name3", 3),
                (4, "name4", 4),
            ],
        )
class SequenceAsServerDefaultTest(
    testing.AssertsExecutionResults, fixtures.TablesTest
):
    """Sequences used as server_default, fired by the database itself
    for both textual and Core inserts; also DDL drop ordering."""

    __requires__ = ("sequences_as_server_defaults",)
    __backend__ = True
    run_create_tables = "each"

    @classmethod
    def define_tables(cls, metadata):
        m = metadata
        # sequence both attached to the column and used as server default
        s = Sequence("t_seq", metadata=m)
        Table(
            "t_seq_test",
            m,
            Column("id", Integer, s, server_default=s.next_value()),
            Column("data", String(50)),
        )
        # sequence used only as server default, not attached to the column
        s2 = Sequence("t_seq_2", metadata=m)
        Table(
            "t_seq_test_2",
            m,
            Column("id", Integer, server_default=s2.next_value()),
            Column("data", String(50)),
        )

    def test_default_textual_w_default(self):
        with testing.db.connect() as conn:
            conn.execute("insert into t_seq_test (data) values ('some data')")
            eq_(conn.scalar("select id from t_seq_test"), 1)

    def test_default_core_w_default(self):
        t_seq_test = self.tables.t_seq_test
        with testing.db.connect() as conn:
            conn.execute(t_seq_test.insert().values(data="some data"))
            eq_(conn.scalar(select([t_seq_test.c.id])), 1)

    def test_default_textual_server_only(self):
        with testing.db.connect() as conn:
            conn.execute(
                "insert into t_seq_test_2 (data) values ('some data')"
            )
            eq_(conn.scalar("select id from t_seq_test_2"), 1)

    def test_default_core_server_only(self):
        t_seq_test = self.tables.t_seq_test_2
        with testing.db.connect() as conn:
            conn.execute(t_seq_test.insert().values(data="some data"))
            eq_(conn.scalar(select([t_seq_test.c.id])), 1)

    def test_drop_ordering(self):
        # column-attached sequence drops with its table; the
        # server-default-only sequence drops at the metadata level
        self.assert_sql_execution(
            testing.db,
            lambda: self.metadata.drop_all(checkfirst=False),
            AllOf(
                CompiledSQL("DROP TABLE t_seq_test_2", {}),
                EachOf(
                    CompiledSQL("DROP TABLE t_seq_test", {}),
                    CompiledSQL(
                        "DROP SEQUENCE t_seq",  # dropped as part of t_seq_test
                        {},
                    ),
                ),
            ),
            CompiledSQL(
                "DROP SEQUENCE t_seq_2",  # dropped as part of metadata level
                {},
            ),
        )
class SpecialTypePKTest(fixtures.TestBase):
    """test process_result_value in conjunction with primary key columns.
    Also tests that "autoincrement" checks are against
    column.type._type_affinity, rather than the class of "type" itself.
    """

    __backend__ = True

    @classmethod
    def setup_class(cls):
        # TypeDecorator mapping "INT_<n>" strings <-> integers
        class MyInteger(TypeDecorator):
            impl = Integer

            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                return int(value[4:])

            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                return "INT_%d" % value

        cls.MyInteger = MyInteger

    @testing.provide_metadata
    def _run_test(self, *arg, **kw):
        """Create a table whose PK uses MyInteger plus the given default
        arguments, insert a row, and assert the fetched primary key."""
        metadata = self.metadata
        implicit_returning = kw.pop("implicit_returning", True)
        kw["primary_key"] = True
        if kw.get("autoincrement", True):
            kw["test_needs_autoincrement"] = True
        t = Table(
            "x",
            metadata,
            Column("y", self.MyInteger, *arg, **kw),
            Column("data", Integer),
            implicit_returning=implicit_returning,
        )
        t.create()
        r = t.insert().values(data=5).execute()
        # we don't pre-fetch 'server_default'.
        if "server_default" in kw and (
            not testing.db.dialect.implicit_returning or not implicit_returning
        ):
            eq_(r.inserted_primary_key, [None])
        else:
            eq_(r.inserted_primary_key, ["INT_1"])
        r.close()
        eq_(t.select().execute().first(), ("INT_1", 5))

    def test_plain(self):
        # among other things, tests that autoincrement
        # is enabled.
        self._run_test()

    def test_literal_default_label(self):
        self._run_test(
            default=literal("INT_1", type_=self.MyInteger).label("foo")
        )

    def test_literal_default_no_label(self):
        self._run_test(default=literal("INT_1", type_=self.MyInteger))

    def test_literal_column_default_no_label(self):
        self._run_test(default=literal_column("1", type_=self.MyInteger))

    def test_sequence(self):
        self._run_test(Sequence("foo_seq"))

    def test_text_clause_default_no_type(self):
        self._run_test(default=text("1"))

    def test_server_default(self):
        self._run_test(server_default="1")

    def test_server_default_no_autoincrement(self):
        self._run_test(server_default="1", autoincrement=False)

    def test_clause(self):
        stmt = select([cast("INT_1", type_=self.MyInteger)]).as_scalar()
        self._run_test(default=stmt)

    @testing.requires.returning
    def test_no_implicit_returning(self):
        self._run_test(implicit_returning=False)

    @testing.requires.returning
    def test_server_default_no_implicit_returning(self):
        # NOTE(review): this is identical to
        # test_server_default_no_autoincrement; the name suggests
        # implicit_returning=False was intended here — confirm.
        self._run_test(server_default="1", autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
    """Behavior of inserted_primary_key when the PK value comes from a
    server_default, with and without implicit RETURNING."""

    __backend__ = True

    @testing.provide_metadata
    def test_string_default_none_on_insert(self):
        """Test that without implicit returning, we return None for
        a string server default.
        That is, we don't want to attempt to pre-execute "server_default"
        generically - the user should use a Python side-default for a case
        like this. Testing that all backends do the same thing here.
        """
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all()
        r = t.insert().execute(data="data")
        eq_(r.inserted_primary_key, [None])
        eq_(t.select().execute().fetchall(), [("key_one", "data")])

    @testing.requires.returning
    @testing.provide_metadata
    def test_string_default_on_insert_with_returning(self):
        """With implicit_returning, we get a string PK default back no
        problem."""
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column(
                "y", String(10), server_default="key_one", primary_key=True
            ),
            Column("data", String(10)),
        )
        metadata.create_all()
        r = t.insert().execute(data="data")
        eq_(r.inserted_primary_key, ["key_one"])
        eq_(t.select().execute().fetchall(), [("key_one", "data")])

    @testing.provide_metadata
    def test_int_default_none_on_insert(self):
        # an integer server_default disqualifies autoincrement, so the
        # inserted PK is not available without RETURNING
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        assert t._autoincrement_column is None
        metadata.create_all()
        r = t.insert().execute(data="data")
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            # sqlite ignores the server default and autoincrements
            eq_(t.select().execute().fetchall(), [(1, "data")])
        else:
            eq_(t.select().execute().fetchall(), [(5, "data")])

    @testing.provide_metadata
    def test_autoincrement_reflected_from_server_default(self):
        # reflection also yields no autoincrement column
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        assert t._autoincrement_column is None
        metadata.create_all()
        m2 = MetaData(metadata.bind)
        t2 = Table("x", m2, autoload=True, implicit_returning=False)
        assert t2._autoincrement_column is None

    @testing.provide_metadata
    def test_int_default_none_on_insert_reflected(self):
        # same as test_int_default_none_on_insert, via a reflected Table
        metadata = self.metadata
        Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
            implicit_returning=False,
        )
        metadata.create_all()
        m2 = MetaData(metadata.bind)
        t2 = Table("x", m2, autoload=True, implicit_returning=False)
        r = t2.insert().execute(data="data")
        eq_(r.inserted_primary_key, [None])
        if testing.against("sqlite"):
            eq_(t2.select().execute().fetchall(), [(1, "data")])
        else:
            eq_(t2.select().execute().fetchall(), [(5, "data")])

    @testing.requires.returning
    @testing.provide_metadata
    def test_int_default_on_insert_with_returning(self):
        # with RETURNING available, the server-defaulted int PK comes back
        metadata = self.metadata
        t = Table(
            "x",
            metadata,
            Column("y", Integer, server_default="5", primary_key=True),
            Column("data", String(10)),
        )
        metadata.create_all()
        r = t.insert().execute(data="data")
        eq_(r.inserted_primary_key, [5])
        eq_(t.select().execute().fetchall(), [(5, "data")])
class UnicodeDefaultsTest(fixtures.TestBase):
    """Defaults on Unicode columns: text values pass silently, a
    bytestring default triggers a warning."""

    __backend__ = True

    def test_no_default(self):
        # constructing a Unicode column with no default emits nothing
        Column(Unicode(32))

    def test_unicode_default(self):
        # a text (unicode) default is accepted without complaint
        Column(Unicode(32), default=u("foo"))

    def test_nonunicode_default(self):
        # a bytestring default on a Unicode column warns
        assert_raises_message(
            sa.exc.SAWarning,
            "Unicode column 'foobar' has non-unicode "
            "default value b?'foo' specified.",
            Column,
            "foobar",
            Unicode(32),
            default=b("foo"),
        )
class InsertFromSelectTest(fixtures.TestBase):
    """INSERT ... FROM SELECT, and how Python-side defaults fill the
    columns the SELECT does not supply."""

    __backend__ = True

    def _fixture(self):
        # source table pre-loaded with two rows
        src = Table(
            "data", self.metadata, Column("x", Integer), Column("y", Integer)
        )
        src.create()
        testing.db.execute(src.insert(), {"x": 2, "y": 5}, {"x": 7, "y": 12})
        return src

    @testing.provide_metadata
    def test_insert_from_select_override_defaults(self):
        """A scalar default fills the omitted column for every row."""
        src = self._fixture()
        target = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=12),
            Column("y", Integer),
        )
        target.create()

        stmt = target.insert().from_select(
            ["x", "y"], select([src.c.x, src.c.y])
        )
        testing.db.execute(stmt)

        eq_(
            testing.db.execute(
                target.select().order_by(target.c.x)
            ).fetchall(),
            [(2, 12, 5), (7, 12, 12)],
        )

    @testing.provide_metadata
    def test_insert_from_select_fn_defaults(self):
        """A callable default is invoked once per statement, not per row."""
        src = self._fixture()
        seq = itertools.count(1)

        def next_default(ctx):
            return next(seq)

        target = Table(
            "sometable",
            self.metadata,
            Column("x", Integer),
            Column("foo", Integer, default=next_default),
            Column("y", Integer),
        )
        target.create()

        stmt = target.insert().from_select(
            ["x", "y"], select([src.c.x, src.c.y])
        )
        testing.db.execute(stmt)

        # the counter fired only once, so both rows share the value 1
        eq_(
            testing.db.execute(
                target.select().order_by(target.c.x)
            ).fetchall(),
            [(2, 1, 5), (7, 1, 12)],
        )
class CurrentParametersTest(fixtures.TablesTest):
    """Tests for ``context.get_current_parameters()`` as observed from
    inside a Python-side column default function."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # placeholder default; each test swaps in its own callable via
        # _fixture() below
        def gen_default(context):
            pass
        Table(
            "some_table",
            metadata,
            Column("x", String(50), default=gen_default),
            Column("y", String(50)),
        )
    def _fixture(self, fn):
        # rebind the column default so it delegates to ``fn``; the return
        # value of ``fn`` is discarded, so column "x" remains None
        def gen_default(context):
            fn(context)
        some_table = self.tables.some_table
        some_table.c.x.default.arg = gen_default
        return fn
    def _test(self, exec_type, usemethod):
        # NOTE(review): ``usemethod`` is accepted but never consulted in
        # this version -- presumably trimmed from a fuller upstream
        # variant that chose attribute vs. method access; confirm.
        collect = mock.Mock()
        @self._fixture
        def fn(context):
            collect(context.get_current_parameters())
        table = self.tables.some_table
        if exec_type in ("multivalues", "executemany"):
            parameters = [{"y": "h1"}, {"y": "h2"}]
        else:
            parameters = [{"y": "hello"}]
        if exec_type == "multivalues":
            stmt, params = table.insert().values(parameters), {}
        else:
            stmt, params = table.insert(), parameters
        with testing.db.connect() as conn:
            conn.execute(stmt, params)
        # the default fn must see each row's parameters, with the
        # defaulted column itself still unset (None)
        eq_(
            collect.mock_calls,
            [mock.call({"y": param["y"], "x": None}) for param in parameters],
        )
    def test_single_w_attribute(self):
        self._test("single", "attribute")
    def test_single_w_method(self):
        self._test("single", "method")
    def test_executemany_w_attribute(self):
        self._test("executemany", "attribute")
    def test_executemany_w_method(self):
        self._test("executemany", "method")
    @testing.requires.multivalues_inserts
    def test_multivalued_w_method(self):
        self._test("multivalues", "method")
import datetime
import itertools
import sqlalchemy as sa
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import DateTime
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.dialects import sqlite
from sqlalchemy.schema import CreateSequence
from sqlalchemy.schema import CreateTable
from sqlalchemy.schema import DropSequence
from sqlalchemy.sql import literal_column
from sqlalchemy.sql import select
from sqlalchemy.sql import text
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.types import TypeDecorator
from sqlalchemy.types import TypeEngine
from sqlalchemy.util import b
from sqlalchemy.util import u
# Module-level handles shared by DefaultTest; they are (re)assigned in
# DefaultTest.setup_class via ``global`` and reset in its teardown.
t = f = f2 = ts = currenttime = metadata = default_generator = None
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
    """DDL compilation of server_default values: plain strings, text()
    constructs, SQL expressions, and literal binds."""

    __dialect__ = "default"

    def test_string(self):
        # A plain string server_default renders quoted; the Integer
        # datatype here is incidental -- server_default is interpreted
        # independently of the column's type.
        md = MetaData()
        tbl = Table("t", md, Column("x", Integer, server_default="5"))
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT '5')"
        )

    def test_string_w_quotes(self):
        # embedded quotes are doubled on render
        md = MetaData()
        tbl = Table("t", md, Column("x", Integer, server_default="5'6"))
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT '5''6')"
        )

    def test_text(self):
        # text() is emitted verbatim, unquoted
        md = MetaData()
        tbl = Table(
            "t", md, Column("x", Integer, server_default=text("5 + 8"))
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 5 + 8)"
        )

    def test_text_w_quotes(self):
        # ...including any quote characters it happens to contain
        md = MetaData()
        tbl = Table(
            "t", md, Column("x", Integer, server_default=text("5 ' 8"))
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 5 ' 8)"
        )

    def test_literal_binds_w_quotes(self):
        # literal() values are escaped like string defaults
        md = MetaData()
        tbl = Table(
            "t", md, Column("x", Integer, server_default=literal("5 ' 8"))
        )
        self.assert_compile(
            CreateTable(tbl), """CREATE TABLE t (x INTEGER DEFAULT '5 '' 8')"""
        )

    def test_text_literal_binds(self):
        # bound parameters inside text() are rendered inline in DDL
        md = MetaData()
        tbl = Table(
            "t",
            md,
            Column(
                "x", Integer, server_default=text("q + :x1").bindparams(x1=7)
            ),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT q + 7)"
        )

    def test_sqlexpr(self):
        # an arbitrary SQL expression compiles as-is
        md = MetaData()
        tbl = Table(
            "t",
            md,
            Column(
                "x",
                Integer,
                server_default=literal_column("a") + literal_column("b"),
            ),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT a + b)"
        )

    def test_literal_binds_plain(self):
        # literal() operands render as inline-quoted literals
        md = MetaData()
        tbl = Table(
            "t",
            md,
            Column("x", Integer, server_default=literal("a") + literal("b")),
        )
        self.assert_compile(
            CreateTable(tbl), "CREATE TABLE t (x INTEGER DEFAULT 'a' || 'b')"
        )

    def test_literal_binds_pgarray(self):
        # PG ARRAY defaults render under the postgresql dialect
        from sqlalchemy.dialects.postgresql import ARRAY, array

        md = MetaData()
        tbl = Table(
            "t",
            md,
            Column("x", ARRAY(Integer), server_default=array([1, 2, 3])),
        )
        self.assert_compile(
            CreateTable(tbl),
            "CREATE TABLE t (x INTEGER[] DEFAULT ARRAY[1, 2, 3])",
            dialect="postgresql",
        )
class DefaultTest(fixtures.TestBase):
    """Round trips of Python-side, SQL-expression and server-side column
    defaults / onupdates against a live backend, using the shared
    module-level ``default_test1`` table built in setup_class."""
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # Builds the shared table plus the module-level helpers (t, f,
        # f2, ts, currenttime, ...) that the individual tests compare
        # against; the expected values are backend-dependent.
        global t, f, f2, ts, currenttime, metadata, default_generator
        db = testing.db
        metadata = MetaData(db)
        default_generator = {"x": 50}
        def mydefault():
            # stateful Python default: yields 51, 52, 53, ...
            default_generator["x"] += 1
            return default_generator["x"]
        def myupdate_with_ctx(ctx):
            conn = ctx.connection
            return conn.execute(sa.select([sa.text("13")])).scalar()
        def mydefault_using_connection(ctx):
            conn = ctx.connection
            try:
                return conn.execute(sa.select([sa.text("12")])).scalar()
            finally:
                # ensure a "close()" on this connection does nothing,
                # since its a "branched" connection
                conn.close()
        use_function_defaults = testing.against("postgresql", "mssql")
        is_oracle = testing.against("oracle")
        class MyClass(object):
            @classmethod
            def gen_default(cls, ctx):
                return "hi"
        class MyType(TypeDecorator):
            # type whose bind processor prefixes values with "BIND"
            impl = String(50)
            def process_bind_param(self, value, dialect):
                if value is not None:
                    value = "BIND" + value
                return value
        # select "count(1)" returns different results on different DBs also
        # correct for "current_date" compatible as column default, value
        # differences
        currenttime = func.current_date(type_=sa.Date, bind=db)
        if is_oracle:
            ts = db.scalar(
                sa.select(
                    [
                        func.trunc(
                            func.current_timestamp(),
                            sa.literal_column("'DAY'"),
                            type_=sa.Date,
                        )
                    ]
                )
            )
            assert isinstance(ts, datetime.date) and not isinstance(
                ts, datetime.datetime
            )
            f = sa.select([func.length("abcdef")], bind=db).scalar()
            f2 = sa.select([func.length("abcdefghijk")], bind=db).scalar()
            # TODO: engine propagation across nested functions not working
            currenttime = func.trunc(
                currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date
            )
            def1 = currenttime
            def2 = func.trunc(
                sa.text("current_timestamp"),
                sa.literal_column("'DAY'"),
                type_=sa.Date,
            )
            deftype = sa.Date
        elif use_function_defaults:
            f = sa.select([func.length("abcdef")], bind=db).scalar()
            f2 = sa.select([func.length("abcdefghijk")], bind=db).scalar()
            def1 = currenttime
            deftype = sa.Date
            if testing.against("mssql"):
                def2 = sa.text("getdate()")
            else:
                def2 = sa.text("current_date")
            ts = db.scalar(func.current_date())
        else:
            # generic backends: plain integer defaults stand in for the
            # date/function variants above
            f = len("abcdef")
            f2 = len("abcdefghijk")
            def1 = def2 = "3"
            ts = 3
            deftype = Integer
        t = Table(
            "default_test1",
            metadata,
            # python function
            Column("col1", Integer, primary_key=True, default=mydefault),
            # python literal
            Column(
                "col2",
                String(20),
                default="imthedefault",
                onupdate="im the update",
            ),
            # preexecute expression
            Column(
                "col3",
                Integer,
                default=func.length("abcdef"),
                onupdate=func.length("abcdefghijk"),
            ),
            # SQL-side default from sql expression
            Column("col4", deftype, server_default=def1),
            # SQL-side default from literal expression
            Column("col5", deftype, server_default=def2),
            # preexecute + update timestamp
            Column("col6", sa.Date, default=currenttime, onupdate=currenttime),
            Column("boolcol1", sa.Boolean, default=True),
            Column("boolcol2", sa.Boolean, default=False),
            # python function which uses ExecutionContext
            Column(
                "col7",
                Integer,
                default=mydefault_using_connection,
                onupdate=myupdate_with_ctx,
            ),
            # python builtin
            Column(
                "col8",
                sa.Date,
                default=datetime.date.today,
                onupdate=datetime.date.today,
            ),
            # combo
            Column("col9", String(20), default="py", server_default="ddl"),
            # python method w/ context
            Column("col10", String(20), default=MyClass.gen_default),
            # fixed default w/ type that has bound processor
            Column("col11", MyType(), default="foo"),
        )
        t.create()
    @classmethod
    def teardown_class(cls):
        t.drop()
    def teardown(self):
        # reset the stateful default and empty the table between tests
        default_generator["x"] = 50
        t.delete().execute()
    def test_bad_arg_signature(self):
        """ColumnDefault rejects callables taking 2+ required positionals."""
        ex_msg = (
            "ColumnDefault Python function takes zero "
            "or one positional arguments"
        )
        def fn1(x, y):
            pass
        def fn2(x, y, z=3):
            pass
        class fn3(object):
            def __init__(self, x, y):
                pass
        class FN4(object):
            def __call__(self, x, y):
                pass
        fn4 = FN4()
        for fn in fn1, fn2, fn3, fn4:
            assert_raises_message(
                sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn
            )
    def test_arg_signature(self):
        """Callables with zero or one (possibly defaulted) positional
        arguments are accepted, including classes and __call__ objects."""
        def fn1():
            pass
        def fn2():
            pass
        def fn3(x=1):
            eq_(x, 1)
        def fn4(x=1, y=2, z=3):
            eq_(x, 1)
        fn5 = list
        class fn6a(object):
            def __init__(self, x):
                eq_(x, "context")
        class fn6b(object):
            def __init__(self, x, y=3):
                eq_(x, "context")
        class FN7(object):
            def __call__(self, x):
                eq_(x, "context")
        fn7 = FN7()
        class FN8(object):
            def __call__(self, x, y=3):
                eq_(x, "context")
        fn8 = FN8()
        for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
            c = sa.ColumnDefault(fn)
            c.arg("context")
    @testing.fails_on("firebird", "Data type unknown")
    def test_standalone(self):
        """Default objects can be executed standalone, via a connection
        or via their own execute()."""
        c = testing.db.engine.connect()
        x = c.execute(t.c.col1.default)
        y = t.c.col2.default.execute()
        z = c.execute(t.c.col3.default)
        # col1's stateful default starts at 51; 57 allows for prior firings
        assert 50 <= x <= 57
        eq_(y, "imthedefault")
        eq_(z, f)
        eq_(f2, 11)
    def test_py_vs_server_default_detection(self):
        """The default/onupdate/server_default/server_onupdate slots on
        Column are populated exactly as each argument spelling dictates."""
        def has_(name, *wanted):
            # asserts exactly the ``wanted`` slots are set, others None
            slots = [
                "default",
                "onupdate",
                "server_default",
                "server_onupdate",
            ]
            col = tbl.c[name]
            for slot in wanted:
                slots.remove(slot)
                assert getattr(col, slot) is not None, getattr(col, slot)
            for slot in slots:
                assert getattr(col, slot) is None, getattr(col, slot)
        tbl = t
        has_("col1", "default")
        has_("col2", "default", "onupdate")
        has_("col3", "default", "onupdate")
        has_("col4", "server_default")
        has_("col5", "server_default")
        has_("col6", "default", "onupdate")
        has_("boolcol1", "default")
        has_("boolcol2", "default")
        has_("col7", "default", "onupdate")
        has_("col8", "default", "onupdate")
        has_("col9", "default", "server_default")
        ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
        # NOTE(review): "col4" is defined twice below; the second Column
        # replaces the first in the Table -- presumably deliberate, confirm.
        t2 = Table(
            "t2",
            MetaData(),
            Column("col1", Integer, Sequence("foo")),
            Column(
                "col2", Integer, default=Sequence("foo"), server_default="y"
            ),
            Column("col3", Integer, Sequence("foo"), server_default="x"),
            Column("col4", Integer, ColumnDefault("x"), DefaultClause("y")),
            Column(
                "col4",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                DefaultClause("y", for_update=True),
            ),
            Column(
                "col5",
                Integer,
                ColumnDefault("x"),
                DefaultClause("y"),
                onupdate="z",
            ),
            Column(
                "col6",
                Integer,
                ColumnDefault("x"),
                server_default="y",
                onupdate="z",
            ),
            Column(
                "col7", Integer, default="x", server_default="y", onupdate="z"
            ),
            Column(
                "col8",
                Integer,
                server_onupdate="u",
                default="x",
                server_default="y",
                onupdate="z",
            ),
        )
        tbl = t2
        has_("col1", "default")
        has_("col2", "default", "server_default")
        has_("col3", "default", "server_default")
        has_("col4", "default", "server_default", "server_onupdate")
        has_("col5", "default", "server_default", "onupdate")
        has_("col6", "default", "server_default", "onupdate")
        has_("col7", "default", "server_default", "onupdate")
        has_(
            "col8", "default", "server_default", "onupdate", "server_onupdate"
        )
    @testing.fails_on("firebird", "Data type unknown")
    def test_insert(self):
        """All defaults fire on plain, inline and explicit-None inserts;
        server-default columns appear in postfetch_cols."""
        r = t.insert().execute()
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        r = t.insert(inline=True).execute()
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        t.insert().execute()
        ctexec = sa.select(
            [currenttime.label("now")], bind=testing.db
        ).scalar()
        result = t.select().order_by(t.c.col1).execute()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    x,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                )
                for x in range(51, 54)
            ],
        )
        t.insert().execute(col9=None)
        # NOTE(review): ``r`` here is still the result of the earlier
        # inline insert above -- confirm whether re-checking it (rather
        # than the col9=None result) is intentional.
        assert r.lastrow_has_defaults()
        eq_(
            set(r.context.postfetch_cols),
            set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]),
        )
        eq_(
            t.select(t.c.col1 == 54).execute().fetchall(),
            [
                (
                    54,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    None,
                    "hi",
                    "BINDfoo",
                )
            ],
        )
    def test_insertmany(self):
        """executemany of three empty param sets fires defaults per row."""
        t.insert().execute({}, {}, {})
        ctexec = currenttime.scalar()
        result = t.select().order_by(t.c.col1).execute()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    51,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    @testing.requires.multivalues_inserts
    def test_insert_multivalues(self):
        """A multi-VALUES insert of three empty dicts behaves the same."""
        t.insert().values([{}, {}, {}]).execute()
        ctexec = currenttime.scalar()
        result = t.select().order_by(t.c.col1).execute()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    51,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "imthedefault",
                    f,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    12,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    def test_no_embed_in_sql(self):
        """Using a DefaultGenerator, Sequence, DefaultClause
        in the columns, where clause of a select, or in the values
        clause of insert, update, raises an informative error"""
        for const in (
            sa.Sequence("y"),
            sa.ColumnDefault("y"),
            sa.DefaultClause("y"),
        ):
            assert_raises_message(
                sa.exc.ArgumentError,
                "SQL expression object expected, got object of type "
                "<.* 'list'> instead",
                t.select,
                [const],
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.insert().values(col4=const),
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "cannot be used directly as a column expression.",
                str,
                t.update().values(col4=const),
            )
    def test_missing_many_param(self):
        """executemany requires each param group to supply the same keys."""
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'col7', in parameter "
            "group 1",
            t.insert().execute,
            {"col4": 7, "col7": 12, "col8": 19},
            {"col4": 7, "col8": 19},
            {"col4": 7, "col7": 12, "col8": 19},
        )
    def test_insert_values(self):
        """An explicit value overrides the column's Python default."""
        t.insert(values={"col3": 50}).execute()
        result = t.select().execute()
        eq_(50, result.first()["col3"])
    @testing.fails_on("firebird", "Data type unknown")
    def test_updatemany(self):
        """onupdate values fire per row under executemany UPDATE."""
        # MySQL-Python 1.2.2 breaks functions in execute_many :(
        if testing.against(
            "mysql+mysqldb"
        ) and testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2):
            return
        t.insert().execute({}, {}, {})
        t.update(t.c.col1 == sa.bindparam("pkval")).execute(
            {"pkval": 51, "col7": None, "col8": None, "boolcol1": False}
        )
        t.update(t.c.col1 == sa.bindparam("pkval")).execute(
            {"pkval": 51}, {"pkval": 52}, {"pkval": 53}
        )
        result = t.select().execute()
        ctexec = currenttime.scalar()
        today = datetime.date.today()
        eq_(
            result.fetchall(),
            [
                (
                    51,
                    "im the update",
                    f2,
                    ts,
                    ts,
                    ctexec,
                    False,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    52,
                    "im the update",
                    f2,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
                (
                    53,
                    "im the update",
                    f2,
                    ts,
                    ts,
                    ctexec,
                    True,
                    False,
                    13,
                    today,
                    "py",
                    "hi",
                    "BINDfoo",
                ),
            ],
        )
    @testing.fails_on("firebird", "Data type unknown")
    def test_update(self):
        """A single-row UPDATE fires onupdate values; explicitly passing
        None clears the server-defaulted columns."""
        r = t.insert().execute()
        pk = r.inserted_primary_key[0]
        t.update(t.c.col1 == pk).execute(col4=None, col5=None)
        ctexec = currenttime.scalar()
        result = t.select(t.c.col1 == pk).execute()
        result = result.first()
        eq_(
            result,
            (
                pk,
                "im the update",
                f2,
                None,
                None,
                ctexec,
                True,
                False,
                13,
                datetime.date.today(),
                "py",
                "hi",
                "BINDfoo",
            ),
        )
        eq_(11, f2)
    @testing.fails_on("firebird", "Data type unknown")
    def test_update_values(self):
        """An explicit value in update(values=...) overrides onupdate."""
        r = t.insert().execute()
        pk = r.inserted_primary_key[0]
        t.update(t.c.col1 == pk, values={"col3": 55}).execute()
        result = t.select(t.c.col1 == pk).execute()
        result = result.first()
        eq_(55, result["col3"])
class CTEDefaultTest(fixtures.TablesTest):
    """Defaults / onupdates firing when a CTE produced by one DML
    statement is consumed by another statement.

    ``_test_a_in_b(a, b)`` builds statement ``a`` (insert / update /
    delete / select against table ``q``) as a CTE named "c", embeds it
    in statement ``b`` against table ``p``, then checks both the
    combined statement's result and the final state of ``q``.
    """

    __requires__ = ("ctes", "returning", "ctes_on_dml")
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "q",
            metadata,
            Column("x", Integer, default=2),
            Column("y", Integer, onupdate=5),
            Column("z", Integer),
        )
        Table(
            "p",
            metadata,
            Column("s", Integer),
            Column("t", Integer),
            Column("u", Integer, onupdate=1),
        )

    def _test_a_in_b(self, a, b):
        q = self.tables.q
        p = self.tables.p
        with testing.db.connect() as conn:
            if a == "delete":
                conn.execute(q.insert().values(y=10, z=1))
                cte = q.delete().where(q.c.z == 1).returning(q.c.z).cte("c")
                # table emptied, so the final fetchone() yields None
                expected = None
            elif a == "insert":
                cte = q.insert().values(z=1, y=10).returning(q.c.z).cte("c")
                # x is filled from its Python-side default of 2
                expected = (2, 10)
            elif a == "update":
                conn.execute(q.insert().values(x=5, y=10, z=1))
                cte = (
                    q.update()
                    .where(q.c.z == 1)
                    .values(x=7)
                    .returning(q.c.z)
                    .cte("c")
                )
                # y refreshed by its onupdate of 5
                expected = (7, 5)
            elif a == "select":
                conn.execute(q.insert().values(x=5, y=10, z=1))
                cte = sa.select([q.c.z]).cte("c")
                expected = (5, 10)
            if b == "select":
                conn.execute(p.insert().values(s=1))
                stmt = select([p.c.s, cte.c.z])
            elif b == "insert":
                sel = select([1, cte.c.z])
                stmt = (
                    p.insert()
                    .from_select(["s", "t"], sel)
                    .returning(p.c.s, p.c.t)
                )
            elif b == "delete":
                stmt = (
                    p.insert().values(s=1, t=cte.c.z).returning(p.c.s, cte.c.z)
                )
            elif b == "update":
                conn.execute(p.insert().values(s=1))
                stmt = (
                    p.update()
                    .values(t=5)
                    .where(p.c.s == cte.c.z)
                    .returning(p.c.u, cte.c.z)
                )
            eq_(conn.execute(stmt).fetchall(), [(1, 1)])
            eq_(conn.execute(select([q.c.x, q.c.y])).fetchone(), expected)

    @testing.requires.ctes_on_dml
    def test_update_in_select(self):
        self._test_a_in_b("update", "select")

    @testing.requires.ctes_on_dml
    def test_delete_in_select(self):
        # was a copy/paste of test_update_in_select ("update", "select"),
        # leaving the a == "delete" branch of _test_a_in_b unreachable;
        # exercise the DELETE-as-CTE path the test name promises.
        self._test_a_in_b("delete", "select")

    @testing.requires.ctes_on_dml
    def test_insert_in_select(self):
        # likewise was ("update", "select"); exercise INSERT-as-CTE.
        self._test_a_in_b("insert", "select")

    def test_select_in_update(self):
        self._test_a_in_b("select", "update")

    def test_select_in_insert(self):
        self._test_a_in_b("select", "insert")

    # TODO: updates / inserts can be run in one statement w/ CTE ?
    # deletes?
class PKDefaultTest(fixtures.TablesTest):
    """Primary keys whose value comes from a SQL-level default: a scalar
    subquery and a current_timestamp text() default."""
    __requires__ = ("subqueries",)
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        t2 = Table("t2", metadata, Column("nextid", Integer))
        Table(
            "t1",
            metadata,
            Column(
                "id",
                Integer,
                primary_key=True,
                # scalar subquery default: MAX(t2.nextid) at insert time
                default=sa.select([func.max(t2.c.nextid)]).as_scalar(),
            ),
            Column("data", String(30)),
        )
        Table(
            "date_table",
            metadata,
            Column(
                "date_id",
                DateTime,
                default=text("current_timestamp"),
                primary_key=True,
            ),
        )
    @testing.requires.returning
    def test_with_implicit_returning(self):
        self._test(True)
    def test_regular(self):
        self._test(False)
    def _test(self, returning):
        # the subquery/text defaults are pre-executed, so the inserted
        # PK is known whether or not RETURNING is in play
        t2, t1, date_table = (
            self.tables.t2,
            self.tables.t1,
            self.tables.date_table,
        )
        if not returning and not testing.db.dialect.implicit_returning:
            engine = testing.db
        else:
            engine = engines.testing_engine(
                options={"implicit_returning": returning}
            )
        with engine.begin() as conn:
            conn.execute(t2.insert(), nextid=1)
            r = conn.execute(t1.insert(), data="hi")
            eq_([1], r.inserted_primary_key)
            conn.execute(t2.insert(), nextid=2)
            r = conn.execute(t1.insert(), data="there")
            eq_([2], r.inserted_primary_key)
            r = conn.execute(date_table.insert())
            assert isinstance(r.inserted_primary_key[0], datetime.datetime)
class PKIncrementTest(fixtures.TablesTest):
    """Autoincrement primary key generation via an (optional) Sequence,
    both autocommitted and inside an explicit transaction."""
    run_define_tables = "each"
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "aitable",
            metadata,
            Column(
                "id",
                Integer,
                Sequence("ai_id_seq", optional=True),
                primary_key=True,
            ),
            Column("int1", Integer),
            Column("str1", String(20)),
        )
    # TODO: add coverage for increment on a secondary column in a key
    @testing.fails_on("firebird", "Data type unknown")
    def _test_autoincrement(self, bind):
        # four inserts; each must produce a fresh, non-repeating PK
        aitable = self.tables.aitable
        ids = set()
        rs = bind.execute(aitable.insert(), int1=1)
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), str1="row 2")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(), int1=3, str1="row 3")
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        rs = bind.execute(aitable.insert(values={"int1": func.length("four")}))
        last = rs.inserted_primary_key[0]
        self.assert_(last)
        self.assert_(last not in ids)
        ids.add(last)
        eq_(ids, set([1, 2, 3, 4]))
        eq_(
            list(bind.execute(aitable.select().order_by(aitable.c.id))),
            [(1, 1, None), (2, None, "row 2"), (3, 3, "row 3"), (4, 4, None)],
        )
    def test_autoincrement_autocommit(self):
        self._test_autoincrement(testing.db)
    def test_autoincrement_transaction(self):
        # same checks inside an explicit transaction; commit on success,
        # best-effort rollback (then re-raise) on failure
        con = testing.db.connect()
        tx = con.begin()
        try:
            try:
                self._test_autoincrement(con)
            except Exception:
                try:
                    tx.rollback()
                except Exception:
                    pass
                raise
            else:
                tx.commit()
        finally:
            con.close()
class EmptyInsertTest(fixtures.TestBase):
    """INSERT with no values at all: the single column is filled from
    its server default."""

    __backend__ = True

    @testing.exclude("sqlite", "<", (3, 3, 8), "no empty insert support")
    @testing.fails_on("oracle", "FIXME: unknown")
    @testing.provide_metadata
    def test_empty_insert(self):
        tbl = Table(
            "t1",
            self.metadata,
            Column("is_true", Boolean, server_default=("1")),
        )
        self.metadata.create_all()

        tbl.insert().execute()

        row_count = select([func.count(text("*"))], from_obj=tbl).scalar()
        eq_(1, row_count)
        eq_(True, tbl.select().scalar())
class AutoIncrementTest(fixtures.TablesTest):
    """Which column (if any) is selected as the implicit autoincrement
    column under various PK / FK / type configurations."""
    __requires__ = ("identity",)
    run_define_tables = "each"
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Each test manipulates self.metadata individually."""
    @testing.exclude("sqlite", "<", (3, 4), "no database support")
    def test_autoincrement_single_col(self):
        single = Table(
            "single", self.metadata, Column("id", Integer, primary_key=True)
        )
        single.create()
        r = single.insert().execute()
        id_ = r.inserted_primary_key[0]
        eq_(id_, 1)
        eq_(1, sa.select([func.count(sa.text("*"))], from_obj=single).scalar())
    def test_autoincrement_fk(self):
        # self-referential FK; the generated id can feed a child row
        nodes = Table(
            "nodes",
            self.metadata,
            Column("id", Integer, primary_key=True),
            Column("parent_id", Integer, ForeignKey("nodes.id")),
            Column("data", String(30)),
        )
        nodes.create()
        r = nodes.insert().execute(data="foo")
        id_ = r.inserted_primary_key[0]
        nodes.insert().execute(data="bar", parent_id=id_)
    def test_autoinc_detection_no_affinity(self):
        # a type with no type affinity cannot drive autoincrement
        class MyType(TypeDecorator):
            impl = TypeEngine
        assert MyType()._type_affinity is None
        t = Table("x", MetaData(), Column("id", MyType(), primary_key=True))
        assert t._autoincrement_column is None
    def test_autoincrement_ignore_fk(self):
        # autoincrement="ignore_fk" restores autoincrement despite the FK
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column(
                "id",
                Integer(),
                ForeignKey("y.id"),
                autoincrement="ignore_fk",
                primary_key=True,
            ),
        )
        assert x._autoincrement_column is x.c.id
    def test_autoincrement_fk_disqualifies(self):
        # a plain FK on the PK column disqualifies autoincrement
        m = MetaData()
        Table("y", m, Column("id", Integer(), primary_key=True))
        x = Table(
            "x",
            m,
            Column("id", Integer(), ForeignKey("y.id"), primary_key=True),
        )
        assert x._autoincrement_column is None
    @testing.only_on("sqlite")
    def test_non_autoincrement(self):
        # sqlite INT primary keys can be non-unique! (only for ints)
        nonai = Table(
            "nonaitest",
            self.metadata,
            Column("id", Integer, autoincrement=False, primary_key=True),
            Column("data", String(20)),
        )
        nonai.create()
        def go():
            # postgresql + mysql strict will fail on first row,
            # mysql in legacy mode fails on second row
            nonai.insert().execute(data="row 1")
            nonai.insert().execute(data="row 2")
        # just testing SQLite for now, it passes
        with expect_warnings(".*has no Python-side or server-side default.*"):
            go()
    def test_col_w_sequence_non_autoinc_no_firing(self):
        # NOTE(review): name says the Sequence should not fire; the
        # assertion below only confirms the insert succeeds and leaves
        # one row -- confirm the non-firing claim against the dialect.
        metadata = self.metadata
        # plain autoincrement/PK table in the actual schema
        Table("x", metadata, Column("set_id", Integer, primary_key=True))
        metadata.create_all()
        # for the INSERT use a table with a Sequence
        # and autoincrement=False. Using a ForeignKey
        # would have the same effect
        dataset_no_autoinc = Table(
            "x",
            MetaData(),
            Column(
                "set_id",
                Integer,
                Sequence("some_seq"),
                primary_key=True,
                autoincrement=False,
            ),
        )
        testing.db.execute(dataset_no_autoinc.insert())
        eq_(
            testing.db.scalar(
                select([func.count("*")]).select_from(dataset_no_autoinc)
            ),
            1,
        )
class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Compilation of CREATE / DROP SEQUENCE across the option matrix."""

    __dialect__ = "default"
    __backend__ = True

    def test_create_drop_ddl(self):
        # (Sequence kwargs, expected CREATE SEQUENCE SQL) pairs; each is
        # compiled and compared in order, exactly as the unrolled
        # original sequence of assert_compile calls did.
        create_cases = [
            ({}, "CREATE SEQUENCE foo_seq"),
            ({"start": 5}, "CREATE SEQUENCE foo_seq START WITH 5"),
            ({"increment": 2}, "CREATE SEQUENCE foo_seq INCREMENT BY 2"),
            (
                {"increment": 2, "start": 5},
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
            ),
            (
                {"increment": 2, "start": 0, "minvalue": 0},
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 "
                "MINVALUE 0",
            ),
            (
                {"increment": 2, "start": 1, "maxvalue": 5},
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 "
                "MAXVALUE 5",
            ),
            (
                {"increment": 2, "start": 1, "nomaxvalue": True},
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 1 "
                "NO MAXVALUE",
            ),
            (
                {"increment": 2, "start": 0, "nominvalue": True},
                "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 0 "
                "NO MINVALUE",
            ),
            (
                {"start": 1, "maxvalue": 10, "cycle": True},
                "CREATE SEQUENCE foo_seq START WITH 1 MAXVALUE 10 CYCLE",
            ),
            (
                {"cache": 1000, "order": True},
                "CREATE SEQUENCE foo_seq CACHE 1000 ORDER",
            ),
            ({"order": True}, "CREATE SEQUENCE foo_seq ORDER"),
        ]
        for kwargs, expected_sql in create_cases:
            self.assert_compile(
                CreateSequence(Sequence("foo_seq", **kwargs)), expected_sql
            )
        self.assert_compile(
            DropSequence(Sequence("foo_seq")), "DROP SEQUENCE foo_seq"
        )
class SequenceExecTest(fixtures.TestBase):
    """The various ways a Sequence / next_value() can be executed:
    standalone, connectionless, embedded in SELECT/VALUES, and as a
    primary key source with and without implicit RETURNING."""
    __requires__ = ("sequences",)
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # one real sequence in the database, shared by all tests
        cls.seq = Sequence("my_sequence")
        cls.seq.create(testing.db)
    @classmethod
    def teardown_class(cls):
        cls.seq.drop(testing.db)
    def _assert_seq_result(self, ret):
        """asserts return of next_value is an int"""
        assert isinstance(ret, util.int_types)
        assert ret > 0
    def test_implicit_connectionless(self):
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.execute())
    def test_explicit(self):
        s = Sequence("my_sequence")
        self._assert_seq_result(s.execute(testing.db))
    def test_explicit_optional(self):
        """test dialect executes a Sequence, returns nextval, whether
        or not "optional" is set """
        s = Sequence("my_sequence", optional=True)
        self._assert_seq_result(s.execute(testing.db))
    def test_func_implicit_connectionless_execute(self):
        """test func.next_value().execute()/.scalar() works
        with connectionless execution. """
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.next_value().execute().scalar())
    def test_func_explicit(self):
        s = Sequence("my_sequence")
        self._assert_seq_result(testing.db.scalar(s.next_value()))
    def test_func_implicit_connectionless_scalar(self):
        """test func.next_value().execute()/.scalar() works. """
        s = Sequence("my_sequence", metadata=MetaData(testing.db))
        self._assert_seq_result(s.next_value().scalar())
    def test_func_embedded_select(self):
        """test can use next_value() in select column expr"""
        s = Sequence("my_sequence")
        self._assert_seq_result(testing.db.scalar(select([s.next_value()])))
    @testing.fails_on("oracle", "ORA-02287: sequence number not allowed here")
    @testing.provide_metadata
    def test_func_embedded_whereclause(self):
        """test can use next_value() in whereclause"""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer))
        t1.create(testing.db)
        testing.db.execute(t1.insert(), [{"x": 1}, {"x": 300}, {"x": 301}])
        s = Sequence("my_sequence")
        eq_(
            testing.db.execute(
                t1.select().where(t1.c.x > s.next_value())
            ).fetchall(),
            [(300,), (301,)],
        )
    @testing.provide_metadata
    def test_func_embedded_valuesbase(self):
        """test can use next_value() in values() of _ValuesBase"""
        metadata = self.metadata
        t1 = Table("t", metadata, Column("x", Integer))
        t1.create(testing.db)
        s = Sequence("my_sequence")
        testing.db.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(testing.db.scalar(t1.select()))
    @testing.provide_metadata
    def test_inserted_pk_no_returning(self):
        """test inserted_primary_key contains [None] when
        pk_col=next_value(), implicit returning is not used."""
        metadata = self.metadata
        e = engines.testing_engine(options={"implicit_returning": False})
        s = Sequence("my_sequence")
        metadata.bind = e
        t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
        t1.create()
        r = e.execute(t1.insert().values(x=s.next_value()))
        eq_(r.inserted_primary_key, [None])
    @testing.requires.returning
    @testing.provide_metadata
    def test_inserted_pk_implicit_returning(self):
        """test inserted_primary_key contains the result when
        pk_col=next_value(), when implicit returning is used."""
        metadata = self.metadata
        e = engines.testing_engine(options={"implicit_returning": True})
        s = Sequence("my_sequence")
        metadata.bind = e
        t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
        t1.create()
        r = e.execute(t1.insert().values(x=s.next_value()))
        self._assert_seq_result(r.inserted_primary_key[0])
class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__requires__ = ("sequences",)
__backend__ = True
@testing.fails_on("firebird", "no FB support for start/increment")
def test_start_increment(self):
for seq in (
Sequence("foo_seq"),
Sequence("foo_seq", start=8),
Sequence("foo_seq", increment=5),
):
seq.create(testing.db)
try:
values = [testing.db.execute(seq) for i in range(3)]
start = seq.start or 1
inc = seq.increment or 1
assert values == list(range(start, start + inc * 3, inc))
finally:
seq.drop(testing.db)
def _has_sequence(self, name):
return testing.db.dialect.has_sequence(testing.db, name)
def test_nextval_render(self):
"""test dialect renders the "nextval" construct,
whether or not "optional" is set """
for s in (Sequence("my_seq"), Sequence("my_seq", optional=True)):
assert str(s.next_value().compile(dialect=testing.db.dialect)) in (
"nextval('my_seq')",
"gen_id(my_seq, 1)",
"my_seq.nextval",
)
def test_nextval_unsupported(self):
"""test next_value() used on non-sequence platform
raises NotImplementedError."""
s = Sequence("my_seq")
d = sqlite.dialect()
assert_raises_message(
NotImplementedError,
"Dialect 'sqlite' does not support sequence increments.",
s.next_value().compile,
dialect=d,
)
def test_checkfirst_sequence(self):
s = Sequence("my_sequence")
s.create(testing.db, checkfirst=False)
assert self._has_sequence("my_sequence")
s.create(testing.db, checkfirst=True)
s.drop(testing.db, checkfirst=False)
assert not self._has_sequence("my_sequence")
s.drop(testing.db, checkfirst=True)
def test_checkfirst_metadata(self):
m = MetaData()
Sequence("my_sequence", metadata=m)
m.create_all(testing.db, checkfirst=False)
assert self._has_sequence("my_sequence")
m.create_all(testing.db, checkfirst=True)
m.drop_all(testing.db, checkfirst=False)
assert not self._has_sequence("my_sequence")
m.drop_all(testing.db, checkfirst=True)
def test_checkfirst_table(self):
m = MetaData()
s = Sequence("my_sequence")
t = Table("t", m, Column("c", Integer, s, primary_key=True))
t.create(testing.db, checkfirst=False)
assert self._has_sequence("my_sequence")
t.create(testing.db, checkfirst=True)
t.drop(testing.db, checkfirst=False)
assert not self._has_sequence("my_sequence")
t.drop(testing.db, checkfirst=True)
@testing.provide_metadata
def test_table_overrides_metadata_create(self):
metadata = self.metadata
Sequence("s1", metadata=metadata)
s2 = Sequence("s2", metadata=metadata)
s3 = Sequence("s3")
t = Table("t", metadata, Column("c", Integer, s3, primary_key=True))
assert s3.metadata is metadata
t.create(testing.db, checkfirst=True)
s3.drop(testing.db)
# 't' is created, and 's3' won't be
# re-created since it's linked to 't'.
# 's1' and 's2' are, however.
metadata.create_all(testing.db)
assert self._has_sequence("s1")
assert self._has_sequence("s2")
assert not self._has_sequence("s3")
s2.drop(testing.db)
assert self._has_sequence("s1")
assert not self._has_sequence("s2")
metadata.drop_all(testing.db)
assert not self._has_sequence("s1")
assert not self._has_sequence("s2")
@testing.requires.returning
@testing.provide_metadata
def test_freestanding_sequence_via_autoinc(self):
t = Table(
"some_table",
self.metadata,
Column(
"id",
Integer,
autoincrement=True,
primary_key=True,
default=Sequence(
"my_sequence", metadata=self.metadata
).next_value(),
),
)
self.metadata.create_all(testing.db)
result = testing.db.execute(t.insert())
eq_(result.inserted_primary_key, [1])
cartitems = sometable = metadata = None
class TableBoundSequenceTest(fixtures.TestBase):
__requires__ = ("sequences",)
__backend__ = True
@classmethod
def setup_class(cls):
global cartitems, sometable, metadata
metadata = MetaData(testing.db)
cartitems = Table(
"cartitems",
metadata,
Column(
"cart_id", Integer, Sequence("cart_id_seq"), primary_key=True
),
Column("description", String(40)),
Column("createdate", sa.DateTime()),
)
sometable = Table(
"Manager",
metadata,
Column("obj_id", Integer, Sequence("obj_id_seq")),
Column("name", String(128)),
Column(
"id",
Integer,
Sequence("Manager_id_seq", optional=True),
primary_key=True,
),
)
metadata.create_all()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_insert_via_seq(self):
cartitems.insert().execute(description="hi")
cartitems.insert().execute(description="there")
r = cartitems.insert().execute(description="lala")
assert r.inserted_primary_key and r.inserted_primary_key[0] is not None
id_ = r.inserted_primary_key[0]
eq_(
1,
sa.select(
[func.count(cartitems.c.cart_id)],
sa.and_(
cartitems.c.description == "lala",
cartitems.c.cart_id == id_,
),
).scalar(),
)
cartitems.select().execute().fetchall()
def test_seq_nonpk(self):
"""test sequences fire off as defaults on non-pk columns"""
engine = engines.testing_engine(options={"implicit_returning": False})
result = engine.execute(sometable.insert(), name="somename")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
result = engine.execute(sometable.insert(), name="someother")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
sometable.insert().execute({"name": "name3"}, {"name": "name4"})
eq_(
sometable.select().order_by(sometable.c.id).execute().fetchall(),
[
(1, "somename", 1),
(2, "someother", 2),
(3, "name3", 3),
(4, "name4", 4),
],
)
class SequenceAsServerDefaultTest(
testing.AssertsExecutionResults, fixtures.TablesTest
):
__requires__ = ("sequences_as_server_defaults",)
__backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
m = metadata
s = Sequence("t_seq", metadata=m)
Table(
"t_seq_test",
m,
Column("id", Integer, s, server_default=s.next_value()),
Column("data", String(50)),
)
s2 = Sequence("t_seq_2", metadata=m)
Table(
"t_seq_test_2",
m,
Column("id", Integer, server_default=s2.next_value()),
Column("data", String(50)),
)
def test_default_textual_w_default(self):
with testing.db.connect() as conn:
conn.execute("insert into t_seq_test (data) values ('some data')")
eq_(conn.scalar("select id from t_seq_test"), 1)
def test_default_core_w_default(self):
t_seq_test = self.tables.t_seq_test
with testing.db.connect() as conn:
conn.execute(t_seq_test.insert().values(data="some data"))
eq_(conn.scalar(select([t_seq_test.c.id])), 1)
def test_default_textual_server_only(self):
with testing.db.connect() as conn:
conn.execute(
"insert into t_seq_test_2 (data) values ('some data')"
)
eq_(conn.scalar("select id from t_seq_test_2"), 1)
def test_default_core_server_only(self):
t_seq_test = self.tables.t_seq_test_2
with testing.db.connect() as conn:
conn.execute(t_seq_test.insert().values(data="some data"))
eq_(conn.scalar(select([t_seq_test.c.id])), 1)
def test_drop_ordering(self):
self.assert_sql_execution(
testing.db,
lambda: self.metadata.drop_all(checkfirst=False),
AllOf(
CompiledSQL("DROP TABLE t_seq_test_2", {}),
EachOf(
CompiledSQL("DROP TABLE t_seq_test", {}),
CompiledSQL(
"DROP SEQUENCE t_seq", # dropped as part of t_seq_test
{},
),
),
),
CompiledSQL(
"DROP SEQUENCE t_seq_2", # dropped as part of metadata level
{},
),
)
class SpecialTypePKTest(fixtures.TestBase):
"""test process_result_value in conjunction with primary key columns.
Also tests that "autoincrement" checks are against
column.type._type_affinity, rather than the class of "type" itself.
"""
__backend__ = True
@classmethod
def setup_class(cls):
class MyInteger(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
if value is None:
return None
return int(value[4:])
def process_result_value(self, value, dialect):
if value is None:
return None
return "INT_%d" % value
cls.MyInteger = MyInteger
@testing.provide_metadata
def _run_test(self, *arg, **kw):
metadata = self.metadata
implicit_returning = kw.pop("implicit_returning", True)
kw["primary_key"] = True
if kw.get("autoincrement", True):
kw["test_needs_autoincrement"] = True
t = Table(
"x",
metadata,
Column("y", self.MyInteger, *arg, **kw),
Column("data", Integer),
implicit_returning=implicit_returning,
)
t.create()
r = t.insert().values(data=5).execute()
# we don't pre-fetch 'server_default'.
if "server_default" in kw and (
not testing.db.dialect.implicit_returning or not implicit_returning
):
eq_(r.inserted_primary_key, [None])
else:
eq_(r.inserted_primary_key, ["INT_1"])
r.close()
eq_(t.select().execute().first(), ("INT_1", 5))
def test_plain(self):
# among other things, tests that autoincrement
# is enabled.
self._run_test()
def test_literal_default_label(self):
self._run_test(
default=literal("INT_1", type_=self.MyInteger).label("foo")
)
def test_literal_default_no_label(self):
self._run_test(default=literal("INT_1", type_=self.MyInteger))
def test_literal_column_default_no_label(self):
self._run_test(default=literal_column("1", type_=self.MyInteger))
def test_sequence(self):
self._run_test(Sequence("foo_seq"))
def test_text_clause_default_no_type(self):
self._run_test(default=text("1"))
def test_server_default(self):
self._run_test(server_default="1")
def test_server_default_no_autoincrement(self):
self._run_test(server_default="1", autoincrement=False)
def test_clause(self):
stmt = select([cast("INT_1", type_=self.MyInteger)]).as_scalar()
self._run_test(default=stmt)
@testing.requires.returning
def test_no_implicit_returning(self):
self._run_test(implicit_returning=False)
@testing.requires.returning
def test_server_default_no_implicit_returning(self):
self._run_test(server_default="1", autoincrement=False)
class ServerDefaultsOnPKTest(fixtures.TestBase):
__backend__ = True
@testing.provide_metadata
def test_string_default_none_on_insert(self):
"""Test that without implicit returning, we return None for
a string server default.
That is, we don't want to attempt to pre-execute "server_default"
generically - the user should use a Python side-default for a case
like this. Testing that all backends do the same thing here.
"""
metadata = self.metadata
t = Table(
"x",
metadata,
Column(
"y", String(10), server_default="key_one", primary_key=True
),
Column("data", String(10)),
implicit_returning=False,
)
metadata.create_all()
r = t.insert().execute(data="data")
eq_(r.inserted_primary_key, [None])
eq_(t.select().execute().fetchall(), [("key_one", "data")])
@testing.requires.returning
@testing.provide_metadata
def test_string_default_on_insert_with_returning(self):
"""With implicit_returning, we get a string PK default back no
problem."""
metadata = self.metadata
t = Table(
"x",
metadata,
Column(
"y", String(10), server_default="key_one", primary_key=True
),
Column("data", String(10)),
)
metadata.create_all()
r = t.insert().execute(data="data")
eq_(r.inserted_primary_key, ["key_one"])
eq_(t.select().execute().fetchall(), [("key_one", "data")])
@testing.provide_metadata
def test_int_default_none_on_insert(self):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
assert t._autoincrement_column is None
metadata.create_all()
r = t.insert().execute(data="data")
eq_(r.inserted_primary_key, [None])
if testing.against("sqlite"):
eq_(t.select().execute().fetchall(), [(1, "data")])
else:
eq_(t.select().execute().fetchall(), [(5, "data")])
@testing.provide_metadata
def test_autoincrement_reflected_from_server_default(self):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
assert t._autoincrement_column is None
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table("x", m2, autoload=True, implicit_returning=False)
assert t2._autoincrement_column is None
@testing.provide_metadata
def test_int_default_none_on_insert_reflected(self):
metadata = self.metadata
Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
metadata.create_all()
m2 = MetaData(metadata.bind)
t2 = Table("x", m2, autoload=True, implicit_returning=False)
r = t2.insert().execute(data="data")
eq_(r.inserted_primary_key, [None])
if testing.against("sqlite"):
eq_(t2.select().execute().fetchall(), [(1, "data")])
else:
eq_(t2.select().execute().fetchall(), [(5, "data")])
@testing.requires.returning
@testing.provide_metadata
def test_int_default_on_insert_with_returning(self):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
)
metadata.create_all()
r = t.insert().execute(data="data")
eq_(r.inserted_primary_key, [5])
eq_(t.select().execute().fetchall(), [(5, "data")])
class UnicodeDefaultsTest(fixtures.TestBase):
__backend__ = True
def test_no_default(self):
Column(Unicode(32))
def test_unicode_default(self):
default = u("foo")
Column(Unicode(32), default=default)
def test_nonunicode_default(self):
default = b("foo")
assert_raises_message(
sa.exc.SAWarning,
"Unicode column 'foobar' has non-unicode "
"default value b?'foo' specified.",
Column,
"foobar",
Unicode(32),
default=default,
)
class InsertFromSelectTest(fixtures.TestBase):
__backend__ = True
def _fixture(self):
data = Table(
"data", self.metadata, Column("x", Integer), Column("y", Integer)
)
data.create()
testing.db.execute(data.insert(), {"x": 2, "y": 5}, {"x": 7, "y": 12})
return data
@testing.provide_metadata
def test_insert_from_select_override_defaults(self):
data = self._fixture()
table = Table(
"sometable",
self.metadata,
Column("x", Integer),
Column("foo", Integer, default=12),
Column("y", Integer),
)
table.create()
sel = select([data.c.x, data.c.y])
ins = table.insert().from_select(["x", "y"], sel)
testing.db.execute(ins)
eq_(
testing.db.execute(table.select().order_by(table.c.x)).fetchall(),
[(2, 12, 5), (7, 12, 12)],
)
@testing.provide_metadata
def test_insert_from_select_fn_defaults(self):
data = self._fixture()
counter = itertools.count(1)
def foo(ctx):
return next(counter)
table = Table(
"sometable",
self.metadata,
Column("x", Integer),
Column("foo", Integer, default=foo),
Column("y", Integer),
)
table.create()
sel = select([data.c.x, data.c.y])
ins = table.insert().from_select(["x", "y"], sel)
testing.db.execute(ins)
# counter is only called once!
eq_(
testing.db.execute(table.select().order_by(table.c.x)).fetchall(),
[(2, 1, 5), (7, 1, 12)],
)
class CurrentParametersTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
def gen_default(context):
pass
Table(
"some_table",
metadata,
Column("x", String(50), default=gen_default),
Column("y", String(50)),
)
def _fixture(self, fn):
def gen_default(context):
fn(context)
some_table = self.tables.some_table
some_table.c.x.default.arg = gen_default
return fn
def _test(self, exec_type, usemethod):
collect = mock.Mock()
@self._fixture
def fn(context):
collect(context.get_current_parameters())
table = self.tables.some_table
if exec_type in ("multivalues", "executemany"):
parameters = [{"y": "h1"}, {"y": "h2"}]
else:
parameters = [{"y": "hello"}]
if exec_type == "multivalues":
stmt, params = table.insert().values(parameters), {}
else:
stmt, params = table.insert(), parameters
with testing.db.connect() as conn:
conn.execute(stmt, params)
eq_(
collect.mock_calls,
[mock.call({"y": param["y"], "x": None}) for param in parameters],
)
def test_single_w_attribute(self):
self._test("single", "attribute")
def test_single_w_method(self):
self._test("single", "method")
def test_executemany_w_attribute(self):
self._test("executemany", "attribute")
def test_executemany_w_method(self):
self._test("executemany", "method")
@testing.requires.multivalues_inserts
def test_multivalued_w_method(self):
self._test("multivalues", "method") | en | 0.763762 | # note: that the datatype is an Integer here doesn't matter, # the server_default is interpreted independently of the # column's datatype. CREATE TABLE t (x INTEGER DEFAULT '5 '' 8') # ensure a "close()" on this connection does nothing, # since its a "branched" connection # select "count(1)" returns different results on different DBs also # correct for "current_date" compatible as column default, value # differences # TODO: engine propagation across nested functions not working # python function # python literal # preexecute expression # SQL-side default from sql expression # SQL-side default from literal expression # preexecute + update timestamp # python function which uses ExecutionContext # python builtin # combo # python method w/ context # fixed default w/ type that has bound processor Using a DefaultGenerator, Sequence, DefaultClause in the columns, where clause of a select, or in the values clause of insert, update, raises an informative error # MySQL-Python 1.2.2 breaks functions in execute_many :( # TODO: updates / inserts can be run in one statement w/ CTE ? # deletes? # TODO: add coverage for increment on a secondary column in a key Each test manipulates self.metadata individually. # sqlite INT primary keys can be non-unique! (only for ints) # postgresql + mysql strict will fail on first row, # mysql in legacy mode fails on second row # just testing SQLite for now, it passes # plain autoincrement/PK table in the actual schema # for the INSERT use a table with a Sequence # and autoincrement=False. Using a ForeignKey # would have the same effect asserts return of next_value is an int test dialect executes a Sequence, returns nextval, whether or not "optional" is set test func.next_value().execute()/.scalar() works with connectionless execution. test func.next_value().execute()/.scalar() works. 
test can use next_value() in select column expr test can use next_value() in whereclause test can use next_value() in values() of _ValuesBase test inserted_primary_key contains [None] when pk_col=next_value(), implicit returning is not used. test inserted_primary_key contains the result when pk_col=next_value(), when implicit returning is used. test dialect renders the "nextval" construct, whether or not "optional" is set test next_value() used on non-sequence platform raises NotImplementedError. # 't' is created, and 's3' won't be # re-created since it's linked to 't'. # 's1' and 's2' are, however. test sequences fire off as defaults on non-pk columns # dropped as part of t_seq_test # dropped as part of metadata level test process_result_value in conjunction with primary key columns. Also tests that "autoincrement" checks are against column.type._type_affinity, rather than the class of "type" itself. # we don't pre-fetch 'server_default'. # among other things, tests that autoincrement # is enabled. Test that without implicit returning, we return None for a string server default. That is, we don't want to attempt to pre-execute "server_default" generically - the user should use a Python side-default for a case like this. Testing that all backends do the same thing here. With implicit_returning, we get a string PK default back no problem. # counter is only called once! | 2.367274 | 2 |
tools/pa_init_input.py | minyez/mykit | 4 | 6624920 | <reponame>minyez/mykit<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
# ====================================================
# File Name : pa_init_input.py
# Creation Date : 18-04-2018
# Created By : <NAME>
# Contact : <EMAIL>
# ====================================================
from pa_classes import abinit_input_files
from argparse import ArgumentParser
import sys
def abinit_init_input(casename, formula, xc_type, pp_type, abinit_address, nproc, mpitype):
a_input = abinit_input_files(casename, formula, xc_type, pp_type)
a_input.set_abinit_run(abinit_address, nproc, mpitype)
return a_input
# ====================================================
def Main(ArgList):
description = '''Initialize the ABINIT input files and a running script'''
parser = ArgumentParser(description=description)
parser.add_argument(dest="casename", metavar="CASE", type=str, help="Casename for the calculation")
parser.add_argument("-f", dest="formula", type=str, default='', help="Formula of the chemical system, e.g. Si1O2")
parser.add_argument('--xc', dest='xc_type', type=str, default='PBE', help="type of XC functional. Default PBE")
parser.add_argument('-n', dest='nproc', type=int, default=1, help="The number of processors used to run ABINIT. Default 1, i.e. serial calculation")
parser.add_argument('--pp', dest='pp_type', type=str, default='paw', help="type of pseudopotential. Default PAW.")
parser.add_argument('--mpi', dest='mpitype', type=str, default='mpiexec', help="Type of MPI executive. mpirun|mpiexec|yhrun")
parser.add_argument('--ae', dest='abinit_address', default='abinit', type=str, help="The path of ABINIT executive, default by searching PATH.")
# initialize options as 'opts'
opts = parser.parse_args()
xc_type = opts.xc_type.lower()
pp_type = opts.pp_type.lower()
a_input = abinit_init_input(opts.casename, opts.formula, xc_type, pp_type, \
opts.abinit_address, opts.nproc, opts.mpitype)
# ==============================
if __name__ == "__main__":
Main(sys.argv)
| #!/usr/bin/env python
# coding=utf-8
# ====================================================
# File Name : pa_init_input.py
# Creation Date : 18-04-2018
# Created By : <NAME>
# Contact : <EMAIL>
# ====================================================
from pa_classes import abinit_input_files
from argparse import ArgumentParser
import sys
def abinit_init_input(casename, formula, xc_type, pp_type, abinit_address, nproc, mpitype):
a_input = abinit_input_files(casename, formula, xc_type, pp_type)
a_input.set_abinit_run(abinit_address, nproc, mpitype)
return a_input
# ====================================================
def Main(ArgList):
description = '''Initialize the ABINIT input files and a running script'''
parser = ArgumentParser(description=description)
parser.add_argument(dest="casename", metavar="CASE", type=str, help="Casename for the calculation")
parser.add_argument("-f", dest="formula", type=str, default='', help="Formula of the chemical system, e.g. Si1O2")
parser.add_argument('--xc', dest='xc_type', type=str, default='PBE', help="type of XC functional. Default PBE")
parser.add_argument('-n', dest='nproc', type=int, default=1, help="The number of processors used to run ABINIT. Default 1, i.e. serial calculation")
parser.add_argument('--pp', dest='pp_type', type=str, default='paw', help="type of pseudopotential. Default PAW.")
parser.add_argument('--mpi', dest='mpitype', type=str, default='mpiexec', help="Type of MPI executive. mpirun|mpiexec|yhrun")
parser.add_argument('--ae', dest='abinit_address', default='abinit', type=str, help="The path of ABINIT executive, default by searching PATH.")
# initialize options as 'opts'
opts = parser.parse_args()
xc_type = opts.xc_type.lower()
pp_type = opts.pp_type.lower()
a_input = abinit_init_input(opts.casename, opts.formula, xc_type, pp_type, \
opts.abinit_address, opts.nproc, opts.mpitype)
# ==============================
if __name__ == "__main__":
Main(sys.argv) | en | 0.425012 | #!/usr/bin/env python # coding=utf-8 # ==================================================== # File Name : pa_init_input.py # Creation Date : 18-04-2018 # Created By : <NAME> # Contact : <EMAIL> # ==================================================== # ==================================================== Initialize the ABINIT input files and a running script # initialize options as 'opts' # ============================== | 2.508392 | 3 |
AO3/users.py | gf0507033/ao3_api | 0 | 6624921 | <reponame>gf0507033/ao3_api<filename>AO3/users.py
import datetime
from cached_property import cached_property
import requests
from bs4 import BeautifulSoup
from . import threadable, utils
from .requester import requester
class User:
    """
    AO3 user object
    """

    def __init__(self, username, session=None, load=True):
        """Creates a new AO3 user object

        Args:
            username (str): AO3 username
            session (AO3.Session, optional): Used to access additional info
            load (bool, optional): If true, the user is loaded on initialization. Defaults to True.
        """

        self.username = username
        self._session = session
        self._soup_works = None
        self._soup_profile = None
        self._soup_bookmarks = None
        self._works = None
        self._bookmarks = None
        if load:
            self.reload()

    def __repr__(self):
        return f"<User [{self.username}]>"

    def __eq__(self, other):
        return isinstance(other, User) and other.username == self.username

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None, making User unhashable
        # and unusable in sets / as dict keys. Hash on the same key that
        # __eq__ compares so equal users hash equally.
        return hash(self.username)

    def __getstate__(self):
        """Makes the object picklable: BeautifulSoup objects can't be pickled,
        so their HTML is stored encoded, flagged for re-parsing on load."""
        d = {}
        for attr in self.__dict__:
            if isinstance(self.__dict__[attr], BeautifulSoup):
                d[attr] = (self.__dict__[attr].encode(), True)
            else:
                d[attr] = (self.__dict__[attr], False)
        return d

    def __setstate__(self, d):
        """Restores state saved by __getstate__, re-parsing stored HTML."""
        for attr in d:
            value, issoup = d[attr]
            if issoup:
                self.__dict__[attr] = BeautifulSoup(value, "lxml")
            else:
                self.__dict__[attr] = value

    def set_session(self, session):
        """Sets the session used to make requests for this work

        Args:
            session (AO3.Session/AO3.GuestSession): session object
        """

        self._session = session

    @threadable.threadable
    def reload(self):
        """
        Loads information about this user.
        This function is threadable.
        """

        # Invalidate every cached_property so values are recomputed from the
        # freshly downloaded pages.
        for attr in self.__class__.__dict__:
            if isinstance(getattr(self.__class__, attr), cached_property):
                if attr in self.__dict__:
                    delattr(self, attr)

        @threadable.threadable
        def req_works(username):
            self._soup_works = self.request(f"https://archiveofourown.org/users/{username}/works")

        @threadable.threadable
        def req_profile(username):
            self._soup_profile = self.request(f"https://archiveofourown.org/users/{username}/profile")

        @threadable.threadable
        def req_bookmarks(username):
            self._soup_bookmarks = self.request(f"https://archiveofourown.org/users/{username}/bookmarks")

        # Fetch the three pages concurrently, then wait for all of them.
        rs = [req_works(self.username, threaded=True),
              req_profile(self.username, threaded=True),
              req_bookmarks(self.username, threaded=True)]
        for r in rs:
            r.join()

        self._works = None
        self._bookmarks = None

    def get_avatar(self):
        """Returns a tuple containing the name of the file and its data

        Returns:
            tuple: (name: str, img: bytes)
        """

        icon = self._soup_profile.find("p", {"class": "icon"})
        src = icon.img.attrs["src"]
        # Derive the filename from the URL path, dropping any query string.
        name = src.split("/")[-1].split("?")[0]
        img = self.get(src).content
        return name, img

    @threadable.threadable
    def subscribe(self):
        """Subscribes to this user.
        This function is threadable.

        Raises:
            utils.AuthError: Invalid session
        """

        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only subscribe to a user using an authenticated session")

        utils.subscribe(self.user_id, "User", self._session)

    @threadable.threadable
    def unsubscribe(self):
        """Unubscribes from this user.
        This function is threadable.

        Raises:
            utils.AuthError: Invalid session
        """

        if not self.is_subscribed:
            raise Exception("You are not subscribed to this user")
        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only unsubscribe from a user using an authenticated session")

        utils.subscribe(self.user_id, "User", self._session, True, self._sub_id)

    @cached_property
    def is_subscribed(self):
        """True if you're subscribed to this user"""

        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only get a user ID using an authenticated session")

        # AO3 renders an "Unsubscribe" submit button only when a subscription
        # already exists.
        header = self._soup_profile.find("div", {"class": "primary header module"})
        input_ = header.find("input", {"name": "commit", "value": "Unsubscribe"})
        return input_ is not None

    @cached_property
    def user_id(self):
        """Numeric AO3 id of this user (requires an authenticated session).

        Raises:
            utils.AuthError: Invalid session
            utils.UnexpectedResponseError: The id couldn't be found in the page
        """
        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only get a user ID using an authenticated session")

        header = self._soup_profile.find("div", {"class": "primary header module"})
        input_ = header.find("input", {"name": "subscription[subscribable_id]"})
        if input_ is None:
            raise utils.UnexpectedResponseError("Couldn't fetch user ID")
        return int(input_.attrs["value"])

    @cached_property
    def _sub_id(self):
        """Returns the subscription ID. Used for unsubscribing"""

        if not self.is_subscribed:
            raise Exception("You are not subscribed to this user")

        # The subscription id is the last path segment of the form's action URL.
        header = self._soup_profile.find("div", {"class": "primary header module"})
        id_ = header.form.attrs["action"].split("/")[-1]
        return int(id_)

    @cached_property
    def works(self):
        """Returns the number of works authored by this user

        Returns:
            int: Number of works
        """

        div = self._soup_works.find("div", {'id': 'inner'})
        span = div.find("span", {'class': 'current'}).getText().replace("(", "").replace(")", "")
        n = span.split(" ")[1]
        return int(self.str_format(n))

    @cached_property
    def _works_pages(self):
        """Number of pages in the user's works listing (1 if unpaginated)."""
        pages = self._soup_works.find("ol", {"title": "pagination"})
        if pages is None:
            return 1
        n = 1
        # The last numeric entry in the pagination list is the page count.
        for li in pages.findAll("li"):
            text = li.getText()
            if text.isdigit():
                n = int(text)
        return n

    def get_works(self, use_threading=False):
        """
        Get works authored by this user. Loads them if they haven't been previously

        Returns:
            list: List of works
        """

        if self._works is None:
            if use_threading:
                self.load_works_threaded()
            else:
                self._works = []
                for page in range(self._works_pages):
                    self._load_works(page=page+1)
        return self._works

    @threadable.threadable
    def load_works_threaded(self):
        """
        Get the user's works using threads.
        This function is threadable.
        """

        threads = []
        self._works = []
        for page in range(self._works_pages):
            threads.append(self._load_works(page=page+1, threaded=True))
        for thread in threads:
            thread.join()

    @threadable.threadable
    def _load_works(self, page=1):
        """Loads one page of the user's works into self._works.
        This function is threadable.

        Args:
            page (int, optional): Page number to fetch. Defaults to 1.
        """
        from .works import Work

        # Parse a local soup instead of assigning self._soup_works: when
        # load_works_threaded() fetches several pages concurrently, sharing
        # that attribute lets one thread's page replace another's mid-parse
        # (and it also clobbers the page-1 soup used by the cached
        # works/_works_pages properties).
        soup = self.request(f"https://archiveofourown.org/users/{self.username}/works?page={page}")
        ol = soup.find("ol", {'class': 'work index group'})

        for work in ol.find_all("li", {'role': 'article'}):
            authors = []
            if work.h4 is None:
                continue
            for a in work.h4.find_all("a"):
                if 'rel' in a.attrs.keys():
                    if "author" in a['rel']:
                        authors.append(User(a.string, load=False))
                elif a.attrs["href"].startswith("/works"):
                    name = a.string
                    id_ = utils.workid_from_url(a['href'])
            new = Work(id_, load=False)
            setattr(new, "title", name)
            setattr(new, "authors", authors)
            if new not in self._works:
                self._works.append(new)

    @cached_property
    def bookmarks(self):
        """Returns the number of works user has bookmarked

        Returns:
            int: Number of bookmarks
        """

        div = self._soup_bookmarks.find("div", {'id': 'inner'})
        span = div.find("span", {'class': 'current'}).getText().replace("(", "").replace(")", "")
        n = span.split(" ")[1]
        return int(self.str_format(n))

    @cached_property
    def _bookmarks_pages(self):
        """Number of pages in the user's bookmarks listing (1 if unpaginated)."""
        pages = self._soup_bookmarks.find("ol", {"title": "pagination"})
        if pages is None:
            return 1
        n = 1
        # The last numeric entry in the pagination list is the page count.
        for li in pages.findAll("li"):
            text = li.getText()
            if text.isdigit():
                n = int(text)
        return n

    def get_bookmarks(self, use_threading=False):
        """
        Get this user's bookmarked works. Loads them if they haven't been previously

        Returns:
            list: List of works
        """

        if self._bookmarks is None:
            if use_threading:
                self.load_bookmarks_threaded()
            else:
                self._bookmarks = []
                for page in range(self._bookmarks_pages):
                    self._load_bookmarks(page=page+1)
        return self._bookmarks

    @threadable.threadable
    def load_bookmarks_threaded(self):
        """
        Get the user's bookmarks using threads.
        This function is threadable.
        """

        threads = []
        self._bookmarks = []
        for page in range(self._bookmarks_pages):
            threads.append(self._load_bookmarks(page=page+1, threaded=True))
        for thread in threads:
            thread.join()

    @threadable.threadable
    def _load_bookmarks(self, page=1):
        """Loads one page of the user's bookmarks into self._bookmarks.
        This function is threadable.

        Args:
            page (int, optional): Page number to fetch. Defaults to 1.
        """
        from .works import Work

        # Same concurrency fix as _load_works: keep the soup local so
        # threaded page loads don't overwrite each other via
        # self._soup_bookmarks.
        soup = self.request(f"https://archiveofourown.org/users/{self.username}/bookmarks?page={page}")
        ol = soup.find("ol", {'class': 'bookmark index group'})

        for work in ol.find_all("li", {'role': 'article'}):
            authors = []
            if work.h4 is None:
                continue
            for a in work.h4.find_all("a"):
                if 'rel' in a.attrs.keys():
                    if "author" in a['rel']:
                        authors.append(User(a.string, load=False))
                elif a.attrs["href"].startswith("/works"):
                    name = a.string
                    id_ = utils.workid_from_url(a['href'])
            new = Work(id_, load=False)
            setattr(new, "title", name)
            setattr(new, "authors", authors)
            if new not in self._bookmarks:
                self._bookmarks.append(new)

    @cached_property
    def bio(self):
        """Returns the user's bio

        Returns:
            str: User's bio
        """

        blockquote = self._soup_profile.find("blockquote", {'class': 'userstuff'})
        return blockquote.getText() if blockquote is not None else ""

    @cached_property
    def url(self):
        """Returns the URL to the user's profile

        Returns:
            str: user profile URL
        """

        return "https://archiveofourown.org/users/%s"%self.username

    def get(self, *args, **kwargs):
        """Request a web page and return a Response object"""

        if self._session is None:
            req = requester.request("get", *args, **kwargs)
        else:
            req = requester.request("get", *args, **kwargs, session=self._session.session)
        if req.status_code == 429:
            raise utils.HTTPError("We are being rate-limited. Try again in a while or reduce the number of requests")
        return req

    def request(self, url):
        """Request a web page and return a BeautifulSoup object.

        Args:
            url (str): Url to request

        Returns:
            bs4.BeautifulSoup: BeautifulSoup object representing the requested page's html
        """

        req = self.get(url)
        soup = BeautifulSoup(req.content, "lxml")
        return soup

    @staticmethod
    def str_format(string):
        """Formats a given string

        Args:
            string (str): String to format

        Returns:
            str: Formatted string
        """

        # Strip thousands separators so the value can be parsed with int().
        return string.replace(",", "")
| import datetime
from cached_property import cached_property
import requests
from bs4 import BeautifulSoup
from . import threadable, utils
from .requester import requester
class User:
"""
AO3 user object
"""
def __init__(self, username, session=None, load=True):
"""Creates a new AO3 user object
Args:
username (str): AO3 username
session (AO3.Session, optional): Used to access additional info
load (bool, optional): If true, the user is loaded on initialization. Defaults to True.
"""
self.username = username
self._session = session
self._soup_works = None
self._soup_profile = None
self._soup_bookmarks = None
self._works = None
self._bookmarks = None
if load:
self.reload()
def __repr__(self):
return f"<User [{self.username}]>"
def __eq__(self, other):
return isinstance(other, User) and other.username == self.username
def __getstate__(self):
d = {}
for attr in self.__dict__:
if isinstance(self.__dict__[attr], BeautifulSoup):
d[attr] = (self.__dict__[attr].encode(), True)
else:
d[attr] = (self.__dict__[attr], False)
return d
def __setstate__(self, d):
for attr in d:
value, issoup = d[attr]
if issoup:
self.__dict__[attr] = BeautifulSoup(value, "lxml")
else:
self.__dict__[attr] = value
def set_session(self, session):
"""Sets the session used to make requests for this work
Args:
session (AO3.Session/AO3.GuestSession): session object
"""
self._session = session
@threadable.threadable
def reload(self):
"""
Loads information about this user.
This function is threadable.
"""
for attr in self.__class__.__dict__:
if isinstance(getattr(self.__class__, attr), cached_property):
if attr in self.__dict__:
delattr(self, attr)
@threadable.threadable
def req_works(username):
self._soup_works = self.request(f"https://archiveofourown.org/users/{username}/works")
@threadable.threadable
def req_profile(username):
self._soup_profile = self.request(f"https://archiveofourown.org/users/{username}/profile")
@threadable.threadable
def req_bookmarks(username):
self._soup_bookmarks = self.request(f"https://archiveofourown.org/users/{username}/bookmarks")
rs = [req_works(self.username, threaded=True),
req_profile(self.username, threaded=True),
req_bookmarks(self.username, threaded=True)]
for r in rs:
r.join()
self._works = None
self._bookmarks = None
def get_avatar(self):
"""Returns a tuple containing the name of the file and its data
Returns:
tuple: (name: str, img: bytes)
"""
icon = self._soup_profile.find("p", {"class": "icon"})
src = icon.img.attrs["src"]
name = src.split("/")[-1].split("?")[0]
img = self.get(src).content
return name, img
@threadable.threadable
def subscribe(self):
"""Subscribes to this user.
This function is threadable.
Raises:
utils.AuthError: Invalid session
"""
if self._session is None or not self._session.is_authed:
raise utils.AuthError("You can only subscribe to a user using an authenticated session")
utils.subscribe(self.user_id, "User", self._session)
@threadable.threadable
def unsubscribe(self):
"""Unubscribes from this user.
This function is threadable.
Raises:
utils.AuthError: Invalid session
"""
if not self.is_subscribed:
raise Exception("You are not subscribed to this user")
if self._session is None or not self._session.is_authed:
raise utils.AuthError("You can only unsubscribe from a user using an authenticated session")
utils.subscribe(self.user_id, "User", self._session, True, self._sub_id)
@cached_property
def is_subscribed(self):
"""True if you're subscribed to this user"""
if self._session is None or not self._session.is_authed:
raise utils.AuthError("You can only get a user ID using an authenticated session")
header = self._soup_profile.find("div", {"class": "primary header module"})
input_ = header.find("input", {"name": "commit", "value": "Unsubscribe"})
return input_ is not None
@cached_property
def user_id(self):
if self._session is None or not self._session.is_authed:
raise utils.AuthError("You can only get a user ID using an authenticated session")
header = self._soup_profile.find("div", {"class": "primary header module"})
input_ = header.find("input", {"name": "subscription[subscribable_id]"})
if input_ is None:
raise utils.UnexpectedResponseError("Couldn't fetch user ID")
return int(input_.attrs["value"])
@cached_property
def _sub_id(self):
"""Returns the subscription ID. Used for unsubscribing"""
if not self.is_subscribed:
raise Exception("You are not subscribed to this user")
header = self._soup_profile.find("div", {"class": "primary header module"})
id_ = header.form.attrs["action"].split("/")[-1]
return int(id_)
@cached_property
def works(self):
"""Returns the number of works authored by this user
Returns:
int: Number of works
"""
div = self._soup_works.find("div", {'id': 'inner'})
span = div.find("span", {'class': 'current'}).getText().replace("(", "").replace(")", "")
n = span.split(" ")[1]
return int(self.str_format(n))
@cached_property
def _works_pages(self):
pages = self._soup_works.find("ol", {"title": "pagination"})
if pages is None:
return 1
n = 1
for li in pages.findAll("li"):
text = li.getText()
if text.isdigit():
n = int(text)
return n
def get_works(self, use_threading=False):
"""
Get works authored by this user. Loads them if they haven't been previously
Returns:
list: List of works
"""
if self._works is None:
if use_threading:
self.load_works_threaded()
else:
self._works = []
for page in range(self._works_pages):
self._load_works(page=page+1)
return self._works
@threadable.threadable
def load_works_threaded(self):
"""
Get the user's works using threads.
This function is threadable.
"""
threads = []
self._works = []
for page in range(self._works_pages):
threads.append(self._load_works(page=page+1, threaded=True))
for thread in threads:
thread.join()
@threadable.threadable
def _load_works(self, page=1):
from .works import Work
self._soup_works = self.request(f"https://archiveofourown.org/users/{self.username}/works?page={page}")
ol = self._soup_works.find("ol", {'class': 'work index group'})
for work in ol.find_all("li", {'role': 'article'}):
authors = []
if work.h4 is None:
continue
for a in work.h4.find_all("a"):
if 'rel' in a.attrs.keys():
if "author" in a['rel']:
authors.append(User(a.string, load=False))
elif a.attrs["href"].startswith("/works"):
name = a.string
id_ = utils.workid_from_url(a['href'])
new = Work(id_, load=False)
setattr(new, "title", name)
setattr(new, "authors", authors)
if new not in self._works:
self._works.append(new)
@cached_property
def bookmarks(self):
"""Returns the number of works user has bookmarked
Returns:
int: Number of bookmarks
"""
div = self._soup_bookmarks.find("div", {'id': 'inner'})
span = div.find("span", {'class': 'current'}).getText().replace("(", "").replace(")", "")
n = span.split(" ")[1]
return int(self.str_format(n))
@cached_property
def _bookmarks_pages(self):
pages = self._soup_bookmarks.find("ol", {"title": "pagination"})
if pages is None:
return 1
n = 1
for li in pages.findAll("li"):
text = li.getText()
if text.isdigit():
n = int(text)
return n
def get_bookmarks(self, use_threading=False):
"""
Get this user's bookmarked works. Loads them if they haven't been previously
Returns:
list: List of works
"""
if self._bookmarks is None:
if use_threading:
self.load_bookmarks_threaded()
else:
self._bookmarks = []
for page in range(self._bookmarks_pages):
self._load_bookmarks(page=page+1)
return self._bookmarks
@threadable.threadable
def load_bookmarks_threaded(self):
"""
Get the user's bookmarks using threads.
This function is threadable.
"""
threads = []
self._bookmarks = []
for page in range(self._bookmarks_pages):
threads.append(self._load_bookmarks(page=page+1, threaded=True))
for thread in threads:
thread.join()
@threadable.threadable
def _load_bookmarks(self, page=1):
from .works import Work
self._soup_bookmarks = self.request(f"https://archiveofourown.org/users/{self.username}/bookmarks?page={page}")
ol = self._soup_bookmarks.find("ol", {'class': 'bookmark index group'})
for work in ol.find_all("li", {'role': 'article'}):
authors = []
if work.h4 is None:
continue
for a in work.h4.find_all("a"):
if 'rel' in a.attrs.keys():
if "author" in a['rel']:
authors.append(User(a.string, load=False))
elif a.attrs["href"].startswith("/works"):
name = a.string
id_ = utils.workid_from_url(a['href'])
new = Work(id_, load=False)
setattr(new, "title", name)
setattr(new, "authors", authors)
if new not in self._bookmarks:
self._bookmarks.append(new)
@cached_property
def bio(self):
"""Returns the user's bio
Returns:
str: User's bio
"""
blockquote = self._soup_profile.find("blockquote", {'class': 'userstuff'})
return blockquote.getText() if blockquote is not None else ""
@cached_property
def url(self):
"""Returns the URL to the user's profile
Returns:
str: user profile URL
"""
return "https://archiveofourown.org/users/%s"%self.username
def get(self, *args, **kwargs):
"""Request a web page and return a Response object"""
if self._session is None:
req = requester.request("get", *args, **kwargs)
else:
req = requester.request("get", *args, **kwargs, session=self._session.session)
if req.status_code == 429:
raise utils.HTTPError("We are being rate-limited. Try again in a while or reduce the number of requests")
return req
def request(self, url):
"""Request a web page and return a BeautifulSoup object.
Args:
url (str): Url to request
Returns:
bs4.BeautifulSoup: BeautifulSoup object representing the requested page's html
"""
req = self.get(url)
soup = BeautifulSoup(req.content, "lxml")
return soup
@staticmethod
def str_format(string):
"""Formats a given string
Args:
string (str): String to format
Returns:
str: Formatted string
"""
return string.replace(",", "") | en | 0.760632 | AO3 user object Creates a new AO3 user object
Args:
username (str): AO3 username
session (AO3.Session, optional): Used to access additional info
load (bool, optional): If true, the user is loaded on initialization. Defaults to True. Sets the session used to make requests for this work
Args:
session (AO3.Session/AO3.GuestSession): session object Loads information about this user.
This function is threadable. Returns a tuple containing the name of the file and its data
Returns:
tuple: (name: str, img: bytes) Subscribes to this user.
This function is threadable.
Raises:
utils.AuthError: Invalid session Unubscribes from this user.
This function is threadable.
Raises:
utils.AuthError: Invalid session True if you're subscribed to this user Returns the subscription ID. Used for unsubscribing Returns the number of works authored by this user
Returns:
int: Number of works Get works authored by this user. Loads them if they haven't been previously
Returns:
list: List of works Get the user's works using threads.
This function is threadable. Returns the number of works user has bookmarked
Returns:
int: Number of bookmarks Get this user's bookmarked works. Loads them if they haven't been previously
Returns:
list: List of works Get the user's bookmarks using threads.
This function is threadable. Returns the user's bio
Returns:
str: User's bio Returns the URL to the user's profile
Returns:
str: user profile URL Request a web page and return a Response object Request a web page and return a BeautifulSoup object.
Args:
url (str): Url to request
Returns:
bs4.BeautifulSoup: BeautifulSoup object representing the requested page's html Formats a given string
Args:
string (str): String to format
Returns:
str: Formatted string | 2.750346 | 3 |
src/utils/formatter.py | XxKavosxX/consumption_data_analysis | 0 | 6624922 | <filename>src/utils/formatter.py<gh_stars>0
import numpy as np
import pandas as pd
from datetime import timedelta
from itertools import groupby, count
G22 = 1760/(22*1000)
G47 = 1760/(47*1000)
G82 = 1760/(82*1000)
def dt_remover(df):
indice = count()
zipped = zip(df.index, indice)
lst = [(date - timedelta(seconds=0.7*indice)) for date, indice in zipped]
df.index = lst
return df.resample('2min').mean()
def rename_columns(df):
df.columns = [label.split('/')[1] for label in list(df.columns)]
def apply_sct_gain(df):
variable = {'Irms','Pwr'}
for node_name in list(set(df.columns.get_level_values(0))):
for col in df[node_name].columns:
el = col.split('/')
if el[1] in variable:
if 'R47' in col:
df[node_name, col] *= G47
elif 'R82' in col:
df[node_name, col] *= G82
elif 'R22' in col:
df[node_name, col] *= G22
def pivot_database(path, file_name):
df_csv = pd.read_csv(path+"/"+file_name, infer_datetime_format=True)
pvt_df = pd.pivot_table(df_csv, index=['date'], values=['payload'], columns=['client_id','topic_path'])
pvt_df.index = pd.DatetimeIndex(pvt_df.index)
pvt_df = pvt_df.resample('2Min').mean()
pvt_df.sort_index(axis=1, inplace = True)
node_id = np.array(pvt_df.columns.get_level_values(1))
node_sens_info = np.array(pvt_df.columns.get_level_values(2))
header = [node_id, [info.split('/',2)[2] for info in node_sens_info]]
header_tuple = list(zip(*header))
multi_index = pd.MultiIndex.from_tuples(header_tuple, names=['', ''])
pvt_df.columns = multi_index
return pvt_df
def remove_offset(serie, threshold = 0.0, offset = 0.0):
offsetted = ~(serie < threshold)
serie.loc[offsetted] -= offset
def get_energy(df):
on = df['Pwr'] > 0
return df[on]['Pwr'].sum()/30
def get_usetime(df):
on = df['Pwr'] > 5
return df[on]['Pwr'].count()/30
| <filename>src/utils/formatter.py<gh_stars>0
import numpy as np
import pandas as pd
from datetime import timedelta
from itertools import groupby, count
G22 = 1760/(22*1000)
G47 = 1760/(47*1000)
G82 = 1760/(82*1000)
def dt_remover(df):
indice = count()
zipped = zip(df.index, indice)
lst = [(date - timedelta(seconds=0.7*indice)) for date, indice in zipped]
df.index = lst
return df.resample('2min').mean()
def rename_columns(df):
df.columns = [label.split('/')[1] for label in list(df.columns)]
def apply_sct_gain(df):
variable = {'Irms','Pwr'}
for node_name in list(set(df.columns.get_level_values(0))):
for col in df[node_name].columns:
el = col.split('/')
if el[1] in variable:
if 'R47' in col:
df[node_name, col] *= G47
elif 'R82' in col:
df[node_name, col] *= G82
elif 'R22' in col:
df[node_name, col] *= G22
def pivot_database(path, file_name):
df_csv = pd.read_csv(path+"/"+file_name, infer_datetime_format=True)
pvt_df = pd.pivot_table(df_csv, index=['date'], values=['payload'], columns=['client_id','topic_path'])
pvt_df.index = pd.DatetimeIndex(pvt_df.index)
pvt_df = pvt_df.resample('2Min').mean()
pvt_df.sort_index(axis=1, inplace = True)
node_id = np.array(pvt_df.columns.get_level_values(1))
node_sens_info = np.array(pvt_df.columns.get_level_values(2))
header = [node_id, [info.split('/',2)[2] for info in node_sens_info]]
header_tuple = list(zip(*header))
multi_index = pd.MultiIndex.from_tuples(header_tuple, names=['', ''])
pvt_df.columns = multi_index
return pvt_df
def remove_offset(serie, threshold = 0.0, offset = 0.0):
offsetted = ~(serie < threshold)
serie.loc[offsetted] -= offset
def get_energy(df):
on = df['Pwr'] > 0
return df[on]['Pwr'].sum()/30
def get_usetime(df):
on = df['Pwr'] > 5
return df[on]['Pwr'].count()/30
| none | 1 | 2.438276 | 2 | |
mancala/mancala.py | jaywon99/mancala | 0 | 6624923 | # -*- coding: utf-8 -*-
'''Mancala Board
https://www.thesprucecrafts.com/how-to-play-mancala-409424
'''
import pickle
class DistributeIterator:
def __init__(self, order, start, nth):
self.order = order
self.start = start
self.nth = nth
self.pos = 0
def _go_next(self):
self.pos += 1
if self.pos >= len(self.order):
self.pos = self.pos % len(self.order)
def __iter__(self):
self.pos = self.order.index(self.start)
return self
def __next__(self):
self.nth -= 1
if self.nth < 0:
raise StopIteration
self._go_next()
return self.order[self.pos]
class MancalaPlayer:
def __init__(self, board, home, pits, pits_order):
self.board = board # final
self.home = home # final
self.pits = pits # final
self.pits_order = pits_order # final
self.opposite_player = None # almost final (just 1 set)
def reset(self):
pass
def set_opposite_player(self, opposite_player):
self.opposite_player = opposite_player
def available_actions(self):
actions = []
for pos in self.pits:
if self.board[pos] != 0:
actions.append(pos)
return actions
def get_stones_in_pits(self):
return [self.board[pos] for pos in self.pits]
def step(self, action):
self.board.add_play_log(self.home, action)
if action not in self.pits:
# ERROR! FOUL! You can pick from your pits only.
return None, -100, True, True
if self.board[action] == 0:
# ERROR! FOUL! You should pick pit which stone exist.
return None, -100, True, True
# pick up stones
# Rule #04: The game begins with one player picking up all of the pieces in any one of the holes on their side.
stones = self.board[action]
# Rule #05: Moving counter-clockwise, the player deposits one of the stones in each hole until the stones run out.
last_pos = None
for pos in DistributeIterator(self.pits_order, action, stones):
self.board.move(action, pos, 1)
last_pos = pos
# Rule #07: If the last piece you drop is in your own store, you get a free turn.
play_again = False
if last_pos == self.home:
# DO ONE MORE
play_again = True
# landing my empty spot, take oppsite position stones, too
# Rule #08: If the last piece you drop is in an empty hole on your side, you capture that piece and any pieces in the hole directly opposite.
if last_pos in self.pits and self.board[last_pos] == 1:
opposite_position = MancalaBoard.oppsite_position(last_pos)
self.board.move(opposite_position, self.home)
self.board.move(last_pos, self.home)
# check stones left on my pits?
# Rule #10: The game ends when all six spaces on one side of the Mancala board are empty.
done = False
if len(self.available_actions()) == 0:
# No more stones on my pits
# Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces.
self.opposite_player.swipe_out()
done = True
my_score = self.score()
your_score = self.opposite_player.score()
return self.board.observation(), my_score - your_score, done, play_again
# return observation, my_score - your_score, done, play_again
def score(self):
''' Rule #12: Count all the pieces in each store. The winner is the player with the most pieces.
'''
return self.board[self.home]
def swipe_out(self):
'''
Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces.
'''
for pos in self.available_actions():
self.board.move(pos, self.home)
def player_id(self):
return self.home
class MancalaBoard:
CELL_LIST = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'RH', 'BH']
RED_HOME = 'RH'
BLUE_HOME = 'BH'
# RED_PITS = ['R6', 'R5', 'R4', 'R3', 'R2', 'R1']
# BLUE_PITS = ['B6', 'B5', 'B4', 'B3', 'B2', 'B1']
RED_PITS = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
BLUE_PITS = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6']
# Rule 06: If you run into your own store, deposit one piece in it. If you run into your opponent's store, skip it.
RED_ORDER = ['R6', 'R5', 'R4', 'R3', 'R2', 'R1', 'RH', 'B6', 'B5', 'B4', 'B3', 'B2', 'B1']
BLUE_ORDER = ['B6', 'B5', 'B4', 'B3', 'B2', 'B1', 'BH', 'R6', 'R5', 'R4', 'R3', 'R2', 'R1']
OPPOSITE_POSITION = {
'R6': 'B1',
'R5': 'B2',
'R4': 'B3',
'R3': 'B4',
'R2': 'B5',
'R1': 'B6',
'B6': 'R1',
'B5': 'R2',
'B4': 'R3',
'B3': 'R4',
'B2': 'R5',
'B1': 'R6',
}
def __init__(self):
self.board = {c:0 for c in MancalaBoard.CELL_LIST}
self.log = []
def reset(self):
self.board = {c:4 for c in MancalaBoard.CELL_LIST}
self.board['BH'] = 0
self.board['RH'] = 0
self.log = []
def observation(self):
# 이걸 어떻게 define할지
# 가장 쉬운 건 [self.board[pos] for pos in CELL_LIST] 일텐데.. Compact 할 수는 없을까?
return [self.board[pos] for pos in self.CELL_LIST]
def add_play_log(self, who, action):
self.log.append((who, action)) # we can get who from action anyway
def move(self, pos_from, pos_to, cnt = None):
'''
to animate, make log here.
if cnt == None: move everything
'''
# make log to animate
# print(pos_from, pos_to, cnt)
if cnt == None:
cnt = self.board[pos_from]
self.board[pos_from] -= cnt
self.board[pos_to] += cnt
@staticmethod
def oppsite_position(pos):
return MancalaBoard.OPPOSITE_POSITION[pos]
def create_memento(self):
return pickle.dumps((self.board, self.log))
def restore_memento(self, memory):
(self.board, self.log) = pickle.loads(memory)
def __getitem__(self, key):
return self.board[key]
def __setitem__(self, key, value):
self.board[key] = value
class Mancala:
def __init__(self):
self.board = MancalaBoard()
self.player1 = MancalaPlayer(self.board, MancalaBoard.RED_HOME, MancalaBoard.RED_PITS, MancalaBoard.RED_ORDER)
self.player2 = MancalaPlayer(self.board, MancalaBoard.BLUE_HOME, MancalaBoard.BLUE_PITS, MancalaBoard.BLUE_ORDER)
self.reset()
def reset(self):
self.board.reset()
self.player1.reset()
self.player2.reset()
self.player1.set_opposite_player(self.player2)
self.player2.set_opposite_player(self.player1)
self.current_player = self.player2
return self.board.observation()
def next_player(self, play_again):
if play_again:
return self.current_player
if self.current_player == self.player1:
self.current_player = self.player2
else:
self.current_player = self.player1
return self.current_player
def oppsite_player(self):
if self.current_player == self.player1:
return self.player2
return self.player1
def create_memento(self):
return (self.board.create_memento(), self.current_player.player_id())
def restore_memento(self, memory):
self.board.restore_memento(memory[0])
if memory[1] == self.player1.player_id():
self.current_player = self.player1
else:
self.current_player = self.player2
def print(self):
print("+----+----+----+----+----+----+----+----+")
print("| | %2d | %2d | %2d | %2d | %2d | %2d | |" % (self.board['R1'], self.board['R2'], self.board['R3'], self.board['R4'], self.board['R5'], self.board['R6']))
print("+ %2d +----+----+----+----+----+----+ %2d +" % (self.board['RH'], self.board['BH']))
print("| | %2d | %2d | %2d | %2d | %2d | %2d | |" % (self.board['B6'], self.board['B5'], self.board['B4'], self.board['B3'], self.board['B2'], self.board['B1']))
print("+----+----+----+----+----+----+----+----+")
print("-----------------------------------------------")
| # -*- coding: utf-8 -*-
'''Mancala Board
https://www.thesprucecrafts.com/how-to-play-mancala-409424
'''
import pickle
class DistributeIterator:
def __init__(self, order, start, nth):
self.order = order
self.start = start
self.nth = nth
self.pos = 0
def _go_next(self):
self.pos += 1
if self.pos >= len(self.order):
self.pos = self.pos % len(self.order)
def __iter__(self):
self.pos = self.order.index(self.start)
return self
def __next__(self):
self.nth -= 1
if self.nth < 0:
raise StopIteration
self._go_next()
return self.order[self.pos]
class MancalaPlayer:
def __init__(self, board, home, pits, pits_order):
self.board = board # final
self.home = home # final
self.pits = pits # final
self.pits_order = pits_order # final
self.opposite_player = None # almost final (just 1 set)
def reset(self):
pass
def set_opposite_player(self, opposite_player):
self.opposite_player = opposite_player
def available_actions(self):
actions = []
for pos in self.pits:
if self.board[pos] != 0:
actions.append(pos)
return actions
def get_stones_in_pits(self):
return [self.board[pos] for pos in self.pits]
def step(self, action):
self.board.add_play_log(self.home, action)
if action not in self.pits:
# ERROR! FOUL! You can pick from your pits only.
return None, -100, True, True
if self.board[action] == 0:
# ERROR! FOUL! You should pick pit which stone exist.
return None, -100, True, True
# pick up stones
# Rule #04: The game begins with one player picking up all of the pieces in any one of the holes on their side.
stones = self.board[action]
# Rule #05: Moving counter-clockwise, the player deposits one of the stones in each hole until the stones run out.
last_pos = None
for pos in DistributeIterator(self.pits_order, action, stones):
self.board.move(action, pos, 1)
last_pos = pos
# Rule #07: If the last piece you drop is in your own store, you get a free turn.
play_again = False
if last_pos == self.home:
# DO ONE MORE
play_again = True
# landing my empty spot, take oppsite position stones, too
# Rule #08: If the last piece you drop is in an empty hole on your side, you capture that piece and any pieces in the hole directly opposite.
if last_pos in self.pits and self.board[last_pos] == 1:
opposite_position = MancalaBoard.oppsite_position(last_pos)
self.board.move(opposite_position, self.home)
self.board.move(last_pos, self.home)
# check stones left on my pits?
# Rule #10: The game ends when all six spaces on one side of the Mancala board are empty.
done = False
if len(self.available_actions()) == 0:
# No more stones on my pits
# Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces.
self.opposite_player.swipe_out()
done = True
my_score = self.score()
your_score = self.opposite_player.score()
return self.board.observation(), my_score - your_score, done, play_again
# return observation, my_score - your_score, done, play_again
def score(self):
''' Rule #12: Count all the pieces in each store. The winner is the player with the most pieces.
'''
return self.board[self.home]
def swipe_out(self):
'''
Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces.
'''
for pos in self.available_actions():
self.board.move(pos, self.home)
def player_id(self):
return self.home
class MancalaBoard:
CELL_LIST = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'RH', 'BH']
RED_HOME = 'RH'
BLUE_HOME = 'BH'
# RED_PITS = ['R6', 'R5', 'R4', 'R3', 'R2', 'R1']
# BLUE_PITS = ['B6', 'B5', 'B4', 'B3', 'B2', 'B1']
RED_PITS = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
BLUE_PITS = ['B1', 'B2', 'B3', 'B4', 'B5', 'B6']
# Rule 06: If you run into your own store, deposit one piece in it. If you run into your opponent's store, skip it.
RED_ORDER = ['R6', 'R5', 'R4', 'R3', 'R2', 'R1', 'RH', 'B6', 'B5', 'B4', 'B3', 'B2', 'B1']
BLUE_ORDER = ['B6', 'B5', 'B4', 'B3', 'B2', 'B1', 'BH', 'R6', 'R5', 'R4', 'R3', 'R2', 'R1']
OPPOSITE_POSITION = {
'R6': 'B1',
'R5': 'B2',
'R4': 'B3',
'R3': 'B4',
'R2': 'B5',
'R1': 'B6',
'B6': 'R1',
'B5': 'R2',
'B4': 'R3',
'B3': 'R4',
'B2': 'R5',
'B1': 'R6',
}
def __init__(self):
self.board = {c:0 for c in MancalaBoard.CELL_LIST}
self.log = []
def reset(self):
self.board = {c:4 for c in MancalaBoard.CELL_LIST}
self.board['BH'] = 0
self.board['RH'] = 0
self.log = []
def observation(self):
# 이걸 어떻게 define할지
# 가장 쉬운 건 [self.board[pos] for pos in CELL_LIST] 일텐데.. Compact 할 수는 없을까?
return [self.board[pos] for pos in self.CELL_LIST]
def add_play_log(self, who, action):
self.log.append((who, action)) # we can get who from action anyway
def move(self, pos_from, pos_to, cnt = None):
'''
to animate, make log here.
if cnt == None: move everything
'''
# make log to animate
# print(pos_from, pos_to, cnt)
if cnt == None:
cnt = self.board[pos_from]
self.board[pos_from] -= cnt
self.board[pos_to] += cnt
@staticmethod
def oppsite_position(pos):
return MancalaBoard.OPPOSITE_POSITION[pos]
def create_memento(self):
return pickle.dumps((self.board, self.log))
def restore_memento(self, memory):
(self.board, self.log) = pickle.loads(memory)
def __getitem__(self, key):
return self.board[key]
def __setitem__(self, key, value):
self.board[key] = value
class Mancala:
def __init__(self):
self.board = MancalaBoard()
self.player1 = MancalaPlayer(self.board, MancalaBoard.RED_HOME, MancalaBoard.RED_PITS, MancalaBoard.RED_ORDER)
self.player2 = MancalaPlayer(self.board, MancalaBoard.BLUE_HOME, MancalaBoard.BLUE_PITS, MancalaBoard.BLUE_ORDER)
self.reset()
def reset(self):
self.board.reset()
self.player1.reset()
self.player2.reset()
self.player1.set_opposite_player(self.player2)
self.player2.set_opposite_player(self.player1)
self.current_player = self.player2
return self.board.observation()
def next_player(self, play_again):
if play_again:
return self.current_player
if self.current_player == self.player1:
self.current_player = self.player2
else:
self.current_player = self.player1
return self.current_player
def oppsite_player(self):
if self.current_player == self.player1:
return self.player2
return self.player1
def create_memento(self):
return (self.board.create_memento(), self.current_player.player_id())
def restore_memento(self, memory):
self.board.restore_memento(memory[0])
if memory[1] == self.player1.player_id():
self.current_player = self.player1
else:
self.current_player = self.player2
def print(self):
print("+----+----+----+----+----+----+----+----+")
print("| | %2d | %2d | %2d | %2d | %2d | %2d | |" % (self.board['R1'], self.board['R2'], self.board['R3'], self.board['R4'], self.board['R5'], self.board['R6']))
print("+ %2d +----+----+----+----+----+----+ %2d +" % (self.board['RH'], self.board['BH']))
print("| | %2d | %2d | %2d | %2d | %2d | %2d | |" % (self.board['B6'], self.board['B5'], self.board['B4'], self.board['B3'], self.board['B2'], self.board['B1']))
print("+----+----+----+----+----+----+----+----+")
print("-----------------------------------------------")
| en | 0.918295 | # -*- coding: utf-8 -*- Mancala Board https://www.thesprucecrafts.com/how-to-play-mancala-409424 # final # final # final # final # almost final (just 1 set) # ERROR! FOUL! You can pick from your pits only. # ERROR! FOUL! You should pick pit which stone exist. # pick up stones # Rule #04: The game begins with one player picking up all of the pieces in any one of the holes on their side. # Rule #05: Moving counter-clockwise, the player deposits one of the stones in each hole until the stones run out. # Rule #07: If the last piece you drop is in your own store, you get a free turn. # DO ONE MORE # landing my empty spot, take oppsite position stones, too # Rule #08: If the last piece you drop is in an empty hole on your side, you capture that piece and any pieces in the hole directly opposite. # check stones left on my pits? # Rule #10: The game ends when all six spaces on one side of the Mancala board are empty. # No more stones on my pits # Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces. # return observation, my_score - your_score, done, play_again Rule #12: Count all the pieces in each store. The winner is the player with the most pieces. Rule #11: The player who still has pieces on his side of the board when the game ends capture all of those pieces. # RED_PITS = ['R6', 'R5', 'R4', 'R3', 'R2', 'R1'] # BLUE_PITS = ['B6', 'B5', 'B4', 'B3', 'B2', 'B1'] # Rule 06: If you run into your own store, deposit one piece in it. If you run into your opponent's store, skip it. # 이걸 어떻게 define할지 # 가장 쉬운 건 [self.board[pos] for pos in CELL_LIST] 일텐데.. Compact 할 수는 없을까? # we can get who from action anyway to animate, make log here. if cnt == None: move everything # make log to animate # print(pos_from, pos_to, cnt) | 3.992585 | 4 |
pf-net/shapenet_part_loader.py | 63445538/Contrib | 0 | 6624924 | # from __future__ import print_function
import paddle.fluid as fluid
import os
import os.path
import json
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
dataset_path = os.path.abspath(
os.path.join(BASE_DIR, 'dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/'))
class PartDataset(object):
    """ShapeNet-Part dataset loader serving fixed-size random point samples.

    Reads point clouds (``.pts``) and per-point segmentation labels
    (``.seg``) from the ShapeNetCore part-annotation benchmark layout.

    Args:
        root: dataset root containing ``synsetoffset2category.txt``.
        num_point: number of points resampled (with replacement) per cloud.
        classification: if True, samples are ``(points, class_id)``;
            otherwise ``(points, per_point_seg_labels, class_id)``.
        class_choice: optional iterable of category names to keep; None keeps all.
        mode: one of 'train', 'val', 'test', 'trainval'.
        normalize: if True, center each cloud and scale it to the unit sphere.
    """
    def __init__(self, root=dataset_path, num_point=2500, classification=True, class_choice=None, mode='train',
                 normalize=True):
        self.num_point = num_point
        self.root = root
        self.mode = mode
        # Maps human-readable category name -> synset offset (directory name).
        self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
        self.cat = {}
        self.classification = classification
        self.normalize = normalize
        with open(self.catfile, 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = ls[1]
        # print(self.cat)
        # Optionally restrict the dataset to the requested categories.
        if not class_choice is None:
            self.cat = {k: v for k, v in self.cat.items() if k in class_choice}
        print(self.cat)
        self.meta = {}
        # Official split files; each JSON entry is 'shape_data/<synset>/<token>',
        # so split('/')[2] extracts the per-file token.
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
            train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
            val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
            test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        for item in self.cat:
            # print('category', item)
            self.meta[item] = []
            dir_point = os.path.join(self.root, self.cat[item], 'points')
            dir_seg = os.path.join(self.root, self.cat[item], 'points_label')
            # print(dir_point, dir_seg)
            fns = sorted(os.listdir(dir_point))
            # Keep only the files of the requested split; fn[0:-4] strips '.pts'.
            if self.mode == 'trainval':
                fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
            elif self.mode == 'train':
                fns = [fn for fn in fns if fn[0:-4] in train_ids]
            elif self.mode == 'val':
                fns = [fn for fn in fns if fn[0:-4] in val_ids]
            elif self.mode == 'test':
                fns = [fn for fn in fns if fn[0:-4] in test_ids]
            else:
                print('Unknown split: %s. Exiting..' % self.mode)
                sys.exit(-1)
            for fn in fns:
                token = (os.path.splitext(os.path.basename(fn))[0])
                self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg'),
                                        self.cat[item], token))
        # Flat sample list: (category, pts path, seg path, synset, token).
        self.datapath = []
        for item in self.cat:
            for fn in self.meta[item]:
                self.datapath.append((item, fn[0], fn[1], fn[2], fn[3]))
        # Category name -> integer class id, assigned in sorted-name order.
        self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))
        print(self.classes)
        self.num_seg_classes = 0
        if not self.classification:
            # Estimate the number of segmentation classes from a ~2% subsample
            # of the label files (every sample would be slow).
            for i in range(len(self.datapath) // 50):
                l = len(np.unique(np.loadtxt(self.datapath[i][2]).astype(np.uint8)))
                if l > self.num_seg_classes:
                    self.num_seg_classes = l
        # print(self.num_seg_classes)
    def get_random_sample(self, index):
        """Load sample ``index`` and resample it to ``self.num_point`` points.

        Returns ``(point_set, cls)`` in classification mode, else
        ``(point_set, seg, cls)``.
        """
        fn = self.datapath[index]
        cls = self.classes[self.datapath[index][0]]
        # cls = np.array([cls]).astype(np.int32)
        point_set = np.loadtxt(fn[1]).astype(np.float32)
        if self.normalize:
            point_set = self.pc_normalize(point_set)
        # Segmentation labels are stored 1-based on disk; shift to 0-based.
        seg = np.loadtxt(fn[2]).astype(np.int64) - 1
        foldername = fn[3]
        filename = fn[4]
        # print(point_set.shape, seg.shape)
        choice = np.random.choice(len(seg), self.num_point, replace=True)
        # resample
        point_set = point_set[choice, :]
        seg = seg[choice]
        # To Pytorch
        # point_set = torch.from_numpy(point_set)
        # seg = torch.from_numpy(seg)
        # cls = torch.from_numpy(np.array([cls]).astype(np.int64))
        # To PaddlePaddle
        if self.classification:
            return point_set, cls
        else:
            return point_set, seg, cls
    def __len__(self):
        """Return the total number of samples in the selected split."""
        return len(self.datapath)
    def pc_normalize(self, pc):
        """ pc: NxC, return NxC (centered and scaled to the unit sphere) """
        l = pc.shape[0]
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        # Scale so the farthest point lies on the unit sphere.
        m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        pc = pc / m
        return pc
    def get_reader(self, batch_size):
        """Return a generator yielding ``batch_size``-long lists of samples.

        Each epoch yields ``len(self) // batch_size`` batches of randomly
        drawn samples (with replacement across the dataset).
        NOTE(review): unpacks two values per sample, so this assumes
        classification mode — get_random_sample returns three values for
        segmentation. Confirm before using with classification=False.
        """
        batch_num = int(len(self.datapath)/batch_size)
        def __reader__():
            for _ in range(batch_num):
                sample_list = []
                for _ in range(batch_size):
                    choice = np.random.choice(len(self.datapath))
                    point, label = self.get_random_sample(choice)
                    sample_list.append([point, label])
                yield sample_list
        return __reader__
if __name__ == '__main__':
    # Smoke test: build the training split and iterate one epoch, printing labels.
    dset = PartDataset(
        root='/home/arclab/PF-Net-Point-Fractal-Network/dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/',
        classification=True, class_choice=None, num_point=2048, mode='train')
    place = fluid.CUDAPlace(0)  # or fluid.CUDAPlace(0)
    fluid.enable_imperative(place)
    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
    train_loader.set_sample_list_generator(dset.get_reader(32), places=place)
    for data in train_loader():
        points, label = data
        batch_size = points.shape[0]
        print(label)
        # print(ps.size(), ps.type(), cls.size(), cls.type())
        # print(ps)
        # ps = ps.numpy()
        # np.savetxt('ps'+'.txt', ps, fmt = "%f %f %f")
| # from __future__ import print_function
import paddle.fluid as fluid
import os
import os.path
import json
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
dataset_path = os.path.abspath(
os.path.join(BASE_DIR, 'dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/'))
class PartDataset(object):
    """ShapeNet-Part dataset loader serving fixed-size random point samples.

    Reads point clouds (``.pts``) and per-point segmentation labels
    (``.seg``) from the ShapeNetCore part-annotation benchmark layout.

    Args:
        root: dataset root containing ``synsetoffset2category.txt``.
        num_point: number of points resampled (with replacement) per cloud.
        classification: if True, samples are ``(points, class_id)``;
            otherwise ``(points, per_point_seg_labels, class_id)``.
        class_choice: optional iterable of category names to keep; None keeps all.
        mode: one of 'train', 'val', 'test', 'trainval'.
        normalize: if True, center each cloud and scale it to the unit sphere.
    """
    def __init__(self, root=dataset_path, num_point=2500, classification=True, class_choice=None, mode='train',
                 normalize=True):
        self.num_point = num_point
        self.root = root
        self.mode = mode
        # Maps human-readable category name -> synset offset (directory name).
        self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
        self.cat = {}
        self.classification = classification
        self.normalize = normalize
        with open(self.catfile, 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = ls[1]
        # print(self.cat)
        # Optionally restrict the dataset to the requested categories.
        if not class_choice is None:
            self.cat = {k: v for k, v in self.cat.items() if k in class_choice}
        print(self.cat)
        self.meta = {}
        # Official split files; each JSON entry is 'shape_data/<synset>/<token>',
        # so split('/')[2] extracts the per-file token.
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
            train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
            val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
            test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
        for item in self.cat:
            # print('category', item)
            self.meta[item] = []
            dir_point = os.path.join(self.root, self.cat[item], 'points')
            dir_seg = os.path.join(self.root, self.cat[item], 'points_label')
            # print(dir_point, dir_seg)
            fns = sorted(os.listdir(dir_point))
            # Keep only the files of the requested split; fn[0:-4] strips '.pts'.
            if self.mode == 'trainval':
                fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
            elif self.mode == 'train':
                fns = [fn for fn in fns if fn[0:-4] in train_ids]
            elif self.mode == 'val':
                fns = [fn for fn in fns if fn[0:-4] in val_ids]
            elif self.mode == 'test':
                fns = [fn for fn in fns if fn[0:-4] in test_ids]
            else:
                print('Unknown split: %s. Exiting..' % self.mode)
                sys.exit(-1)
            for fn in fns:
                token = (os.path.splitext(os.path.basename(fn))[0])
                self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg'),
                                        self.cat[item], token))
        # Flat sample list: (category, pts path, seg path, synset, token).
        self.datapath = []
        for item in self.cat:
            for fn in self.meta[item]:
                self.datapath.append((item, fn[0], fn[1], fn[2], fn[3]))
        # Category name -> integer class id, assigned in sorted-name order.
        self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))
        print(self.classes)
        self.num_seg_classes = 0
        if not self.classification:
            # Estimate the number of segmentation classes from a ~2% subsample
            # of the label files (every sample would be slow).
            for i in range(len(self.datapath) // 50):
                l = len(np.unique(np.loadtxt(self.datapath[i][2]).astype(np.uint8)))
                if l > self.num_seg_classes:
                    self.num_seg_classes = l
        # print(self.num_seg_classes)
    def get_random_sample(self, index):
        """Load sample ``index`` and resample it to ``self.num_point`` points.

        Returns ``(point_set, cls)`` in classification mode, else
        ``(point_set, seg, cls)``.
        """
        fn = self.datapath[index]
        cls = self.classes[self.datapath[index][0]]
        # cls = np.array([cls]).astype(np.int32)
        point_set = np.loadtxt(fn[1]).astype(np.float32)
        if self.normalize:
            point_set = self.pc_normalize(point_set)
        # Segmentation labels are stored 1-based on disk; shift to 0-based.
        seg = np.loadtxt(fn[2]).astype(np.int64) - 1
        foldername = fn[3]
        filename = fn[4]
        # print(point_set.shape, seg.shape)
        choice = np.random.choice(len(seg), self.num_point, replace=True)
        # resample
        point_set = point_set[choice, :]
        seg = seg[choice]
        # To Pytorch
        # point_set = torch.from_numpy(point_set)
        # seg = torch.from_numpy(seg)
        # cls = torch.from_numpy(np.array([cls]).astype(np.int64))
        # To PaddlePaddle
        if self.classification:
            return point_set, cls
        else:
            return point_set, seg, cls
    def __len__(self):
        """Return the total number of samples in the selected split."""
        return len(self.datapath)
    def pc_normalize(self, pc):
        """ pc: NxC, return NxC (centered and scaled to the unit sphere) """
        l = pc.shape[0]
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        # Scale so the farthest point lies on the unit sphere.
        m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        pc = pc / m
        return pc
    def get_reader(self, batch_size):
        """Return a generator yielding ``batch_size``-long lists of samples.

        Each epoch yields ``len(self) // batch_size`` batches of randomly
        drawn samples (with replacement across the dataset).
        NOTE(review): unpacks two values per sample, so this assumes
        classification mode — get_random_sample returns three values for
        segmentation. Confirm before using with classification=False.
        """
        batch_num = int(len(self.datapath)/batch_size)
        def __reader__():
            for _ in range(batch_num):
                sample_list = []
                for _ in range(batch_size):
                    choice = np.random.choice(len(self.datapath))
                    point, label = self.get_random_sample(choice)
                    sample_list.append([point, label])
                yield sample_list
        return __reader__
if __name__ == '__main__':
    # Smoke test: build the training split and iterate one epoch, printing labels.
    dset = PartDataset(
        root='/home/arclab/PF-Net-Point-Fractal-Network/dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/',
        classification=True, class_choice=None, num_point=2048, mode='train')
    place = fluid.CUDAPlace(0)  # or fluid.CUDAPlace(0)
    fluid.enable_imperative(place)
    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
    train_loader.set_sample_list_generator(dset.get_reader(32), places=place)
    for data in train_loader():
        points, label = data
        batch_size = points.shape[0]
        print(label)
        # print(ps.size(), ps.type(), cls.size(), cls.type())
        # print(ps)
        # ps = ps.numpy()
        # np.savetxt('ps'+'.txt', ps, fmt = "%f %f %f")
| en | 0.227624 | # from __future__ import print_function # print(self.cat) # print('category', item) # print(dir_point, dir_seg) # print(self.num_seg_classes) # cls = np.array([cls]).astype(np.int32) # print(point_set.shape, seg.shape) # resample # To Pytorch # point_set = torch.from_numpy(point_set) # seg = torch.from_numpy(seg) # cls = torch.from_numpy(np.array([cls]).astype(np.int64)) # To PaddlePaddle pc: NxC, return NxC # 或者 fluid.CUDAPlace(0) # print(ps.size(), ps.type(), cls.size(), cls.type()) # print(ps) # ps = ps.numpy() # np.savetxt('ps'+'.txt', ps, fmt = "%f %f %f") | 2.403304 | 2 |
pay-api/src/pay_api/models/payment_account.py | thorwolpert/sbc-pay | 0 | 6624925 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model to handle all operations related to Payment Account data."""
from marshmallow import fields
from sqlalchemy import Boolean, ForeignKey
from .base_model import VersionedModel
from .db import db
from .base_schema import BaseSchema
class PaymentAccount(VersionedModel):  # pylint: disable=too-many-instance-attributes
    """This class manages all of the base data about a Payment Account."""
    __tablename__ = 'payment_accounts'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Account ID from auth, not present for FAS accounts.
    auth_account_id = db.Column(db.String(50), nullable=True, index=True)
    # Used for notifications; the statement emails need the account name.
    name = db.Column(db.String(250), nullable=True, index=False)
    # Payment method code; foreign key to payment_methods.code.
    payment_method = db.Column(db.String(15), ForeignKey('payment_methods.code'), nullable=True)
    # BCOL credentials linked to this account (presumably BC Online — verify).
    bcol_user_id = db.Column(db.String(50), nullable=True, index=True)
    bcol_account = db.Column(db.String(50), nullable=True, index=True)
    # When enabled, statement notifications are sent out.
    statement_notification_enabled = db.Column('statement_notification_enabled', Boolean(), default=False)
    credit = db.Column(db.Float, nullable=True)
    billable = db.Column(Boolean(), default=True)
    # The account shouldn't get used before this date.
    pad_activation_date = db.Column(db.DateTime, nullable=True)
    # PAD terms-of-service acceptance audit fields.
    pad_tos_accepted_date = db.Column(db.DateTime, nullable=True)
    pad_tos_accepted_by = db.Column(db.String(50), nullable=True)
    def __str__(self):
        """Return '<name> (<auth_account_id>)' for display/logging."""
        return f'{self.name or ""} ({self.auth_account_id})'
    @classmethod
    def find_by_auth_account_id(cls, auth_account_id: str):
        """Return the account with the given auth account id, or None if absent."""
        return cls.query.filter_by(auth_account_id=str(auth_account_id)).one_or_none()
class PaymentAccountSchema(BaseSchema):  # pylint: disable=too-many-ancestors
    """Main schema used to serialize the Payment Account."""
    class Meta(BaseSchema.Meta):  # pylint: disable=too-few-public-methods
        """Returns all the fields from the SQLAlchemy class."""
        model = PaymentAccount
        # Internal fields that must not appear in the serialized payload.
        exclude = ['versions', 'pad_activation_date']
    # Map model attributes to the external JSON key names.
    payment_method = fields.String(data_key='payment_method')
    auth_account_id = fields.String(data_key='account_id')
    name = fields.String(data_key='account_name')
| # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model to handle all operations related to Payment Account data."""
from marshmallow import fields
from sqlalchemy import Boolean, ForeignKey
from .base_model import VersionedModel
from .db import db
from .base_schema import BaseSchema
class PaymentAccount(VersionedModel):  # pylint: disable=too-many-instance-attributes
    """This class manages all of the base data about a Payment Account."""
    __tablename__ = 'payment_accounts'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Account ID from auth, not present for FAS accounts.
    auth_account_id = db.Column(db.String(50), nullable=True, index=True)
    # Used for notifications; the statement emails need the account name.
    name = db.Column(db.String(250), nullable=True, index=False)
    # Payment method code; foreign key to payment_methods.code.
    payment_method = db.Column(db.String(15), ForeignKey('payment_methods.code'), nullable=True)
    # BCOL credentials linked to this account (presumably BC Online — verify).
    bcol_user_id = db.Column(db.String(50), nullable=True, index=True)
    bcol_account = db.Column(db.String(50), nullable=True, index=True)
    # When enabled, statement notifications are sent out.
    statement_notification_enabled = db.Column('statement_notification_enabled', Boolean(), default=False)
    credit = db.Column(db.Float, nullable=True)
    billable = db.Column(Boolean(), default=True)
    # The account shouldn't get used before this date.
    pad_activation_date = db.Column(db.DateTime, nullable=True)
    # PAD terms-of-service acceptance audit fields.
    pad_tos_accepted_date = db.Column(db.DateTime, nullable=True)
    pad_tos_accepted_by = db.Column(db.String(50), nullable=True)
    def __str__(self):
        """Return '<name> (<auth_account_id>)' for display/logging."""
        return f'{self.name or ""} ({self.auth_account_id})'
    @classmethod
    def find_by_auth_account_id(cls, auth_account_id: str):
        """Return the account with the given auth account id, or None if absent."""
        return cls.query.filter_by(auth_account_id=str(auth_account_id)).one_or_none()
class PaymentAccountSchema(BaseSchema):  # pylint: disable=too-many-ancestors
    """Main schema used to serialize the Payment Account."""
    class Meta(BaseSchema.Meta):  # pylint: disable=too-few-public-methods
        """Returns all the fields from the SQLAlchemy class."""
        model = PaymentAccount
        # Internal fields that must not appear in the serialized payload.
        exclude = ['versions', 'pad_activation_date']
    # Map model attributes to the external JSON key names.
    payment_method = fields.String(data_key='payment_method')
    auth_account_id = fields.String(data_key='account_id')
    name = fields.String(data_key='account_name')
| en | 0.838562 | # Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Model to handle all operations related to Payment Account data. # pylint: disable=too-many-instance-attributes This class manages all of the base data about Payment Account. # Account ID from auth, not present for FAS accounts. # used for sending out notifications.The statement emails needs account name # when this is enabled , send out the notifications # before this date , the account shouldn't get used Override to string. Return a Account by id. # pylint: disable=too-many-ancestors Main schema used to serialize the Payment Account. # pylint: disable=too-few-public-methods Returns all the fields from the SQLAlchemy class. | 2.20907 | 2 |
vonenet/params.py | comeeasy/VOneNet_FGSM_MNIST | 3 | 6624926 | <filename>vonenet/params.py
import numpy as np
from .utils import sample_dist
import scipy.stats as stats
def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
    """Sample Gabor filter parameters for ``features`` filters.

    Returns ``(sf, ori, phase, nx, ny)``, each a length-``features`` 1-D array:
    spatial frequency, orientation (degrees), phase (degrees), and the x/y
    envelope sizes. With ``rand_flag=True`` parameters are drawn uniformly;
    otherwise they follow the published primate V1 statistics cited inline,
    with ``sf_corr`` setting the Gaussian-copula correlation between envelope
    size and spatial frequency. ``sf_min``/``sf_max`` clip the allowed
    spatial-frequency bins.
    """
    # Generates random sample
    np.random.seed(seed)
    # Phase is always uniform over the full cycle.
    phase_bins = np.array([0, 360])
    phase_dist = np.array([1])
    if rand_flag:
        print('Uniform gabor parameters')
        ori_bins = np.array([0, 180])
        ori_dist = np.array([1])
        nx_bins = np.array([0.1, 10**0.2])
        nx_dist = np.array([1])
        ny_bins = np.array([0.1, 10**0.2])
        ny_dist = np.array([1])
        # sf_bins = np.array([0.5, 8])
        # sf_dist = np.array([1])
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])
        # Clip the spatial-frequency bins to [sf_min, sf_max) and renormalize.
        sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
        sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]
        sf_dist = sf_dist / sf_dist.sum()
    else:
        print('Neuronal distributions gabor parameters')
        # DeValois 1982a
        ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
        ori_dist = np.array([66, 49, 77, 54])
        ori_dist = ori_dist / ori_dist.sum()
        # Schiller 1976
        cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
        # Ringach 2002b
        nx_bins = np.logspace(-1, 0.2, 6, base=10)
        ny_bins = np.logspace(-1, 0.2, 6, base=10)
        # Joint histogram of (nx, ny) bin counts from Ringach's data.
        n_joint_dist = np.array([[2., 0., 1., 0., 0.],
                                 [8., 9., 4., 1., 0.],
                                 [1., 2., 19., 17., 3.],
                                 [0., 0., 1., 7., 4.],
                                 [0., 0., 0., 0., 0.]])
        n_joint_dist = n_joint_dist / n_joint_dist.sum()
        nx_dist = n_joint_dist.sum(axis=1)
        nx_dist = nx_dist / nx_dist.sum()
        # Conditional distribution of ny given each nx bin.
        ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
        # DeValois 1982b
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])
        # Clip the spatial-frequency bins to [sf_min, sf_max] and renormalize.
        sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
        sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]
        sf_dist = sf_dist / sf_dist.sum()
    phase = sample_dist(phase_dist, phase_bins, features)
    ori = sample_dist(ori_dist, ori_bins, features)
    # Fold negative orientations into [0, 180).
    ori[ori < 0] = ori[ori < 0] + 180
    if rand_flag:
        sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
        nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
        ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
    else:
        # Draw correlated (nx, sf) via a Gaussian copula: sample a correlated
        # bivariate normal, map to uniforms via the normal CDF, then invert
        # the empirical CDFs by interpolation in log space.
        samps = np.random.multivariate_normal([0, 0], cov_mat, features)
        samps_cdf = stats.norm.cdf(samps)
        nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
        nx = 10**nx
        ny_samp = np.random.rand(features)
        ny = np.zeros(features)
        for samp_ind, nx_samp in enumerate(nx):
            # Sample ny from its conditional distribution given the nx bin.
            bin_id = np.argwhere(nx_bins < nx_samp)[-1]
            ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
                                     np.log10(ny_bins))
        ny = 10**ny
        sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
        sf = 2**sf
    return sf, ori, phase, nx, ny
| <filename>vonenet/params.py
import numpy as np
from .utils import sample_dist
import scipy.stats as stats
def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
    """Sample Gabor filter parameters for ``features`` filters.

    Returns ``(sf, ori, phase, nx, ny)``, each a length-``features`` 1-D array:
    spatial frequency, orientation (degrees), phase (degrees), and the x/y
    envelope sizes. With ``rand_flag=True`` parameters are drawn uniformly;
    otherwise they follow the published primate V1 statistics cited inline,
    with ``sf_corr`` setting the Gaussian-copula correlation between envelope
    size and spatial frequency. ``sf_min``/``sf_max`` clip the allowed
    spatial-frequency bins.
    """
    # Generates random sample
    np.random.seed(seed)
    # Phase is always uniform over the full cycle.
    phase_bins = np.array([0, 360])
    phase_dist = np.array([1])
    if rand_flag:
        print('Uniform gabor parameters')
        ori_bins = np.array([0, 180])
        ori_dist = np.array([1])
        nx_bins = np.array([0.1, 10**0.2])
        nx_dist = np.array([1])
        ny_bins = np.array([0.1, 10**0.2])
        ny_dist = np.array([1])
        # sf_bins = np.array([0.5, 8])
        # sf_dist = np.array([1])
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])
        # Clip the spatial-frequency bins to [sf_min, sf_max) and renormalize.
        sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
        sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]
        sf_dist = sf_dist / sf_dist.sum()
    else:
        print('Neuronal distributions gabor parameters')
        # DeValois 1982a
        ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
        ori_dist = np.array([66, 49, 77, 54])
        ori_dist = ori_dist / ori_dist.sum()
        # Schiller 1976
        cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
        # Ringach 2002b
        nx_bins = np.logspace(-1, 0.2, 6, base=10)
        ny_bins = np.logspace(-1, 0.2, 6, base=10)
        # Joint histogram of (nx, ny) bin counts from Ringach's data.
        n_joint_dist = np.array([[2., 0., 1., 0., 0.],
                                 [8., 9., 4., 1., 0.],
                                 [1., 2., 19., 17., 3.],
                                 [0., 0., 1., 7., 4.],
                                 [0., 0., 0., 0., 0.]])
        n_joint_dist = n_joint_dist / n_joint_dist.sum()
        nx_dist = n_joint_dist.sum(axis=1)
        nx_dist = nx_dist / nx_dist.sum()
        # Conditional distribution of ny given each nx bin.
        ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
        # DeValois 1982b
        sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
        sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])
        # Clip the spatial-frequency bins to [sf_min, sf_max] and renormalize.
        sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
        sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
        sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
        sf_dist = sf_dist[sfmin_ind:sfmax_ind]
        sf_dist = sf_dist / sf_dist.sum()
    phase = sample_dist(phase_dist, phase_bins, features)
    ori = sample_dist(ori_dist, ori_bins, features)
    # Fold negative orientations into [0, 180).
    ori[ori < 0] = ori[ori < 0] + 180
    if rand_flag:
        sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
        nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
        ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
    else:
        # Draw correlated (nx, sf) via a Gaussian copula: sample a correlated
        # bivariate normal, map to uniforms via the normal CDF, then invert
        # the empirical CDFs by interpolation in log space.
        samps = np.random.multivariate_normal([0, 0], cov_mat, features)
        samps_cdf = stats.norm.cdf(samps)
        nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
        nx = 10**nx
        ny_samp = np.random.rand(features)
        ny = np.zeros(features)
        for samp_ind, nx_samp in enumerate(nx):
            # Sample ny from its conditional distribution given the nx bin.
            bin_id = np.argwhere(nx_bins < nx_samp)[-1]
            ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
                                     np.log10(ny_bins))
        ny = 10**ny
        sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
        sf = 2**sf
    return sf, ori, phase, nx, ny
| en | 0.338014 | # Generates random sample # sf_bins = np.array([0.5, 8]) # sf_dist = np.array([1]) # DeValois 1982a # Schiller 1976 # Ringach 2002b # DeValois 1982b | 2.31737 | 2 |
networks.py | vish119/Neural-Network-for-XOR-and-Binary-Image-Classification | 0 | 6624927 | # sample_submission.py
import numpy as np
from scipy.special import expit
import sys
class xor_net(object):
    """Single-hidden-layer sigmoid network trained on XOR data.

    Architecture: input (+bias) -> 3 hidden sigmoid units (+bias) -> 1 sigmoid
    output. Training is full-batch gradient descent with momentum and L2
    regularization, with early stopping on a 20% validation split.

    Args:
        data: Is a tuple, ``(x,y)``
              ``x`` is a two or one dimensional ndarray ordered such that axis 0
              is independent data and data is spread along axis 1.
        labels: ``y``, a 1D ndarray of the same length as axis 0 of ``x``.
    """
    def __init__(self, data, labels):
        self.x = data
        self.y = labels
        # Smaller datasets get a larger learning rate and more iterations.
        maxiteration = 300000
        if self.x.shape[0] <= 100:
            learningrate = .001
            maxiteration = 1000000
        elif self.x.shape[0] <= 500:
            learningrate = .0001
            maxiteration = 500000
        else:
            learningrate = .00001
        # L2 regularization coefficient.
        R = .01
        xdimension = self.x.shape[1]
        neuorons = 3
        # Hidden-layer weights, +1 row for the bias term.
        self.w = np.random.rand(xdimension + 1, neuorons)
        # Prepend a constant-1 column so the bias is learned as a weight.
        tempX = np.insert(self.x, 0, 1, axis=1)
        tempX = np.array(tempX, dtype=np.float64)
        # Hold out the first 20% of samples for early-stopping validation.
        validsize = int(.2 * len(self.x))
        validsetX = tempX[0:validsize, :]
        trainX = tempX[validsize:, :]
        validsetY = self.y[0:validsize]
        trainY = self.y[validsize:]
        # NOTE(review): sys.maxint exists only on Python 2; use sys.maxsize on Python 3.
        previouserror = sys.maxint
        count = 0
        # Output-layer weights (hidden+bias -> output).
        self.wprime = np.random.rand(neuorons + 1, 1)
        finalW = self.w
        finalWprime = self.wprime
        iteration = 0
        momentum = .9
        prevloss = np.random.rand(self.w.shape[0], self.w.shape[1])
        prevlossprime = np.random.rand(self.wprime.shape[0], self.wprime.shape[1])
        while True:
            # Forward pass on the training set.
            u = np.dot(self.w.T, trainX.T)
            h = expit(u)
            temph = h
            h = np.insert(h, 0, 1, axis=0)
            h = np.array(h, dtype=np.float64)
            uprime = np.dot(self.wprime.T, h)
            yprime = expit(uprime)
            # Forward pass on the validation set to monitor generalization.
            uvalid = np.dot(self.w.T, validsetX.T)
            hvalid = expit(uvalid)
            hvalid = np.insert(hvalid, 0, 1, axis=0)
            uvalidprime = np.dot(self.wprime.T, hvalid)
            yvalidprime = expit(uvalidprime)
            # Half mean-squared validation error.
            currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
            if iteration >= maxiteration:
                finalW = self.w
                finalWprime = self.wprime
                break
            # Early stopping: snapshot weights on the first worsening step and
            # stop after 10 consecutive increases (past a minimum iteration count).
            if currenterror > previouserror:
                if count == 0:
                    finalW = self.w
                    finalWprime = self.wprime
                count = count + 1
                if count >= 10 and iteration > 100000:
                    break
            else:
                count = 0
            previouserror = currenterror
            # Backprop for the output layer: gradient + L2 penalty + momentum.
            regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
            l2delta = np.multiply(np.subtract(yprime, trainY.T), np.multiply(yprime, np.subtract(1, yprime)))
            lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
            self.wprime = np.subtract(self.wprime, lossprime.T)
            self.wprime = np.subtract(self.wprime, regwprime)
            self.wprime = np.subtract(self.wprime, np.multiply(momentum, prevlossprime))
            prevlossprime = lossprime.T
            # Backprop for the hidden layer (bias row of wprime excluded).
            tempWprime = self.wprime[1:]
            regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
            l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1 - temph))
            loss = learningrate * (trainX.T.dot(l1delta.T))
            self.w = np.subtract(self.w, loss)
            self.w = np.subtract(self.w, regw)
            self.w = np.subtract(self.w, np.multiply(momentum, prevloss))
            prevloss = loss
            iteration = iteration + 1
        # Restore the early-stopping snapshot.
        self.w = finalW
        self.wprime = finalWprime
        self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])]  # [(w,b),(w,b)]
    def get_params(self):
        """
        This code will return Weights and Bias of the trained network.
        Returns:
            tuple of numpy.ndarray: (w, b) per layer, as [(b, w), (b, w)].
        """
        return self.params
    def get_predictions(self, x):
        """
        This method will return prediction for unseen data.
        Args:
            x: array similar to ``x`` in ``data``. Might be of different size.
        Returns:
            numpy.ndarray: ``y`` which is a 1D array of 0/1 predictions of the
            same length as axis 0 of ``x`` (output thresholded at 0.5).
        """
        testX = np.insert(x, 0, 1, axis=1)
        utest = np.dot(self.w.T, testX.T)
        htest = expit(utest)
        htest = np.insert(htest, 0, 1, axis=0)
        utestprime = np.dot(self.wprime.T, htest)
        ytestprime = expit(utestprime)
        predY = ytestprime > .5
        predY = predY.astype(int)
        predY = predY.flatten()
        return predY
class mlnn(object):
    """Single-hidden-layer sigmoid network for binary image classification.

    Same architecture as ``xor_net`` but with 100 hidden units, inputs scaled
    by 1/255 and mean-centered, small random weight initialization, and
    full-batch gradient descent with L2 regularization (no momentum).

    Args:
        data: Is a tuple, ``(x,y)``
              ``x`` is a two or one dimensional ndarray ordered such that axis 0
              is independent data and data is spread along axis 1.
        labels: ``y``, a 1D ndarray of the same length as axis 0 of ``x``.
    """
    def __init__(self, data, labels):
        # Scale pixel values to [0, 1].
        self.x = data / 255.0
        self.y = labels
        # Smaller datasets get a larger learning rate; big ones fewer iterations.
        maxiteration=40000
        if self.x.shape[0]<=100:
            learningrate = .0001
        elif self.x.shape[0]<=500:
            learningrate=.0001
        else:
            learningrate = .00001
        if self.x.shape[0]>500:
            maxiteration=15000
        # L2 regularization coefficient.
        R = 0.01
        neuorons = 100
        # Hidden-layer weights (+1 bias row), small random init.
        self.w = 0.01 * np.random.rand(self.x.shape[1] + 1, neuorons)
        # Prepend a constant-1 column so the bias is learned as a weight.
        tempX = np.insert(self.x, 0, 1, axis=1)
        tempX = np.array(tempX, dtype=np.float64)
        # Hold out the first 20% of samples for early-stopping validation,
        # mean-centering each split independently.
        validsize = int(.2 * len(self.x))
        validsetX = tempX[0:validsize, :]
        validsetX -= np.mean(validsetX, axis=0)
        trainX = tempX[validsize:, :]
        trainX -= np.mean(trainX, axis=0)
        validsetY = self.y[0:validsize]
        trainY = self.y[validsize:]
        # NOTE(review): sys.maxint exists only on Python 2; use sys.maxsize on Python 3.
        previouserror = sys.maxint
        count = 0
        # Output-layer weights (hidden+bias -> output).
        self.wprime = 0.01 * np.random.rand(neuorons + 1, 1)
        finalW = self.w
        finalWprime = self.wprime
        iteration = 0
        while True:
            randomTrainX = trainX
            randomTrainY = trainY
            # Forward pass on the training set (explicit sigmoid).
            h = 1.0 / (1.0 + np.exp(-1.0 * np.dot(self.w.T, randomTrainX.T)))
            temph = h
            h = np.insert(h, 0, 1, axis=0)
            uprime = np.dot(self.wprime.T, h)
            yprime = expit(uprime)
            # Forward pass on the validation set to monitor generalization.
            uvalid = np.dot(self.w.T, validsetX.T)
            hvalid = expit(uvalid)
            hvalid = np.insert(hvalid, 0, 1, axis=0)
            uvalidprime = np.dot(self.wprime.T, hvalid)
            yvalidprime = expit(uvalidprime)
            # Half mean-squared validation error.
            currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
            if iteration >= maxiteration:
                finalW = self.w
                finalWprime = self.wprime
                break
            # Early stopping: snapshot weights on the first worsening step and
            # stop after 10 consecutive increases (past a minimum iteration count).
            if currenterror > previouserror:
                if count == 0:
                    finalW = self.w
                    finalWprime = self.wprime
                count = count + 1
                if count >= 10 and iteration>=10000:
                    break
            else:
                count = 0
            previouserror = currenterror
            # Backprop for the output layer: gradient + L2 penalty.
            regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
            l2delta = np.multiply(np.subtract(yprime, randomTrainY.T), np.multiply(yprime, np.subtract(1.0, yprime)))
            lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
            self.wprime = np.subtract(self.wprime, lossprime.T)
            self.wprime = np.subtract(self.wprime, regwprime)
            # Backprop for the hidden layer (bias row of wprime excluded).
            tempWprime = self.wprime[1:]
            regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
            l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1.0 - temph))
            loss = learningrate * (randomTrainX.T.dot(l1delta.T))
            self.w = np.subtract(self.w, loss)
            self.w = np.subtract(self.w, regw)
            iteration = iteration + 1
        # Restore the early-stopping snapshot.
        self.w = finalW
        self.wprime = finalWprime
        self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])]  # [(w,b),(w,b)]
    def get_params(self):
        """
        This code will return Weights and Bias of the trained network.
        Returns:
            tuple of numpy.ndarray: (w, b) per layer, as [(b, w), (b, w)].
        """
        return self.params
    def get_predictions(self, x):
        """
        This method will return prediction for unseen data.
        Args:
            x: array similar to ``x`` in ``data``. Might be of different size.
        Returns:
            numpy.ndarray: ``y`` which is a 1D array of 0/1 predictions of the
            same length as axis 0 of ``x`` (output thresholded at 0.5).
        """
        # Apply the same preprocessing as training: scale and mean-center.
        x = x / 255.0
        x -= np.mean(x, axis=0)
        testX = np.insert(x, 0, 1, axis=1)
        utest = np.dot(self.w.T, testX.T)
        htest = expit(utest)
        htest = np.insert(htest, 0, 1, axis=0)
        utestprime = np.dot(self.wprime.T, htest)
        ytestprime = expit(utestprime)
        predY = ytestprime > .5
        predY = predY.astype(int)
        predY = predY.flatten()
        return predY
if __name__ == '__main__':
    # Module is intended to be imported; nothing runs as a script.
    pass
| # sample_submission.py
import numpy as np
from scipy.special import expit
import sys
class xor_net(object):
    """Single-hidden-layer sigmoid network trained on XOR data.

    Architecture: input (+bias) -> 3 hidden sigmoid units (+bias) -> 1 sigmoid
    output. Training is full-batch gradient descent with momentum and L2
    regularization, with early stopping on a 20% validation split.

    Args:
        data: Is a tuple, ``(x,y)``
              ``x`` is a two or one dimensional ndarray ordered such that axis 0
              is independent data and data is spread along axis 1.
        labels: ``y``, a 1D ndarray of the same length as axis 0 of ``x``.
    """
    def __init__(self, data, labels):
        self.x = data
        self.y = labels
        # Smaller datasets get a larger learning rate and more iterations.
        maxiteration = 300000
        if self.x.shape[0] <= 100:
            learningrate = .001
            maxiteration = 1000000
        elif self.x.shape[0] <= 500:
            learningrate = .0001
            maxiteration = 500000
        else:
            learningrate = .00001
        # L2 regularization coefficient.
        R = .01
        xdimension = self.x.shape[1]
        neuorons = 3
        # Hidden-layer weights, +1 row for the bias term.
        self.w = np.random.rand(xdimension + 1, neuorons)
        # Prepend a constant-1 column so the bias is learned as a weight.
        tempX = np.insert(self.x, 0, 1, axis=1)
        tempX = np.array(tempX, dtype=np.float64)
        # Hold out the first 20% of samples for early-stopping validation.
        validsize = int(.2 * len(self.x))
        validsetX = tempX[0:validsize, :]
        trainX = tempX[validsize:, :]
        validsetY = self.y[0:validsize]
        trainY = self.y[validsize:]
        # NOTE(review): sys.maxint exists only on Python 2; use sys.maxsize on Python 3.
        previouserror = sys.maxint
        count = 0
        # Output-layer weights (hidden+bias -> output).
        self.wprime = np.random.rand(neuorons + 1, 1)
        finalW = self.w
        finalWprime = self.wprime
        iteration = 0
        momentum = .9
        prevloss = np.random.rand(self.w.shape[0], self.w.shape[1])
        prevlossprime = np.random.rand(self.wprime.shape[0], self.wprime.shape[1])
        while True:
            # Forward pass on the training set.
            u = np.dot(self.w.T, trainX.T)
            h = expit(u)
            temph = h
            h = np.insert(h, 0, 1, axis=0)
            h = np.array(h, dtype=np.float64)
            uprime = np.dot(self.wprime.T, h)
            yprime = expit(uprime)
            # Forward pass on the validation set to monitor generalization.
            uvalid = np.dot(self.w.T, validsetX.T)
            hvalid = expit(uvalid)
            hvalid = np.insert(hvalid, 0, 1, axis=0)
            uvalidprime = np.dot(self.wprime.T, hvalid)
            yvalidprime = expit(uvalidprime)
            # Half mean-squared validation error.
            currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
            if iteration >= maxiteration:
                finalW = self.w
                finalWprime = self.wprime
                break
            # Early stopping: snapshot weights on the first worsening step and
            # stop after 10 consecutive increases (past a minimum iteration count).
            if currenterror > previouserror:
                if count == 0:
                    finalW = self.w
                    finalWprime = self.wprime
                count = count + 1
                if count >= 10 and iteration > 100000:
                    break
            else:
                count = 0
            previouserror = currenterror
            # Backprop for the output layer: gradient + L2 penalty + momentum.
            regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
            l2delta = np.multiply(np.subtract(yprime, trainY.T), np.multiply(yprime, np.subtract(1, yprime)))
            lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
            self.wprime = np.subtract(self.wprime, lossprime.T)
            self.wprime = np.subtract(self.wprime, regwprime)
            self.wprime = np.subtract(self.wprime, np.multiply(momentum, prevlossprime))
            prevlossprime = lossprime.T
            # Backprop for the hidden layer (bias row of wprime excluded).
            tempWprime = self.wprime[1:]
            regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
            l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1 - temph))
            loss = learningrate * (trainX.T.dot(l1delta.T))
            self.w = np.subtract(self.w, loss)
            self.w = np.subtract(self.w, regw)
            self.w = np.subtract(self.w, np.multiply(momentum, prevloss))
            prevloss = loss
            iteration = iteration + 1
        # Restore the early-stopping snapshot.
        self.w = finalW
        self.wprime = finalWprime
        self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])]  # [(w,b),(w,b)]
    def get_params(self):
        """
        This code will return Weights and Bias of the trained network.
        Returns:
            tuple of numpy.ndarray: (w, b) per layer, as [(b, w), (b, w)].
        """
        return self.params
    def get_predictions(self, x):
        """
        This method will return prediction for unseen data.
        Args:
            x: array similar to ``x`` in ``data``. Might be of different size.
        Returns:
            numpy.ndarray: ``y`` which is a 1D array of 0/1 predictions of the
            same length as axis 0 of ``x`` (output thresholded at 0.5).
        """
        testX = np.insert(x, 0, 1, axis=1)
        utest = np.dot(self.w.T, testX.T)
        htest = expit(utest)
        htest = np.insert(htest, 0, 1, axis=0)
        utestprime = np.dot(self.wprime.T, htest)
        ytestprime = expit(utestprime)
        predY = ytestprime > .5
        predY = predY.astype(int)
        predY = predY.flatten()
        return predY
class mlnn(object):
"""
This code will train and test the Neural Network for image data.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
def __init__(self, data, labels):
self.x = data / 255.0
self.y = labels
maxiteration=40000
if self.x.shape[0]<=100:
learningrate = .0001
elif self.x.shape[0]<=500:
learningrate=.0001
else:
learningrate = .00001
if self.x.shape[0]>500:
maxiteration=15000
R = 0.01
neuorons = 100
self.w = 0.01 * np.random.rand(self.x.shape[1] + 1, neuorons)
tempX = np.insert(self.x, 0, 1, axis=1)
tempX = np.array(tempX, dtype=np.float64)
validsize = int(.2 * len(self.x))
validsetX = tempX[0:validsize, :]
validsetX -= np.mean(validsetX, axis=0)
trainX = tempX[validsize:, :]
trainX -= np.mean(trainX, axis=0)
validsetY = self.y[0:validsize]
trainY = self.y[validsize:]
previouserror = sys.maxint
count = 0
self.wprime = 0.01 * np.random.rand(neuorons + 1, 1)
finalW = self.w
finalWprime = self.wprime
iteration = 0
while True:
randomTrainX = trainX
randomTrainY = trainY
h = 1.0 / (1.0 + np.exp(-1.0 * np.dot(self.w.T, randomTrainX.T)))
temph = h
h = np.insert(h, 0, 1, axis=0)
uprime = np.dot(self.wprime.T, h)
yprime = expit(uprime)
uvalid = np.dot(self.w.T, validsetX.T)
hvalid = expit(uvalid)
hvalid = np.insert(hvalid, 0, 1, axis=0)
uvalidprime = np.dot(self.wprime.T, hvalid)
yvalidprime = expit(uvalidprime)
currenterror = (np.mean((validsetY - yvalidprime) ** 2)) / 2
if iteration >= maxiteration:
finalW = self.w
finalWprime = self.wprime
break
if currenterror > previouserror:
if count == 0:
finalW = self.w
finalWprime = self.wprime
count = count + 1
if count >= 10 and iteration>=10000:
break
else:
count = 0
previouserror = currenterror
regwprime = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.wprime)))
l2delta = np.multiply(np.subtract(yprime, randomTrainY.T), np.multiply(yprime, np.subtract(1.0, yprime)))
lossprime = np.multiply(learningrate, np.dot(l2delta, h.T))
self.wprime = np.subtract(self.wprime, lossprime.T)
self.wprime = np.subtract(self.wprime, regwprime)
tempWprime = self.wprime[1:]
regw = np.multiply(learningrate, np.multiply(2, np.multiply(R, self.w)))
l1delta = (l2delta.T.dot(tempWprime.T)).T * (temph * (1.0 - temph))
loss = learningrate * (randomTrainX.T.dot(l1delta.T))
self.w = np.subtract(self.w, loss)
self.w = np.subtract(self.w, regw)
iteration = iteration + 1
self.w = finalW
self.wprime = finalWprime
self.params = [(self.w[0, :], self.w[1:, :]), (self.wprime[0], self.wprime[1:])] # [(w,b),(w,b)]
def get_params(self):
"""
This code will return Weights and Bias of the trained network.
Returns:
tuple of numpy.ndarray: (w, b).
"""
return self.params
def get_predictions(self, x):
"""
This method will return prediction for unseen data.
Args:
x: array similar to ``x`` in ``data``. Might be of different size.
Returns:
numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of
``x``
"""
x = x / 255.0
x -= np.mean(x, axis=0)
testX = np.insert(x, 0, 1, axis=1)
utest = np.dot(self.w.T, testX.T)
htest = expit(utest)
htest = np.insert(htest, 0, 1, axis=0)
utestprime = np.dot(self.wprime.T, htest)
ytestprime = expit(utestprime)
predY = ytestprime > .5
predY = predY.astype(int)
predY = predY.flatten()
return predY
if __name__ == '__main__':
pass
| en | 0.87454 | # sample_submission.py This code will train and test the Neural Network for XOR data. Args: data: Is a tuple, ``(x,y)`` ``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent data and data is spread along axis 1. If the array had only one dimension, it implies that data is 1D. ``y`` is a 1D ndarray it will be of the same length as axis 0 or x. # [(w,b),(w,b)] This code will return Weights and Bias of the trained network. Returns: tuple of numpy.ndarray: (w, b). This method will return prediction for unseen data. Args: x: array similar to ``x`` in ``data``. Might be of different size. Returns: numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of ``x`` This code will train and test the Neural Network for image data. Args: data: Is a tuple, ``(x,y)`` ``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent data and data is spread along axis 1. If the array had only one dimension, it implies that data is 1D. ``y`` is a 1D ndarray it will be of the same length as axis 0 or x. # [(w,b),(w,b)] This code will return Weights and Bias of the trained network. Returns: tuple of numpy.ndarray: (w, b). This method will return prediction for unseen data. Args: x: array similar to ``x`` in ``data``. Might be of different size. Returns: numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of ``x`` | 3.585412 | 4 |
ibm_db_django/jybase.py | SabaKauser/python-ibmdb-django | 0 | 6624928 | <filename>ibm_db_django/jybase.py
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2017. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: <NAME>, <NAME>, <NAME> |
# +--------------------------------------------------------------------------+
# Importing necessary classes
try:
from com.ziclix.python.sql import zxJDBC, PyConnection, DataHandler, PyCursor
import datetime, decimal
from java.sql import Connection
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured( "Error loading zxJDBC module: %s" % e )
# For checking django's version
from django import VERSION as djangoVersion
DatabaseError = zxJDBC.DatabaseError
IntegrityError = zxJDBC.IntegrityError
if ( djangoVersion[0:2] >= ( 1, 6 )):
Error = zxJDBC.Error
InterfaceError = zxJDBC.InterfaceError
DataError = zxJDBC.DataError
OperationalError = zxJDBC.OperationalError
InternalError = zxJDBC.InternalError
ProgrammingError = zxJDBC.ProgrammingError
NotSupportedError = zxJDBC.NotSupportedError
class DatabaseWrapper( object ):
# Get new database connection for non persistance connection
def get_new_connection(self, kwargs):
self.connectionFactory = kwargs.get( 'options' ) and kwargs.get( 'options' ).get( 'CONNECTION_FACTORY' ) or None
if self.connectionFactory:
con = self.connectionFactory.getConnection()
connection = PyConnection( con )
else:
host = kwargs.get( 'host' ) or 'localhost'
port = kwargs.get( 'port' ) and ( ':%s' % kwargs.get( 'port' )) or ''
DriverType = 4
kwargsKeys = kwargs.keys()
if kwargsKeys.__contains__( 'DriverType' ):
DriverType = kwargs['DriverType']
if DriverType == 4:
conn_string = "jdbc:db2://%s%s/%s" % ( host, port, kwargs.get( 'database' ) )
elif DriverType == 2:
conn_string = "jdbc:db2:%s" % ( kwargs.get( 'database' ) )
else:
raise ImproperlyConfigured( "Wrong Driver type" )
# for Setting default AUTO COMMIT off on connection.
autocommit = False
if kwargs.get( 'options' ) and kwargs.get( 'options' ).keys().__contains__( 'autocommit' ):
autocommit = kwargs.get( 'options' ).get( 'autocommit' )
del kwargs.get( 'options' )['autocommit']
connection = zxJDBC.connect( conn_string,
kwargs.get( 'user' ),
kwargs.get( 'password' ),
'com.ibm.db2.jcc.DB2Driver', kwargs.get( 'options' ) )
# To prevent dirty reads
self.__prevent_dirty_reads( connection )
connection.__connection__.setAutoCommit( autocommit )
return connection
def is_active( self, connection ):
cursor = connection.cursor()
try:
cursor.execute("select 1 from sysibm.sysdummy1")
return True
except:
return False
# Over-riding _cursor method to return DB2 cursor.
def _cursor( self, connection):
return DB2CursorWrapper( connection.cursor() )
def close( self, connection ):
if self.connectionFactory:
self.connectionFactory.closeConnection( connection.__connection__ )
else:
# db2 fails if active transaction ....
connection.rollback()
connection.close()
#prohibits a transaction from reading a row with uncommitted changes in it.
def __prevent_dirty_reads( self, connection ):
JDBC_conn = connection.__connection__
JDBC_conn.setTransactionIsolation( Connection.TRANSACTION_READ_COMMITTED )
def get_server_version( self, connection ):
version = connection.dbversion.split( "SQL" )[1]
return ( int( version[:2] ), int( version[2:4] ), int( version[4:] ) )
class DB2CursorWrapper( object ):
def __init__( self, cursor ):
self.cursor = cursor
# Over-riding this method to modify SQLs which contains format parameter to qmark.
def execute( self, operation, parameters = () ):
try:
operation = operation % ( tuple( "?" * operation.count( "%s" ) ) )
if operation.endswith( ';' ) or operation.endswith( '/' ):
operation = operation[:-1]
returnValue = self.cursor.execute( operation, parameters )
if ( self.cursor.updatecount is not None ) and self.cursor.updatecount != -1:
self.rowcount = self.cursor.updatecount
else:
self.rowcount = self.cursor.rowcount
return returnValue
except ( TypeError ):
return None
# Over-riding this method to modify SQLs which contains format parameter to qmark.
def executemany( self, operation, seq_parameters ):
try:
operation = operation % ( tuple( "?" * operation.count( "%s" ) ) )
if operation.endswith( ';' ) or operation.endswith( '/' ):
operation = operation[:-1]
returnValue = self.cursor.executemany( operation, seq_parameters )
if ( self.cursor.updatecount is not None ) and self.cursor.updatecount != -1:
self.rowcount = self.cursor.updatecount
else:
self.rowcount = self.cursor.rowcount
return returnValue
except ( IndexError, TypeError ):
return None
def fetchone( self, ):
row = self.cursor.fetchone()
return self._fix_return_data_type( row )
def fetchmany( self, size = 0 ):
if size == 0:
size = self.arraysize
row_list = []
for row in self.cursor.fetchmany( size ):
row_list.append( self._fix_return_data_type( row ) )
return row_list
def fetchall( self ):
row_list = []
for row in self.cursor.fetchall():
row_list.append( self._fix_return_data_type( row ) )
return row_list
def __getattr__( self, attr ):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr( self.cursor, attr )
# This method is used to convert a string representing date/time
# and binary data in a row tuple fetched from the database
# to date/time and binary objects, for returning it to the user.
def _fix_return_data_type( self, row ):
row = list( row )
description = self.cursor.description
for index in range( len( row ) ):
if row[index] is not None:
if description[index][1] == zxJDBC.BLOB:
row[index] = buffer( row[index] )
elif description[index][1] == zxJDBC.DECIMAL:
row[index] = decimal.Decimal( str( row[index] ).replace( ",", "." ) )
return tuple( row )
| <filename>ibm_db_django/jybase.py
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2017. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: <NAME>, <NAME>, <NAME> |
# +--------------------------------------------------------------------------+
# Importing necessary classes
try:
from com.ziclix.python.sql import zxJDBC, PyConnection, DataHandler, PyCursor
import datetime, decimal
from java.sql import Connection
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured( "Error loading zxJDBC module: %s" % e )
# For checking django's version
from django import VERSION as djangoVersion
DatabaseError = zxJDBC.DatabaseError
IntegrityError = zxJDBC.IntegrityError
if ( djangoVersion[0:2] >= ( 1, 6 )):
Error = zxJDBC.Error
InterfaceError = zxJDBC.InterfaceError
DataError = zxJDBC.DataError
OperationalError = zxJDBC.OperationalError
InternalError = zxJDBC.InternalError
ProgrammingError = zxJDBC.ProgrammingError
NotSupportedError = zxJDBC.NotSupportedError
class DatabaseWrapper( object ):
# Get new database connection for non persistance connection
def get_new_connection(self, kwargs):
self.connectionFactory = kwargs.get( 'options' ) and kwargs.get( 'options' ).get( 'CONNECTION_FACTORY' ) or None
if self.connectionFactory:
con = self.connectionFactory.getConnection()
connection = PyConnection( con )
else:
host = kwargs.get( 'host' ) or 'localhost'
port = kwargs.get( 'port' ) and ( ':%s' % kwargs.get( 'port' )) or ''
DriverType = 4
kwargsKeys = kwargs.keys()
if kwargsKeys.__contains__( 'DriverType' ):
DriverType = kwargs['DriverType']
if DriverType == 4:
conn_string = "jdbc:db2://%s%s/%s" % ( host, port, kwargs.get( 'database' ) )
elif DriverType == 2:
conn_string = "jdbc:db2:%s" % ( kwargs.get( 'database' ) )
else:
raise ImproperlyConfigured( "Wrong Driver type" )
# for Setting default AUTO COMMIT off on connection.
autocommit = False
if kwargs.get( 'options' ) and kwargs.get( 'options' ).keys().__contains__( 'autocommit' ):
autocommit = kwargs.get( 'options' ).get( 'autocommit' )
del kwargs.get( 'options' )['autocommit']
connection = zxJDBC.connect( conn_string,
kwargs.get( 'user' ),
kwargs.get( 'password' ),
'com.ibm.db2.jcc.DB2Driver', kwargs.get( 'options' ) )
# To prevent dirty reads
self.__prevent_dirty_reads( connection )
connection.__connection__.setAutoCommit( autocommit )
return connection
def is_active( self, connection ):
cursor = connection.cursor()
try:
cursor.execute("select 1 from sysibm.sysdummy1")
return True
except:
return False
# Over-riding _cursor method to return DB2 cursor.
def _cursor( self, connection):
return DB2CursorWrapper( connection.cursor() )
def close( self, connection ):
if self.connectionFactory:
self.connectionFactory.closeConnection( connection.__connection__ )
else:
# db2 fails if active transaction ....
connection.rollback()
connection.close()
#prohibits a transaction from reading a row with uncommitted changes in it.
def __prevent_dirty_reads( self, connection ):
JDBC_conn = connection.__connection__
JDBC_conn.setTransactionIsolation( Connection.TRANSACTION_READ_COMMITTED )
def get_server_version( self, connection ):
version = connection.dbversion.split( "SQL" )[1]
return ( int( version[:2] ), int( version[2:4] ), int( version[4:] ) )
class DB2CursorWrapper( object ):
def __init__( self, cursor ):
self.cursor = cursor
# Over-riding this method to modify SQLs which contains format parameter to qmark.
def execute( self, operation, parameters = () ):
try:
operation = operation % ( tuple( "?" * operation.count( "%s" ) ) )
if operation.endswith( ';' ) or operation.endswith( '/' ):
operation = operation[:-1]
returnValue = self.cursor.execute( operation, parameters )
if ( self.cursor.updatecount is not None ) and self.cursor.updatecount != -1:
self.rowcount = self.cursor.updatecount
else:
self.rowcount = self.cursor.rowcount
return returnValue
except ( TypeError ):
return None
# Over-riding this method to modify SQLs which contains format parameter to qmark.
def executemany( self, operation, seq_parameters ):
try:
operation = operation % ( tuple( "?" * operation.count( "%s" ) ) )
if operation.endswith( ';' ) or operation.endswith( '/' ):
operation = operation[:-1]
returnValue = self.cursor.executemany( operation, seq_parameters )
if ( self.cursor.updatecount is not None ) and self.cursor.updatecount != -1:
self.rowcount = self.cursor.updatecount
else:
self.rowcount = self.cursor.rowcount
return returnValue
except ( IndexError, TypeError ):
return None
def fetchone( self, ):
row = self.cursor.fetchone()
return self._fix_return_data_type( row )
def fetchmany( self, size = 0 ):
if size == 0:
size = self.arraysize
row_list = []
for row in self.cursor.fetchmany( size ):
row_list.append( self._fix_return_data_type( row ) )
return row_list
def fetchall( self ):
row_list = []
for row in self.cursor.fetchall():
row_list.append( self._fix_return_data_type( row ) )
return row_list
def __getattr__( self, attr ):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr( self.cursor, attr )
# This method is used to convert a string representing date/time
# and binary data in a row tuple fetched from the database
# to date/time and binary objects, for returning it to the user.
def _fix_return_data_type( self, row ):
row = list( row )
description = self.cursor.description
for index in range( len( row ) ):
if row[index] is not None:
if description[index][1] == zxJDBC.BLOB:
row[index] = buffer( row[index] )
elif description[index][1] == zxJDBC.DECIMAL:
row[index] = decimal.Decimal( str( row[index] ).replace( ",", "." ) )
return tuple( row )
| en | 0.710044 | # +--------------------------------------------------------------------------+ # | Licensed Materials - Property of IBM | # | | # | (C) Copyright IBM Corporation 2009-2017. | # +--------------------------------------------------------------------------+ # | This module complies with Django 1.0 and is | # | Licensed under the Apache License, Version 2.0 (the "License"); | # | you may not use this file except in compliance with the License. | # | You may obtain a copy of the License at | # | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable | # | law or agreed to in writing, software distributed under the License is | # | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | # | KIND, either express or implied. See the License for the specific | # | language governing permissions and limitations under the License. | # +--------------------------------------------------------------------------+ # | Authors: <NAME>, <NAME>, <NAME> | # +--------------------------------------------------------------------------+ # Importing necessary classes # For checking django's version # Get new database connection for non persistance connection # for Setting default AUTO COMMIT off on connection. # To prevent dirty reads # Over-riding _cursor method to return DB2 cursor. # db2 fails if active transaction .... #prohibits a transaction from reading a row with uncommitted changes in it. # Over-riding this method to modify SQLs which contains format parameter to qmark. # Over-riding this method to modify SQLs which contains format parameter to qmark. # This method is used to convert a string representing date/time # and binary data in a row tuple fetched from the database # to date/time and binary objects, for returning it to the user. | 1.514403 | 2 |
tests/test_dtmp.py | rcurrie/rest-template | 0 | 6624929 | import requests
server = "http://localhost:5000/"
def test_hello():
"""
Test hello endpoint
"""
r = requests.get(server + "hello")
assert(r.status_code == requests.codes.ok)
assert(r.text == "hello")
| import requests
server = "http://localhost:5000/"
def test_hello():
"""
Test hello endpoint
"""
r = requests.get(server + "hello")
assert(r.status_code == requests.codes.ok)
assert(r.text == "hello")
| en | 0.311012 | Test hello endpoint | 2.83812 | 3 |
spark_auto_mapper_fhir/value_sets/table_cell_scope.py | imranq2/SparkAutoMapper.FHIR | 1 | 6624930 | <filename>spark_auto_mapper_fhir/value_sets/table_cell_scope.py
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class TableCellScope(GenericTypeCode):
"""
v3.TableCellScope
From: http://terminology.hl7.org/ValueSet/v3-TableCellScope in v3-codesystems.xml
These values are defined within the XHTML 4.0 Table Model
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-TableCellScope
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-TableCellScope"
class TableCellScopeValues:
"""
col
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Col = TableCellScope("col")
"""
colgroup
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Colgroup = TableCellScope("colgroup")
"""
row
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Row = TableCellScope("row")
"""
rowgroup
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Rowgroup = TableCellScope("rowgroup")
| <filename>spark_auto_mapper_fhir/value_sets/table_cell_scope.py
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class TableCellScope(GenericTypeCode):
"""
v3.TableCellScope
From: http://terminology.hl7.org/ValueSet/v3-TableCellScope in v3-codesystems.xml
These values are defined within the XHTML 4.0 Table Model
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-TableCellScope
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-TableCellScope"
class TableCellScopeValues:
"""
col
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Col = TableCellScope("col")
"""
colgroup
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Colgroup = TableCellScope("colgroup")
"""
row
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Row = TableCellScope("row")
"""
rowgroup
From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml
"""
Rowgroup = TableCellScope("rowgroup")
| en | 0.360376 | # This file is auto-generated by generate_classes so do not edit manually # noinspection PyPep8Naming v3.TableCellScope From: http://terminology.hl7.org/ValueSet/v3-TableCellScope in v3-codesystems.xml These values are defined within the XHTML 4.0 Table Model http://terminology.hl7.org/CodeSystem/v3-TableCellScope col From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml colgroup From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml row From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml rowgroup From: http://terminology.hl7.org/CodeSystem/v3-TableCellScope in v3-codesystems.xml | 1.900437 | 2 |
monitor/tests/test_models/test_tweet_response.py | arineto/twitter_monitor | 1 | 6624931 | <reponame>arineto/twitter_monitor
from django.test import TestCase
from model_mommy import mommy
class TestTweetResponse(TestCase):
def setUp(self):
self.response = mommy.make('monitor.TweetResponse')
def test__str__(self):
self.assertEqual(
str(self.response),
'{} ({})'.format(str(self.response.user), str(self.response.tweet))
)
| from django.test import TestCase
from model_mommy import mommy
class TestTweetResponse(TestCase):
def setUp(self):
self.response = mommy.make('monitor.TweetResponse')
def test__str__(self):
self.assertEqual(
str(self.response),
'{} ({})'.format(str(self.response.user), str(self.response.tweet))
) | none | 1 | 2.563984 | 3 | |
zerver/tornado/event_queue.py | narendrapsgim/zulip | 17,004 | 6624932 | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import atexit
import copy
import logging
import os
import random
import signal
import sys
import time
import traceback
from collections import deque
from dataclasses import asdict
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Deque,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import orjson
import tornado.ioloop
from django.conf import settings
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from version import API_FEATURE_LEVEL, ZULIP_MERGE_BASE, ZULIP_VERSION
from zerver.decorator import cachify
from zerver.lib.exceptions import JsonableError
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.notification_data import UserMessageNotificationsData
from zerver.lib.queue import queue_json_publish, retry_event
from zerver.lib.utils import statsd
from zerver.middleware import async_request_timer_restart
from zerver.tornado.autoreload import add_reload_hook
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.handlers import (
clear_handler_by_id,
finish_handler,
get_handler_by_id,
handler_stats_string,
)
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10  # 10 minutes
# We garbage-collect every minute; this is totally fine given that the
# GC scan takes ~2ms with 1000 event queues.
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 1  # 1 minute, in milliseconds
# Capped limit for how long a client can request an event queue
# to live (via the lifespan_secs registration parameter).
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60  # 1 week
# The heartbeats effectively act as a server-side timeout for
# get_events().  The actual timeout value is randomized for each
# client connection based on the below value.  We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
def create_heartbeat_event() -> Dict[str, str]:
    """Build the no-op event periodically delivered to keep connections alive."""
    return {"type": "heartbeat"}
class ClientDescriptor:
def __init__(
self,
user_profile_id: int,
realm_id: int,
event_queue: "EventQueue",
event_types: Optional[Sequence[str]],
client_type_name: str,
apply_markdown: bool = True,
client_gravatar: bool = True,
slim_presence: bool = False,
all_public_streams: bool = False,
lifespan_secs: int = 0,
narrow: Collection[Sequence[str]] = [],
bulk_message_deletion: bool = False,
stream_typing_notifications: bool = False,
user_settings_object: bool = False,
) -> None:
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.realm_id = realm_id
self.current_handler_id: Optional[int] = None
self.current_client_name: Optional[str] = None
self.event_queue = event_queue
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.client_gravatar = client_gravatar
self.slim_presence = slim_presence
self.all_public_streams = all_public_streams
self.client_type_name = client_type_name
self._timeout_handle: Any = None # TODO: should be return type of ioloop.call_later
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
self.bulk_message_deletion = bulk_message_deletion
self.stream_typing_notifications = stream_typing_notifications
self.user_settings_object = user_settings_object
# Default for lifespan_secs is DEFAULT_EVENT_QUEUE_TIMEOUT_SECS;
# but users can set it as high as MAX_QUEUE_TIMEOUT_SECS.
if lifespan_secs == 0:
lifespan_secs = DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
self.queue_timeout = min(lifespan_secs, MAX_QUEUE_TIMEOUT_SECS)
def to_dict(self) -> Dict[str, Any]:
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(
user_profile_id=self.user_profile_id,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
client_gravatar=self.client_gravatar,
slim_presence=self.slim_presence,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name,
bulk_message_deletion=self.bulk_message_deletion,
stream_typing_notifications=self.stream_typing_notifications,
user_settings_object=self.user_settings_object,
)
def __repr__(self) -> str:
return f"ClientDescriptor<{self.event_queue.id}>"
    @classmethod
    def from_dict(cls, d: MutableMapping[str, Any]) -> "ClientDescriptor":
        """Restore a descriptor previously serialized by to_dict().

        Note that ``d`` is patched in place below to upgrade payloads
        written by older server versions that predate newer fields.
        """
        if "client_type" in d:
            # Temporary migration for the rename of client_type to client_type_name
            d["client_type_name"] = d["client_type"]
        if "client_gravatar" not in d:
            # Temporary migration for the addition of the client_gravatar field
            d["client_gravatar"] = False
        if "slim_presence" not in d:
            # Temporary migration for the addition of the slim_presence field
            d["slim_presence"] = False
        ret = cls(
            d["user_profile_id"],
            d["realm_id"],
            EventQueue.from_dict(d["event_queue"]),
            d["event_types"],
            d["client_type_name"],
            d["apply_markdown"],
            d["client_gravatar"],
            d["slim_presence"],
            d["all_public_streams"],
            d["queue_timeout"],
            # The remaining fields may be absent in old payloads, so fall
            # back to the same defaults __init__ would use.
            d.get("narrow", []),
            d.get("bulk_message_deletion", False),
            d.get("stream_typing_notifications", False),
            d.get("user_settings_object", False),
        )
        # last_connection_time is not an __init__ parameter; restore it
        # explicitly rather than letting __init__ reset it to time.time().
        ret.last_connection_time = d["last_connection_time"]
        return ret
def add_event(self, event: Mapping[str, Any]) -> None:
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_timer_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
    def finish_current_handler(self) -> bool:
        """Deliver the queue's contents to the blocked long-poll request, if any.

        Returns True if a handler was connected (it has now been finished
        and disconnected), False if no request was waiting.
        """
        if self.current_handler_id is not None:
            err_msg = f"Got error finishing handler for queue {self.event_queue.id}"
            try:
                finish_handler(
                    self.current_handler_id,
                    self.event_queue.id,
                    self.event_queue.contents(),
                    self.apply_markdown,
                )
            except Exception:
                logging.exception(err_msg, stack_info=True)
            finally:
                # Always drop the handler, even if finishing it raised, so we
                # never try to write to this (possibly broken) request again.
                self.disconnect_handler()
            return True
        return False
def accepts_event(self, event: Mapping[str, Any]) -> bool:
    """Decide whether this client registration wants the given event."""
    event_type = event["type"]
    if self.event_types is not None and event_type not in self.event_types:
        # The client registered for a restricted set of event types.
        return False
    if event_type == "message":
        # Message delivery is additionally subject to the narrow filter.
        return self.narrow_filter(event)
    if event_type == "typing" and "stream_id" in event:
        # Typing notifications for stream messages are only
        # delivered if the stream_typing_notifications
        # client_capability is enabled, for backwards compatibility.
        return self.stream_typing_notifications
    if self.user_settings_object and event_type in [
        "update_display_settings",
        "update_global_notifications",
    ]:
        # Clients that understand the consolidated 'user_settings' event
        # do not receive the legacy per-setting events.
        return False
    return True
# TODO: Refactor so we don't need this function
def accepts_messages(self) -> bool:
    """True if this registration is interested in message events at all."""
    if self.event_types is None:
        return True
    return "message" in self.event_types
def expired(self, now: float) -> bool:
    """True if the queue is idle and its timeout has elapsed as of *now*."""
    if self.current_handler_id is not None:
        # A connected long-poll request keeps the queue alive.
        return False
    return now - self.last_connection_time >= self.queue_timeout
def connect_handler(self, handler_id: int, client_name: str) -> None:
    """Attach a long-poll handler to this queue and schedule a heartbeat.

    The scheduled timeout pushes a heartbeat event after a jittered
    interval, so a parked long-poll request eventually receives some
    event even if nothing else happens.
    """
    self.current_handler_id = handler_id
    self.current_client_name = client_name
    set_descriptor_by_handler_id(handler_id, self)
    self.last_connection_time = time.time()
    def timeout_callback() -> None:
        self._timeout_handle = None
        # All clients get heartbeat events
        heartbeat_event = create_heartbeat_event()
        self.add_event(heartbeat_event)
    ioloop = tornado.ioloop.IOLoop.instance()
    # Jitter the interval so queues don't all heartbeat at once.
    interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
    if self.client_type_name != "API: heartbeat test":
        # "API: heartbeat test" clients deliberately never get a
        # heartbeat scheduled — presumably used to test the
        # no-heartbeat path; confirm against the test suite.
        self._timeout_handle = ioloop.call_later(interval, timeout_callback)
def disconnect_handler(self, client_closed: bool = False) -> None:
    """Detach the current long-poll handler, if any, and cancel the
    pending heartbeat timeout.

    client_closed marks a disconnect initiated by the client side; it
    only affects logging.
    """
    if self.current_handler_id:
        clear_descriptor_by_handler_id(self.current_handler_id)
        clear_handler_by_id(self.current_handler_id)
        if client_closed:
            logging.info(
                "Client disconnected for queue %s (%s via %s)",
                self.event_queue.id,
                self.user_profile_id,
                self.current_client_name,
            )
    self.current_handler_id = None
    self.current_client_name = None
    if self._timeout_handle is not None:
        # Cancel the heartbeat scheduled by connect_handler.
        ioloop = tornado.ioloop.IOLoop.instance()
        ioloop.remove_timeout(self._timeout_handle)
        self._timeout_handle = None
def cleanup(self) -> None:
    """Finish any connected handler, then garbage-collect this queue."""
    # Before we can GC the event queue, we need to disconnect the
    # handler and notify the client (or connection server) so that
    # they can clean up their own state related to the GC'd event
    # queue. Finishing the handler before we GC ensures the
    # invariant that event queues are idle when passed to
    # `do_gc_event_queues` is preserved.
    self.finish_current_handler()
    do_gc_event_queues({self.event_queue.id}, {self.user_profile_id}, {self.realm_id})
def compute_full_event_type(event: Mapping[str, Any]) -> str:
    """Return the coalescing-bucket key for *event*.

    update_message_flags events are subdivided by operation and flag so
    that equivalent flag updates can be merged; every other event type
    maps to itself.
    """
    event_type = event["type"]
    if event_type != "update_message_flags":
        return event_type
    if event["all"]:
        # Put the "all" case in its own category
        return "all_flags/{}/{}".format(event["flag"], event["operation"])
    return "flags/{}/{}".format(event["operation"], event["flag"])
class EventQueue:
    """A single client's ordered queue of events.

    Events are assigned sequential integer ids.  "Virtual" events
    (restart events and message-flag updates) are coalesced per full
    event type (see compute_full_event_type), so a burst of equivalent
    updates is delivered to the client as one merged event.
    """

    def __init__(self, id: str) -> None:
        # When extending this list of properties, one must be sure to
        # update to_dict and from_dict.

        # Real (non-coalesced) events, in delivery order.
        self.queue: Deque[Dict[str, Any]] = deque()
        # Id that will be assigned to the next pushed event.
        self.next_event_id: int = 0
        # Highest event id removed by prune(); will only be None for
        # migration from old versions.
        self.newest_pruned_id: Optional[int] = -1
        self.id: str = id
        # Coalesced events, keyed by their full event type.
        self.virtual_events: Dict[str, Dict[str, Any]] = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for the persistent-queue dump file."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        d = dict(
            id=self.id,
            next_event_id=self.next_event_id,
            queue=list(self.queue),
            virtual_events=self.virtual_events,
        )
        if self.newest_pruned_id is not None:
            d["newest_pruned_id"] = self.newest_pruned_id
        return d

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "EventQueue":
        """Inverse of to_dict(), with defaults for keys that dumps
        written by older servers may lack."""
        ret = cls(d["id"])
        ret.next_event_id = d["next_event_id"]
        ret.newest_pruned_id = d.get("newest_pruned_id", None)
        ret.queue = deque(d["queue"])
        ret.virtual_events = d.get("virtual_events", {})
        return ret

    def push(self, orig_event: Mapping[str, Any]) -> None:
        """Append an event, assigning it the next sequential id.

        Restart and message-flag events are merged into virtual_events
        rather than appended individually.
        """
        # By default, we make a shallow copy of the event dictionary
        # to push into the target event queue; this allows the calling
        # code to send the same "event" object to multiple queues.
        # This behavior is important because the event_queue system is
        # about to mutate the event dictionary, minimally to add the
        # event_id attribute.
        event = dict(orig_event)
        event["id"] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if full_event_type == "restart" or full_event_type.startswith("flags/"):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)

    # Note that pop ignores virtual events. This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self) -> Dict[str, Any]:
        """Remove and return the oldest real (non-virtual) event."""
        return self.queue.popleft()

    def empty(self) -> bool:
        """True when there are no events of either kind."""
        return len(self.queue) == 0 and len(self.virtual_events) == 0

    # See the comment on pop; that applies here as well
    def prune(self, through_id: int) -> None:
        """Discard all real events with id <= through_id (events the
        client has acknowledged receiving)."""
        while len(self.queue) != 0 and self.queue[0]["id"] <= through_id:
            self.newest_pruned_id = self.queue[0]["id"]
            self.pop()

    def contents(self, include_internal_data: bool = False) -> List[Dict[str, Any]]:
        """Return all events with virtual events merged into id order.

        Side effect: the merged list replaces self.queue, and
        virtual_events is reset to empty.
        """
        contents: List[Dict[str, Any]] = []
        # Event ids are ints (assigned in push), so this map is keyed
        # by int — the previous Dict[str, ...] annotation was wrong.
        virtual_id_map: Dict[int, Dict[str, Any]] = {}
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(virtual_id_map.keys())
        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1
        self.virtual_events = {}
        self.queue = deque(contents)
        if include_internal_data:
            return contents
        return prune_internal_data(contents)
def prune_internal_data(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Prunes the internal_data data structures, which are not intended to
    be exposed to API clients.

    Operates on (and returns) a deep copy, leaving the input untouched.
    """
    pruned = copy.deepcopy(events)
    for pruned_event in pruned:
        if pruned_event["type"] == "message":
            pruned_event.pop("internal_data", None)
    return pruned
# Maps queue ids to client descriptors.
clients: Dict[str, ClientDescriptor] = {}
# Maps user id to the list of that user's client descriptors.
user_clients: Dict[int, List[ClientDescriptor]] = {}
# Maps realm id to the list of client descriptors registered with
# all_public_streams=True (or a nonempty narrow); see add_to_client_dicts.
realm_clients_all_streams: Dict[int, List[ClientDescriptor]] = {}
# List of registered gc hooks.
# Each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted.
gc_hooks: List[Callable[[int, ClientDescriptor, bool], None]] = []
# Process-local counter used by allocate_client_descriptor to mint queue ids.
next_queue_id = 0
def clear_client_event_queues_for_testing() -> None:
    """Reset all event-queue global state; callable only from the test suite."""
    assert settings.TEST_SUITE
    global next_queue_id
    for registry in (clients, user_clients, realm_clients_all_streams):
        registry.clear()
    gc_hooks.clear()
    next_queue_id = 0
def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
    """Register *hook* to be called for each queue removed by do_gc_event_queues."""
    gc_hooks.append(hook)
def get_client_descriptor(queue_id: str) -> ClientDescriptor:
    """Look up a registered queue, raising BadEventQueueIdError if unknown."""
    try:
        return clients[queue_id]
    except KeyError:
        raise BadEventQueueIdError(queue_id)
def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
    """Return the user's registered client descriptors ([] if none)."""
    try:
        return user_clients[user_profile_id]
    except KeyError:
        return []
def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
    """Return the realm's all-public-streams client descriptors ([] if none)."""
    try:
        return realm_clients_all_streams[realm_id]
    except KeyError:
        return []
def add_to_client_dicts(client: ClientDescriptor) -> None:
    """Index *client* into the per-user map, and into the per-realm map
    when it is registered for all public streams or with a narrow."""
    user_list = user_clients.setdefault(client.user_profile_id, [])
    user_list.append(client)
    if client.all_public_streams or client.narrow != []:
        realm_list = realm_clients_all_streams.setdefault(client.realm_id, [])
        realm_list.append(client)
def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
    """Create, register, and return a descriptor with a freshly minted queue id.

    Queue ids combine settings.SERVER_GENERATION with a process-local
    counter.  Mutates new_queue_data by inserting the serialized queue.
    """
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ":" + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client
def do_gc_event_queues(
    to_remove: AbstractSet[str], affected_users: AbstractSet[int], affected_realms: AbstractSet[int]
) -> None:
    """Remove the queues in *to_remove* from every registry, running gc hooks.

    affected_users/affected_realms limit which per-user and per-realm
    index entries need to be rebuilt.
    """
    def filter_client_dict(
        client_dict: MutableMapping[int, List[ClientDescriptor]], key: int
    ) -> None:
        # Drop removed queues from client_dict[key], deleting the key
        # entirely if no descriptors remain.
        if key not in client_dict:
            return
        new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
        if len(new_client_list) == 0:
            del client_dict[key]
        else:
            client_dict[key] = new_client_list
    for user_id in affected_users:
        filter_client_dict(user_clients, user_id)
    for realm_id in affected_realms:
        filter_client_dict(realm_clients_all_streams, realm_id)
    for id in to_remove:
        for cb in gc_hooks:
            # The final argument is True when this was the user's last
            # queue: their user_clients entry was already deleted above.
            cb(
                clients[id].user_profile_id,
                clients[id],
                clients[id].user_profile_id not in user_clients,
            )
        del clients[id]
def gc_event_queues(port: int) -> None:
    """Periodic garbage collection: drop every expired client queue.

    Scheduled via the PeriodicCallback installed by setup_event_queue.
    """
    start = time.time()
    to_remove: Set[str] = set()
    affected_users: Set[int] = set()
    affected_realms: Set[int] = set()
    for (id, client) in clients.items():
        if client.expired(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)
    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle (because
    # they are expired) and thus not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)
    if settings.PRODUCTION:
        logging.info(
            "Tornado %d removed %d expired event queues owned by %d users in %.3fs."
            " Now %d active queues, %s",
            port,
            len(to_remove),
            len(affected_users),
            time.time() - start,
            len(clients),
            handler_stats_string(),
        )
    statsd.gauge("tornado.active_queues", len(clients))
    statsd.gauge("tornado.active_users", len(user_clients))
def persistent_queue_filename(port: int, last: bool = False) -> str:
    """Path of the event-queue dump file for *port*.

    With last=True, return the ".last" name the consumed dump is
    renamed to by setup_event_queue.
    """
    if settings.TORNADO_PROCESSES == 1:
        # Use non-port-aware, legacy version.
        filename = settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ("",)
        if last:
            return filename + ".last"
        return filename
    infix = f".{port}.last" if last else f".{port}"
    return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % (infix,)
def dump_event_queues(port: int) -> None:
    """Serialize all client queues to this port's persistent-queue file.

    Registered by setup_event_queue to run at exit and on reload so
    queues survive a Tornado restart.
    """
    start = time.time()
    with open(persistent_queue_filename(port), "wb") as stored_queues:
        stored_queues.write(
            orjson.dumps([(qid, client.to_dict()) for (qid, client) in clients.items()])
        )
    if len(clients) > 0 or settings.PRODUCTION:
        logging.info(
            "Tornado %d dumped %d event queues in %.3fs", port, len(clients), time.time() - start
        )
def load_event_queues(port: int) -> None:
    """Restore client queues dumped by a previous process, if any.

    Deserialization failures are logged and leave `clients` empty
    rather than preventing startup.
    """
    global clients
    start = time.time()
    try:
        with open(persistent_queue_filename(port), "rb") as stored_queues:
            data = orjson.loads(stored_queues.read())
    except FileNotFoundError:
        # No dump file; nothing to restore.
        pass
    except orjson.JSONDecodeError:
        logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)
    else:
        try:
            clients = {qid: ClientDescriptor.from_dict(client) for (qid, client) in data}
        except Exception:
            logging.exception(
                "Tornado %d could not deserialize event queues", port, stack_info=True
            )
    for client in clients.values():
        # Put code for migrations due to event queue data format changes here
        add_to_client_dicts(client)
    if len(clients) > 0 or settings.PRODUCTION:
        logging.info(
            "Tornado %d loaded %d event queues in %.3fs", port, len(clients), time.time() - start
        )
def send_restart_events(immediate: bool = False) -> None:
    """Broadcast a "restart" event, carrying server version/generation
    info, to every queue that accepts it.

    immediate=True sets the event's `immediate` flag (passed through
    to clients).
    """
    event: Dict[str, Any] = dict(
        type="restart",
        zulip_version=ZULIP_VERSION,
        zulip_merge_base=ZULIP_MERGE_BASE,
        zulip_feature_level=API_FEATURE_LEVEL,
        server_generation=settings.SERVER_GENERATION,
    )
    if immediate:
        event["immediate"] = True
    for client in clients.values():
        if client.accepts_event(event):
            client.add_event(event)
def setup_event_queue(port: int) -> None:
    """One-time Tornado startup: restore persisted queues, arrange dumps
    on exit/reload, schedule queue GC, and announce the restart."""
    if not settings.TEST_SUITE:
        load_event_queues(port)
        atexit.register(dump_event_queues, port)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        add_reload_hook(lambda: dump_event_queues(port))
    try:
        # Move the consumed dump file aside to its ".last" name —
        # presumably to avoid re-loading stale queues on a later
        # restart; confirm intent.
        os.rename(persistent_queue_filename(port), persistent_queue_filename(port, last=True))
    except OSError:
        pass
    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(
        lambda: gc_event_queues(port), EVENT_QUEUE_GC_FREQ_MSECS, ioloop
    )
    pc.start()
    send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
    """Core long-polling entry point.

    Returns one of:
    * {"type": "response", ...} — events (and a fresh queue_id if one
      was allocated) to send to the client immediately;
    * {"type": "error", ...} — a JsonableError for the caller to render;
    * {"type": "async"} — nothing to deliver yet; the handler has been
      connected to the queue and will be finished when an event arrives.
    """
    queue_id: Optional[str] = query["queue_id"]
    dont_block: bool = query["dont_block"]
    last_event_id: Optional[int] = query["last_event_id"]
    user_profile_id: int = query["user_profile_id"]
    new_queue_data: Optional[MutableMapping[str, Any]] = query.get("new_queue_data")
    client_type_name: str = query["client_type_name"]
    handler_id: int = query["handler_id"]
    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            # No queue yet: register a new one (only valid with dont_block).
            if dont_block:
                assert new_queue_data is not None
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id < client.event_queue.newest_pruned_id
            ):
                raise JsonableError(
                    _("An event newer than {event_id} has already been pruned!").format(
                        event_id=last_event_id,
                    )
                )
            # Acknowledge events up through last_event_id...
            client.event_queue.prune(last_event_id)
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id != client.event_queue.newest_pruned_id
            ):
                # ...and verify that id was actually one this queue delivered.
                raise JsonableError(
                    _("Event {event_id} was not in this queue").format(
                        event_id=last_event_id,
                    )
                )
            was_connected = client.finish_current_handler()
        if not client.event_queue.empty() or dont_block:
            response: Dict[str, Any] = dict(
                events=client.event_queue.contents(),
            )
            if orig_queue_id is None:
                response["queue_id"] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[{}/{}/{}]".format(
                    queue_id, len(response["events"]), response["events"][0]["type"]
                )
            else:
                extra_log_data = "[{}/{}]".format(queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)
        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info(
                "Disconnected handler for queue %s (%s/%s)",
                queue_id,
                user_profile_id,
                client_type_name,
            )
    except JsonableError as e:
        return dict(type="error", exception=e)
    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
    """Base payload for a push/email notification queue item."""
    return dict(
        user_profile_id=user_profile_id,
        message_id=message_id,
    )
def missedmessage_hook(
    user_profile_id: int, client: ClientDescriptor, last_for_client: bool
) -> None:
    """The receiver_is_off_zulip logic used to determine whether a user
    has no active client suffers from a somewhat fundamental race
    condition. If the client is no longer on the Internet,
    receiver_is_off_zulip will still return False for
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
    garbage-collected. This would cause us to reliably miss
    push/email notifying users for messages arriving during the
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
    example). We address this by, when the queue is garbage-collected
    at the end of those 10 minutes, checking to see if it's the last
    one, and if so, potentially triggering notifications to the user
    at that time, resulting in at most a DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
    delay in the arrival of their notifications.

    As Zulip's APIs get more popular and the mobile apps start using
    long-lived event queues for perf optimization, future versions of
    this will likely need to replace checking `last_for_client` with
    something more complicated, so that we only consider clients like
    web browsers, not the mobile apps or random API scripts.
    """
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return
    for event in client.event_queue.contents(include_internal_data=True):
        if event["type"] != "message":
            continue
        internal_data = event.get("internal_data", {})
        sender_id = event["message"]["sender_id"]
        # Rebuild the notifiability flags stored in internal_data when
        # the message event was originally processed.
        user_notifications_data = UserMessageNotificationsData(
            user_id=user_profile_id,
            sender_is_muted=internal_data.get("sender_is_muted", False),
            pm_push_notify=internal_data.get("pm_push_notify", False),
            pm_email_notify=internal_data.get("pm_email_notify", False),
            mention_push_notify=internal_data.get("mention_push_notify", False),
            mention_email_notify=internal_data.get("mention_email_notify", False),
            wildcard_mention_push_notify=internal_data.get("wildcard_mention_push_notify", False),
            wildcard_mention_email_notify=internal_data.get("wildcard_mention_email_notify", False),
            stream_push_notify=internal_data.get("stream_push_notify", False),
            stream_email_notify=internal_data.get("stream_email_notify", False),
            # Since one is by definition idle, we don't need to check online_push_enabled
            online_push_enabled=False,
        )
        mentioned_user_group_id = internal_data.get("mentioned_user_group_id")
        # Since we just GC'd the last event queue, the user is definitely idle.
        idle = True
        message_id = event["message"]["id"]
        # Pass on the information on whether a push or email notification was already sent.
        already_notified = dict(
            push_notified=internal_data.get("push_notified", False),
            email_notified=internal_data.get("email_notified", False),
        )
        maybe_enqueue_notifications(
            user_notifications_data=user_notifications_data,
            acting_user_id=sender_id,
            message_id=message_id,
            mentioned_user_group_id=mentioned_user_group_id,
            idle=idle,
            already_notified=already_notified,
        )
def receiver_is_off_zulip(user_profile_id: int) -> bool:
    """True if the user has no event queue registered to receive messages."""
    # If a user has no message-receiving event queues, they've got no open
    # zulip session, so it is appropriate to notify them.
    return not any(
        client.accepts_messages()
        for client in get_client_descriptors_for_user(user_profile_id)
    )
def maybe_enqueue_notifications(
    *,
    user_notifications_data: UserMessageNotificationsData,
    acting_user_id: int,
    message_id: int,
    mentioned_user_group_id: Optional[int],
    idle: bool,
    already_notified: Dict[str, bool],
) -> Dict[str, bool]:
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here.

    See https://zulip.readthedocs.io/en/latest/subsystems/notifications.html
    for high-level design documentation.

    Returns a dict marking which notification kinds ("push_notified",
    "email_notified") were newly enqueued by this call.
    """
    notified: Dict[str, bool] = {}
    if user_notifications_data.is_push_notifiable(acting_user_id, idle):
        notice = build_offline_notification(user_notifications_data.user_id, message_id)
        notice["trigger"] = user_notifications_data.get_push_notification_trigger(
            acting_user_id, idle
        )
        notice["type"] = "add"
        notice["mentioned_user_group_id"] = mentioned_user_group_id
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice)
            notified["push_notified"] = True
    # Send missed_message emails if a private message or a
    # mention. Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if user_notifications_data.is_email_notifiable(acting_user_id, idle):
        notice = build_offline_notification(user_notifications_data.user_id, message_id)
        notice["trigger"] = user_notifications_data.get_email_notification_trigger(
            acting_user_id, idle
        )
        notice["mentioned_user_group_id"] = mentioned_user_group_id
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified["email_notified"] = True
    return notified
class ClientInfo(TypedDict):
    """Per-queue delivery info for one message event; built by
    get_client_info_for_message_event."""
    client: ClientDescriptor
    # User-message flags (e.g. "mentioned") for the queue's owner.
    flags: Collection[str]
    # True when this queue belongs to the message's sending client.
    is_sender: bool
def get_client_info_for_message_event(
    event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]
) -> Dict[str, ClientInfo]:
    """
    Return client info for all the clients interested in a message.

    This basically includes clients for users who are recipients
    of the message, with some nuances for bots that auto-subscribe
    to all streams, plus users who may be mentioned, etc.

    Keyed by event-queue id; per-user entries overwrite the all-streams
    entries for the same queue, so recipients keep their flags.
    """
    send_to_clients: Dict[str, ClientInfo] = {}
    sender_queue_id: Optional[str] = event_template.get("sender_queue_id", None)
    def is_sender_client(client: ClientDescriptor) -> bool:
        # True when this queue is the one the message was sent from.
        return (sender_queue_id is not None) and client.event_queue.id == sender_queue_id
    # If we're on a public stream, look for clients (typically belonging to
    # bots) that are registered to get events for ALL streams.
    if "stream_name" in event_template and not event_template.get("invite_only"):
        realm_id = event_template["realm_id"]
        for client in get_client_descriptors_for_realm_all_streams(realm_id):
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=[],
                is_sender=is_sender_client(client),
            )
    for user_data in users:
        user_profile_id: int = user_data["id"]
        flags: Collection[str] = user_data.get("flags", [])
        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=flags,
                is_sender=is_sender_client(client),
            )
    return send_to_clients
def process_message_event(
    event_template: Mapping[str, Any], users: Collection[Mapping[str, Any]]
) -> None:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Delivers a new-message event to every interested queue, and
    enqueues push/email notifications for recipients as warranted.
    """
    send_to_clients = get_client_info_for_message_event(event_template, users)
    # Per-user-id sets describing notification-relevant state, computed
    # upstream and shipped alongside the message event.
    presence_idle_user_ids = set(event_template.get("presence_idle_user_ids", []))
    online_push_user_ids = set(event_template.get("online_push_user_ids", []))
    pm_mention_push_disabled_user_ids = set(
        event_template.get("pm_mention_push_disabled_user_ids", [])
    )
    pm_mention_email_disabled_user_ids = set(
        event_template.get("pm_mention_email_disabled_user_ids", [])
    )
    stream_push_user_ids = set(event_template.get("stream_push_user_ids", []))
    stream_email_user_ids = set(event_template.get("stream_email_user_ids", []))
    wildcard_mention_user_ids = set(event_template.get("wildcard_mention_user_ids", []))
    muted_sender_user_ids = set(event_template.get("muted_sender_user_ids", []))
    wide_dict: Dict[str, Any] = event_template["message_dict"]
    # Temporary transitional code: Zulip servers that have message
    # events in their event queues and upgrade to the new version
    # that expects sender_delivery_email in these events will
    # throw errors processing events. We can remove this block
    # once we don't expect anyone to be directly upgrading from
    # 2.0.x to the latest Zulip.
    if "sender_delivery_email" not in wide_dict: # nocoverage
        wide_dict["sender_delivery_email"] = wide_dict["sender_email"]
    sender_id: int = wide_dict["sender_id"]
    message_id: int = wide_dict["id"]
    message_type: str = wide_dict["type"]
    sending_client: str = wide_dict["client"]
    @cachify
    def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
        # @cachify should memoize this, so each (apply_markdown,
        # client_gravatar) flavor of the payload is computed only once.
        return MessageDict.finalize_payload(
            wide_dict,
            apply_markdown=apply_markdown,
            client_gravatar=client_gravatar,
        )
    # Extra user-specific data to include
    extra_user_data: Dict[int, Any] = {}
    for user_data in users:
        user_profile_id: int = user_data["id"]
        flags: Collection[str] = user_data.get("flags", [])
        mentioned_user_group_id: Optional[int] = user_data.get("mentioned_user_group_id")
        # If the recipient was offline and the message was a single or group PM to them
        # or they were @-notified potentially notify more immediately
        private_message = message_type == "private"
        user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
            user_id=user_profile_id,
            flags=flags,
            private_message=private_message,
            online_push_user_ids=online_push_user_ids,
            pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
            pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            wildcard_mention_user_ids=wildcard_mention_user_ids,
            muted_sender_user_ids=muted_sender_user_ids,
        )
        internal_data = asdict(user_notifications_data)
        # Remove fields sent through other pipes to save some space.
        internal_data.pop("user_id")
        internal_data["mentioned_user_group_id"] = mentioned_user_group_id
        extra_user_data[user_profile_id] = dict(internal_data=internal_data)
        # If the message isn't notifiable had the user been idle, then the user
        # shouldn't receive notifications even if they were online. In that case we can
        # avoid the more expensive `receiver_is_off_zulip` call, and move on to process
        # the next user.
        if not user_notifications_data.is_notifiable(acting_user_id=sender_id, idle=True):
            continue
        idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
        extra_user_data[user_profile_id]["internal_data"].update(
            maybe_enqueue_notifications(
                user_notifications_data=user_notifications_data,
                acting_user_id=sender_id,
                message_id=message_id,
                mentioned_user_group_id=mentioned_user_group_id,
                idle=idle,
                already_notified={},
            )
        )
    for client_data in send_to_clients.values():
        client = client_data["client"]
        flags = client_data["flags"]
        is_sender: bool = client_data.get("is_sender", False)
        extra_data: Optional[Mapping[str, bool]] = extra_user_data.get(client.user_profile_id, None)
        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue
        message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)
        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True
        user_event: Dict[str, Any] = dict(type="message", message=message_dict, flags=flags)
        if extra_data is not None:
            user_event.update(extra_data)
        if is_sender:
            local_message_id = event_template.get("local_id", None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id
        if not client.accepts_event(user_event):
            continue
        # The below prevents (Zephyr) mirroring loops.
        if "mirror" in sending_client and sending_client.lower() == client.client_type_name.lower():
            continue
        client.add_event(user_event)
def process_presence_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver a presence event to each user's clients, in slim or
    legacy format depending on the client's slim_presence capability.
    """
    if "user_id" not in event:
        # We only recently added `user_id` to presence data.
        # Any old events in our queue can just be dropped,
        # since presence events are pretty ephemeral in nature.
        logging.warning("Dropping some obsolete presence events after upgrade.")
        # BUG FIX: without this return, we'd fall through and raise
        # KeyError on event["user_id"] below for exactly the events
        # this branch is meant to drop.
        return

    slim_event = dict(
        type="presence",
        user_id=event["user_id"],
        server_timestamp=event["server_timestamp"],
        presence=event["presence"],
    )

    # Legacy clients identify the user by email as well.
    legacy_event = dict(
        type="presence",
        user_id=event["user_id"],
        email=event["email"],
        server_timestamp=event["server_timestamp"],
        presence=event["presence"],
    )

    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(event):
                if client.slim_presence:
                    client.add_event(slim_event)
                else:
                    client.add_event(legacy_event)
def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver *event* unmodified to every interested client of *users*."""
    for user_profile_id in users:
        recipients = get_client_descriptors_for_user(user_profile_id)
        for client in recipients:
            if not client.accepts_event(event):
                continue
            client.add_event(event)
def process_deletion_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver a message-deletion event, expanding it into one event per
    message for clients without the bulk_message_deletion capability.

    All clients will be required to support bulk_message_deletion in
    the future; the per-message expansion is backwards compatibility.
    """
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if not client.accepts_event(event):
                continue
            if client.bulk_message_deletion:
                # Capable clients get the whole message_ids list at once.
                client.add_event(event)
                continue
            for message_id in event["message_ids"]:
                # dict(event) rather than event.copy(): the read-only
                # Mapping type doesn't support .copy().
                per_message_event = dict(event)
                del per_message_event["message_ids"]
                per_message_event["message_id"] = message_id
                client.add_event(per_message_event)
def process_message_update_event(
    orig_event: Mapping[str, Any], users: Iterable[Mapping[str, Any]]
) -> None:
    """Deliver a message-edit event to each user's clients, enqueueing
    push/email notifications where the edit warrants them."""
    # Extract the parameters passed via the event object that don't
    # belong in the actual events.
    event_template = dict(orig_event)
    prior_mention_user_ids = set(event_template.pop("prior_mention_user_ids", []))
    presence_idle_user_ids = set(event_template.pop("presence_idle_user_ids", []))
    pm_mention_push_disabled_user_ids = set(
        event_template.pop("pm_mention_push_disabled_user_ids", [])
    )
    pm_mention_email_disabled_user_ids = set(
        event_template.pop("pm_mention_email_disabled_user_ids", [])
    )
    stream_push_user_ids = set(event_template.pop("stream_push_user_ids", []))
    stream_email_user_ids = set(event_template.pop("stream_email_user_ids", []))
    wildcard_mention_user_ids = set(event_template.pop("wildcard_mention_user_ids", []))
    muted_sender_user_ids = set(event_template.pop("muted_sender_user_ids", []))
    # TODO/compatibility: Translation code for the rename of
    # `push_notify_user_ids` to `online_push_user_ids`. Remove this
    # when one can no longer directly upgrade from 4.x to main.
    online_push_user_ids = set()
    if "online_push_user_ids" in event_template:
        online_push_user_ids = set(event_template.pop("online_push_user_ids"))
    elif "push_notify_user_ids" in event_template:
        online_push_user_ids = set(event_template.pop("push_notify_user_ids"))
    stream_name = event_template.get("stream_name")
    message_id = event_template["message_id"]
    for user_data in users:
        user_profile_id = user_data["id"]
        if "user_id" in event_template:
            # The user we'll get here will be the sender if the message's
            # content was edited, and the editor for topic edits. That's
            # the correct "acting_user" for both cases.
            acting_user_id = event_template["user_id"]
        else:
            # Events without a `user_id` field come from the do_update_embedded_data
            # code path, and represent just rendering previews; there should be no
            # real content changes.
            # It doesn't really matter what we set `acting_user_id` in this case,
            # because we know this event isn't meant to send notifications.
            acting_user_id = user_profile_id
        user_event = dict(event_template) # shallow copy, but deep enough for our needs
        for key in user_data.keys():
            if key != "id":
                user_event[key] = user_data[key]
        flags: Collection[str] = user_event["flags"]
        user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
            user_id=user_profile_id,
            flags=flags,
            private_message=(stream_name is None),
            online_push_user_ids=online_push_user_ids,
            pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
            pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            wildcard_mention_user_ids=wildcard_mention_user_ids,
            muted_sender_user_ids=muted_sender_user_ids,
        )
        maybe_enqueue_notifications_for_message_update(
            user_notifications_data=user_notifications_data,
            message_id=message_id,
            acting_user_id=acting_user_id,
            private_message=(stream_name is None),
            presence_idle=(user_profile_id in presence_idle_user_ids),
            prior_mentioned=(user_profile_id in prior_mention_user_ids),
        )
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                # We need to do another shallow copy, or we risk
                # sending the same event to multiple clients.
                client.add_event(user_event)
def maybe_enqueue_notifications_for_message_update(
    user_notifications_data: UserMessageNotificationsData,
    message_id: int,
    acting_user_id: int,
    private_message: bool,
    presence_idle: bool,
    prior_mentioned: bool,
) -> None:
    """Decide whether a message *edit* should trigger push/email notifications.

    Unlike new-message delivery, edits should usually NOT notify: we
    bail out for muted senders, private messages, previously mentioned
    users, and users who already got stream-level notifications, and
    only fall through to maybe_enqueue_notifications otherwise.
    """
    if user_notifications_data.sender_is_muted:
        # Never send notifications if the sender has been muted
        return
    if private_message:
        # We don't do offline notifications for PMs, because
        # we already notified the user of the original message
        return
    if prior_mentioned:
        # Don't spam people with duplicate mentions. This is
        # especially important considering that most message
        # edits are simple typo corrections.
        #
        # Note that prior_mention_user_ids contains users who received
        # a wildcard mention as well as normal mentions.
        #
        # TODO: Ideally, that would mean that we exclude here cases
        # where user_profile.wildcard_mentions_notify=False and have
        # those still send a notification. However, we don't have the
        # data to determine whether or not that was the case at the
        # time the original message was sent, so we can't do that
        # without extending the UserMessage data model.
        return
    if user_notifications_data.stream_push_notify or user_notifications_data.stream_email_notify:
        # Currently we assume that if this flag is set to True, then
        # the user already was notified about the earlier message,
        # so we short circuit. We may handle this more rigorously
        # in the future by looking at something like an AlreadyNotified
        # model.
        return
    # A user counts as idle if they were idle at edit time OR have no
    # message-receiving event queues open at all.
    idle = presence_idle or receiver_is_off_zulip(user_notifications_data.user_id)
    # We don't yet support custom user group mentions for message edit notifications.
    # Users will still receive notifications (because of the mentioned flag), but those
    # will be as if they were mentioned personally.
    mentioned_user_group_id = None
    maybe_enqueue_notifications(
        user_notifications_data=user_notifications_data,
        message_id=message_id,
        acting_user_id=acting_user_id,
        mentioned_user_group_id=mentioned_user_group_id,
        idle=idle,
        already_notified={},
    )
def reformat_legacy_send_message_event(
    event: Mapping[str, Any], users: Union[List[int], List[Mapping[str, Any]]]
) -> Tuple[MutableMapping[str, Any], Collection[MutableMapping[str, Any]]]:
    """Translate a pre-5.0 "message" event into the modern format.

    do_send_messages used to send events with users in dict format, with the
    dict containing the user_id and other data. We later trimmed down the user
    data to only contain the user_id and the usermessage flags, and put everything
    else in the event dict as lists. This handles any old-format events still
    in the queue during an upgrade.

    Returns the (mutated in place) event dict plus the trimmed user dicts.
    """
    modern_event = cast(MutableMapping[str, Any], event)
    user_dicts = cast(List[MutableMapping[str, Any]], users)
    # Back-calculate the older all-booleans format data in the `users` dicts into the newer
    # all-lists format, and attach the lists to the `event` object.
    modern_event["online_push_user_ids"] = []
    modern_event["stream_push_user_ids"] = []
    modern_event["stream_email_user_ids"] = []
    modern_event["wildcard_mention_user_ids"] = []
    modern_event["muted_sender_user_ids"] = []
    for user in user_dicts:
        user_id = user["id"]
        if user.pop("stream_push_notify", False):
            modern_event["stream_push_user_ids"].append(user_id)
        if user.pop("stream_email_notify", False):
            modern_event["stream_email_user_ids"].append(user_id)
        if user.pop("wildcard_mention_notify", False):
            modern_event["wildcard_mention_user_ids"].append(user_id)
        if user.pop("sender_is_muted", False):
            modern_event["muted_sender_user_ids"].append(user_id)
        # TODO/compatibility: Another translation code block for the rename of
        # `always_push_notify` to `online_push_enabled`. Remove this
        # when one can no longer directly upgrade from 4.x to 5.0-dev.
        #
        # Pop both keys unconditionally: the previous short-circuit
        # `pop(...) or pop(...)` left `always_push_notify` behind in the
        # dict whenever `online_push_enabled` was truthy, and the stale
        # legacy key then leaked into the per-user events built later.
        online_push_enabled = user.pop("online_push_enabled", False)
        always_push_notify = user.pop("always_push_notify", False)
        if online_push_enabled or always_push_notify:
            modern_event["online_push_user_ids"].append(user_id)
        # We can calculate `mentioned` from the usermessage flags, so just remove it
        user.pop("mentioned", False)
    return (modern_event, user_dicts)
def process_notification(notice: Mapping[str, Any]) -> None:
    """Dispatch one Tornado queue notice to the per-event-type processor.

    Handles several legacy on-the-wire formats so that events already
    enqueued at upgrade time can still be processed.
    """
    event: Mapping[str, Any] = notice["event"]
    users: Union[List[int], List[Mapping[str, Any]]] = notice["users"]
    start_time = time.time()
    if event["type"] == "message":
        if len(users) > 0 and isinstance(users[0], dict) and "stream_push_notify" in users[0]:
            # TODO/compatibility: Remove this whole block once one can no
            # longer directly upgrade from 4.x to 5.0-dev.
            modern_event, user_dicts = reformat_legacy_send_message_event(event, users)
            process_message_event(modern_event, user_dicts)
        else:
            process_message_event(event, cast(List[Mapping[str, Any]], users))
    elif event["type"] == "update_message":
        process_message_update_event(event, cast(List[Mapping[str, Any]], users))
    elif event["type"] == "delete_message":
        if len(users) > 0 and isinstance(users[0], dict):
            # do_delete_messages used to send events with users in
            # dict format {"id": <int>} This block is here for
            # compatibility with events in that format still in the
            # queue at the time of upgrade.
            #
            # TODO/compatibility: Remove this block once you can no
            # longer directly upgrade from 4.x to main.
            user_ids: List[int] = [user["id"] for user in cast(List[Mapping[str, Any]], users)]
        else:
            user_ids = cast(List[int], users)
        process_deletion_event(event, user_ids)
    elif event["type"] == "presence":
        process_presence_event(event, cast(List[int], users))
    else:
        process_event(event, cast(List[int], users))
    logging.debug(
        "Tornado: Event %s for %s users took %sms",
        event["type"],
        len(users),
        int(1000 * (time.time() - start_time)),
    )
def get_wrapped_process_notification(queue_name: str) -> Callable[[List[Dict[str, Any]]], None]:
    """Build the batch consumer callback for the Tornado notification queue.

    Each notice in a batch is processed independently; a notice that
    raises is handed back to retry_event, and one that exhausts its
    retry budget is logged with a stack trace.
    """

    def on_max_retries(notice: Dict[str, Any]) -> None:
        # Invoked by retry_event once the retry budget is exhausted.
        logging.error(
            "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n",
            notice,
            traceback.format_exc(),
        )

    def consume_batch(notices: List[Dict[str, Any]]) -> None:
        for notice in notices:
            try:
                process_notification(notice)
            except Exception:
                retry_event(queue_name, notice, on_max_retries)

    return consume_batch
| # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import atexit
import copy
import logging
import os
import random
import signal
import sys
import time
import traceback
from collections import deque
from dataclasses import asdict
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Deque,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import orjson
import tornado.ioloop
from django.conf import settings
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from version import API_FEATURE_LEVEL, ZULIP_MERGE_BASE, ZULIP_VERSION
from zerver.decorator import cachify
from zerver.lib.exceptions import JsonableError
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.notification_data import UserMessageNotificationsData
from zerver.lib.queue import queue_json_publish, retry_event
from zerver.lib.utils import statsd
from zerver.middleware import async_request_timer_restart
from zerver.tornado.autoreload import add_reload_hook
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.handlers import (
clear_handler_by_id,
finish_handler,
get_handler_by_id,
handler_stats_string,
)
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
# We garbage-collect every minute; this is totally fine given that the
# GC scan takes ~2ms with 1000 event queues.
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 1
# Capped limit for how long a client can request an event queue to
# live (one week); see ClientDescriptor.queue_timeout.
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
def create_heartbeat_event() -> Dict[str, str]:
    """Build the periodic heartbeat event delivered to every connected client."""
    return {"type": "heartbeat"}
class ClientDescriptor:
    """Server-side state for one registered event queue.

    Tracks the owning user/realm, the EventQueue itself, the client's
    delivery preferences (markdown rendering, narrow, event-type
    filter, etc.), and the currently connected long-polling handler,
    if any.
    """

    def __init__(
        self,
        user_profile_id: int,
        realm_id: int,
        event_queue: "EventQueue",
        event_types: Optional[Sequence[str]],
        client_type_name: str,
        apply_markdown: bool = True,
        client_gravatar: bool = True,
        slim_presence: bool = False,
        all_public_streams: bool = False,
        lifespan_secs: int = 0,
        narrow: Collection[Sequence[str]] = [],
        bulk_message_deletion: bool = False,
        stream_typing_notifications: bool = False,
        user_settings_object: bool = False,
    ) -> None:
        # These objects are serialized on shutdown and restored on restart.
        # If fields are added or semantics are changed, temporary code must be
        # added to load_event_queues() to update the restored objects.
        # Additionally, the to_dict and from_dict methods must be updated
        self.user_profile_id = user_profile_id
        self.realm_id = realm_id
        self.current_handler_id: Optional[int] = None
        self.current_client_name: Optional[str] = None
        self.event_queue = event_queue
        self.event_types = event_types
        self.last_connection_time = time.time()
        self.apply_markdown = apply_markdown
        self.client_gravatar = client_gravatar
        self.slim_presence = slim_presence
        self.all_public_streams = all_public_streams
        self.client_type_name = client_type_name
        self._timeout_handle: Any = None  # TODO: should be return type of ioloop.call_later
        self.narrow = narrow
        self.narrow_filter = build_narrow_filter(narrow)
        self.bulk_message_deletion = bulk_message_deletion
        self.stream_typing_notifications = stream_typing_notifications
        self.user_settings_object = user_settings_object
        # Default for lifespan_secs is DEFAULT_EVENT_QUEUE_TIMEOUT_SECS;
        # but users can set it as high as MAX_QUEUE_TIMEOUT_SECS.
        if lifespan_secs == 0:
            lifespan_secs = DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
        self.queue_timeout = min(lifespan_secs, MAX_QUEUE_TIMEOUT_SECS)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this descriptor for the on-disk queue dump."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(
            user_profile_id=self.user_profile_id,
            realm_id=self.realm_id,
            event_queue=self.event_queue.to_dict(),
            queue_timeout=self.queue_timeout,
            event_types=self.event_types,
            last_connection_time=self.last_connection_time,
            apply_markdown=self.apply_markdown,
            client_gravatar=self.client_gravatar,
            slim_presence=self.slim_presence,
            all_public_streams=self.all_public_streams,
            narrow=self.narrow,
            client_type_name=self.client_type_name,
            bulk_message_deletion=self.bulk_message_deletion,
            stream_typing_notifications=self.stream_typing_notifications,
            user_settings_object=self.user_settings_object,
        )

    def __repr__(self) -> str:
        return f"ClientDescriptor<{self.event_queue.id}>"

    @classmethod
    def from_dict(cls, d: MutableMapping[str, Any]) -> "ClientDescriptor":
        """Inverse of to_dict, with migrations for older serialized formats."""
        if "client_type" in d:
            # Temporary migration for the rename of client_type to client_type_name
            d["client_type_name"] = d["client_type"]
        if "client_gravatar" not in d:
            # Temporary migration for the addition of the client_gravatar field
            d["client_gravatar"] = False
        if "slim_presence" not in d:
            d["slim_presence"] = False
        ret = cls(
            d["user_profile_id"],
            d["realm_id"],
            EventQueue.from_dict(d["event_queue"]),
            d["event_types"],
            d["client_type_name"],
            d["apply_markdown"],
            d["client_gravatar"],
            d["slim_presence"],
            d["all_public_streams"],
            d["queue_timeout"],
            d.get("narrow", []),
            d.get("bulk_message_deletion", False),
            d.get("stream_typing_notifications", False),
            d.get("user_settings_object", False),
        )
        ret.last_connection_time = d["last_connection_time"]
        return ret

    def add_event(self, event: Mapping[str, Any]) -> None:
        """Push an event onto this queue and flush it to a waiting handler, if any."""
        if self.current_handler_id is not None:
            handler = get_handler_by_id(self.current_handler_id)
            async_request_timer_restart(handler._request)
        self.event_queue.push(event)
        self.finish_current_handler()

    def finish_current_handler(self) -> bool:
        """Complete any pending long-poll request; return True if one was finished."""
        if self.current_handler_id is not None:
            err_msg = f"Got error finishing handler for queue {self.event_queue.id}"
            try:
                finish_handler(
                    self.current_handler_id,
                    self.event_queue.id,
                    self.event_queue.contents(),
                    self.apply_markdown,
                )
            except Exception:
                logging.exception(err_msg, stack_info=True)
            finally:
                self.disconnect_handler()
            return True
        return False

    def accepts_event(self, event: Mapping[str, Any]) -> bool:
        """Decide whether this client should receive the given event."""
        if self.event_types is not None and event["type"] not in self.event_types:
            return False
        if event["type"] == "message":
            # Message events are additionally filtered by the client's narrow.
            return self.narrow_filter(event)
        if event["type"] == "typing" and "stream_id" in event:
            # Typing notifications for stream messages are only
            # delivered if the stream_typing_notifications
            # client_capability is enabled, for backwards compatibility.
            return self.stream_typing_notifications
        if self.user_settings_object and event["type"] in [
            "update_display_settings",
            "update_global_notifications",
        ]:
            # 'update_display_settings' and 'update_global_notifications'
            # events are sent only if user_settings_object is False,
            # otherwise only 'user_settings' event is sent.
            return False
        return True

    # TODO: Refactor so we don't need this function
    def accepts_messages(self) -> bool:
        return self.event_types is None or "message" in self.event_types

    def expired(self, now: float) -> bool:
        # A queue can only expire while idle (no connected handler).
        return (
            self.current_handler_id is None
            and now - self.last_connection_time >= self.queue_timeout
        )

    def connect_handler(self, handler_id: int, client_name: str) -> None:
        """Attach a long-polling handler and schedule the next heartbeat."""
        self.current_handler_id = handler_id
        self.current_client_name = client_name
        set_descriptor_by_handler_id(handler_id, self)
        self.last_connection_time = time.time()

        def timeout_callback() -> None:
            self._timeout_handle = None
            # All clients get heartbeat events
            heartbeat_event = create_heartbeat_event()
            self.add_event(heartbeat_event)

        ioloop = tornado.ioloop.IOLoop.instance()
        # Randomize the heartbeat interval per connection so heartbeats
        # don't all fire at once; max is HEARTBEAT_MIN_FREQ_SECS + 10.
        interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
        if self.client_type_name != "API: heartbeat test":
            self._timeout_handle = ioloop.call_later(interval, timeout_callback)

    def disconnect_handler(self, client_closed: bool = False) -> None:
        """Detach the current handler (if any) and cancel the pending heartbeat."""
        if self.current_handler_id:
            clear_descriptor_by_handler_id(self.current_handler_id)
            clear_handler_by_id(self.current_handler_id)
            if client_closed:
                logging.info(
                    "Client disconnected for queue %s (%s via %s)",
                    self.event_queue.id,
                    self.user_profile_id,
                    self.current_client_name,
                )
        self.current_handler_id = None
        self.current_client_name = None
        if self._timeout_handle is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    def cleanup(self) -> None:
        """Fully tear down this queue: flush the handler, then GC the queue."""
        # Before we can GC the event queue, we need to disconnect the
        # handler and notify the client (or connection server) so that
        # they can clean up their own state related to the GC'd event
        # queue. Finishing the handler before we GC ensures the
        # invariant that event queues are idle when passed to
        # `do_gc_event_queues` is preserved.
        self.finish_current_handler()
        do_gc_event_queues({self.event_queue.id}, {self.user_profile_id}, {self.realm_id})
def compute_full_event_type(event: Mapping[str, Any]) -> str:
    """Return the fine-grained type key used for virtual-event coalescing.

    Flag-update events are categorized per flag and operation; other
    events are categorized by their plain "type" field.
    """
    event_type = event["type"]
    if event_type != "update_message_flags":
        return event_type
    if event["all"]:
        # The "all" case gets its own category
        return "all_flags/{}/{}".format(event["flag"], event["operation"])
    return "flags/{}/{}".format(event["operation"], event["flag"])
class EventQueue:
    """An ordered queue of events destined for a single client.

    "Virtual" events (restart and message-flag updates) are coalesced
    into a single entry per fine-grained type until the contents are
    materialized, which keeps long-idle queues small.
    """

    def __init__(self, id: str) -> None:
        # When extending this list of properties, one must be sure to
        # update to_dict and from_dict.
        self.queue: Deque[Dict[str, Any]] = deque()
        self.next_event_id: int = 0
        # will only be None for migration from old versions
        self.newest_pruned_id: Optional[int] = -1
        self.id: str = id
        # Coalesced events, keyed by compute_full_event_type(event).
        self.virtual_events: Dict[str, Dict[str, Any]] = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the queue for the on-disk dump."""
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        d = dict(
            id=self.id,
            next_event_id=self.next_event_id,
            queue=list(self.queue),
            virtual_events=self.virtual_events,
        )
        if self.newest_pruned_id is not None:
            d["newest_pruned_id"] = self.newest_pruned_id
        return d

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "EventQueue":
        """Inverse of to_dict; tolerates dumps from older versions lacking keys."""
        ret = cls(d["id"])
        ret.next_event_id = d["next_event_id"]
        ret.newest_pruned_id = d.get("newest_pruned_id", None)
        ret.queue = deque(d["queue"])
        ret.virtual_events = d.get("virtual_events", {})
        return ret

    def push(self, orig_event: Mapping[str, Any]) -> None:
        """Append an event, assigning it the next event id.

        Restart and flag-update events are coalesced into
        virtual_events rather than appended directly.
        """
        # By default, we make a shallow copy of the event dictionary
        # to push into the target event queue; this allows the calling
        # code to send the same "event" object to multiple queues.
        # This behavior is important because the event_queue system is
        # about to mutate the event dictionary, minimally to add the
        # event_id attribute.
        event = dict(orig_event)
        event["id"] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if full_event_type == "restart" or full_event_type.startswith("flags/"):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)

    # Note that pop ignores virtual events.  This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self) -> Dict[str, Any]:
        return self.queue.popleft()

    def empty(self) -> bool:
        return len(self.queue) == 0 and len(self.virtual_events) == 0

    # See the comment on pop; that applies here as well
    def prune(self, through_id: int) -> None:
        """Discard all real events with id <= through_id, tracking the newest pruned id."""
        while len(self.queue) != 0 and self.queue[0]["id"] <= through_id:
            self.newest_pruned_id = self.queue[0]["id"]
            self.pop()

    def contents(self, include_internal_data: bool = False) -> List[Dict[str, Any]]:
        """Materialize the queue, merging virtual events back in id order.

        Note this resolves (clears) virtual_events as a side effect and
        rebuilds self.queue with the merged contents.
        """
        contents: List[Dict[str, Any]] = []
        # Keyed by event id (an int), not by event type.
        virtual_id_map: Dict[int, Dict[str, Any]] = {}
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(virtual_id_map.keys())
        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1
        self.virtual_events = {}
        self.queue = deque(contents)
        if include_internal_data:
            return contents
        return prune_internal_data(contents)
def prune_internal_data(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return a deep copy of `events` with per-message internal_data removed.

    The internal_data structures are not intended to be exposed to API
    clients; the input list is left unmodified.
    """
    pruned = copy.deepcopy(events)
    for pruned_event in pruned:
        if pruned_event["type"] == "message":
            pruned_event.pop("internal_data", None)
    return pruned
# Maps queue id to its client descriptor.
clients: Dict[str, ClientDescriptor] = {}
# Maps user id to the list of that user's client descriptors.
user_clients: Dict[int, List[ClientDescriptor]] = {}
# Maps realm id to the list of client descriptors with all_public_streams=True
# (or a narrow); see add_to_client_dicts.
realm_clients_all_streams: Dict[int, List[ClientDescriptor]] = {}
# Registered GC hooks.  Each is called with (user_profile_id, client,
# last_for_client), where last_for_client is True when the queue being
# deleted is the last one belonging to that user.
gc_hooks: List[Callable[[int, ClientDescriptor, bool], None]] = []
# Counter used by allocate_client_descriptor to mint unique queue ids.
next_queue_id = 0
def clear_client_event_queues_for_testing() -> None:
    """Reset all module-level event queue state; only valid in the test suite."""
    assert settings.TEST_SUITE
    clients.clear()
    user_clients.clear()
    realm_clients_all_streams.clear()
    gc_hooks.clear()
    global next_queue_id
    next_queue_id = 0
def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
    """Register a callback to run whenever an event queue is garbage-collected."""
    gc_hooks.append(hook)
def get_client_descriptor(queue_id: str) -> ClientDescriptor:
    """Return the descriptor registered under queue_id.

    Raises BadEventQueueIdError for ids that are unknown (e.g. already
    garbage-collected or from before a server restart).
    """
    try:
        descriptor = clients[queue_id]
    except KeyError:
        raise BadEventQueueIdError(queue_id)
    return descriptor
def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
    """Return all client descriptors owned by the given user (possibly empty)."""
    return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
    """Return the realm's descriptors registered for all public streams (possibly empty)."""
    return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client: ClientDescriptor) -> None:
    """Index a client descriptor into the per-user and per-realm lookup maps."""
    user_clients.setdefault(client.user_profile_id, []).append(client)
    if client.all_public_streams or client.narrow != []:
        # NOTE(review): clients with a narrow are tracked alongside
        # all_public_streams clients here; presumably so realm-wide
        # message fanout can reach narrowed queues -- confirm against
        # callers of get_client_descriptors_for_realm_all_streams.
        realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
    """Create, register, and return a ClientDescriptor with a fresh queue id.

    Queue ids embed the server generation, so ids minted before a
    restart cannot collide with newly allocated ones.
    """
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ":" + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client
def do_gc_event_queues(
    to_remove: AbstractSet[str], affected_users: AbstractSet[int], affected_realms: AbstractSet[int]
) -> None:
    """Delete the given queues and drop them from all lookup dicts.

    Runs every registered gc hook for each removed queue; the hook's
    last_for_client argument is True when the owning user has no
    queues left after removal.
    """

    def filter_client_dict(
        client_dict: MutableMapping[int, List[ClientDescriptor]], key: int
    ) -> None:
        # Remove the GC'd queues from one lookup dict, dropping the
        # key entirely when no descriptors remain for it.
        if key not in client_dict:
            return
        new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
        if len(new_client_list) == 0:
            del client_dict[key]
        else:
            client_dict[key] = new_client_list

    for user_id in affected_users:
        filter_client_dict(user_clients, user_id)
    for realm_id in affected_realms:
        filter_client_dict(realm_clients_all_streams, realm_id)
    for id in to_remove:
        for cb in gc_hooks:
            cb(
                clients[id].user_profile_id,
                clients[id],
                clients[id].user_profile_id not in user_clients,
            )
        del clients[id]
def gc_event_queues(port: int) -> None:
    """Periodic garbage-collection pass: remove all expired event queues."""
    start = time.time()
    to_remove: Set[str] = set()
    affected_users: Set[int] = set()
    affected_realms: Set[int] = set()
    for (id, client) in clients.items():
        if client.expired(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)
    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle (because
    # they are expired) and thus not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)
    if settings.PRODUCTION:
        logging.info(
            "Tornado %d removed %d expired event queues owned by %d users in %.3fs."
            " Now %d active queues, %s",
            port,
            len(to_remove),
            len(affected_users),
            time.time() - start,
            len(clients),
            handler_stats_string(),
        )
    statsd.gauge("tornado.active_queues", len(clients))
    statsd.gauge("tornado.active_users", len(user_clients))
def persistent_queue_filename(port: int, last: bool = False) -> str:
    """Return the path used to persist event queues across restarts.

    With a single Tornado process the legacy, non-port-aware name is
    used (with ".last" appended for the rotated copy); with multiple
    processes, the port -- and ".last", if requested -- are substituted
    into the filename pattern itself.
    """
    if settings.TORNADO_PROCESSES == 1:
        # Use non-port-aware, legacy version.
        legacy_name = settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ("",)
        if last:
            return legacy_name + ".last"
        return legacy_name
    infix = "." + str(port)
    if last:
        infix += ".last"
    return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % (infix,)
def dump_event_queues(port: int) -> None:
    """Serialize all event queues to disk so they survive a server restart."""
    start = time.time()
    with open(persistent_queue_filename(port), "wb") as stored_queues:
        stored_queues.write(
            orjson.dumps([(qid, client.to_dict()) for (qid, client) in clients.items()])
        )
    if len(clients) > 0 or settings.PRODUCTION:
        logging.info(
            "Tornado %d dumped %d event queues in %.3fs", port, len(clients), time.time() - start
        )
def load_event_queues(port: int) -> None:
    """Restore event queues dumped by dump_event_queues before a restart.

    A missing or unreadable dump file is not fatal: we simply start
    with no queues and clients will re-register.
    """
    global clients
    start = time.time()
    try:
        with open(persistent_queue_filename(port), "rb") as stored_queues:
            data = orjson.loads(stored_queues.read())
    except FileNotFoundError:
        pass
    except orjson.JSONDecodeError:
        logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)
    else:
        try:
            clients = {qid: ClientDescriptor.from_dict(client) for (qid, client) in data}
        except Exception:
            logging.exception(
                "Tornado %d could not deserialize event queues", port, stack_info=True
            )
    for client in clients.values():
        # Put code for migrations due to event queue data format changes here
        add_to_client_dicts(client)
    if len(clients) > 0 or settings.PRODUCTION:
        logging.info(
            "Tornado %d loaded %d event queues in %.3fs", port, len(clients), time.time() - start
        )
def send_restart_events(immediate: bool = False) -> None:
    """Broadcast a "restart" event to every queue that accepts it."""
    event: Dict[str, Any] = dict(
        type="restart",
        zulip_version=ZULIP_VERSION,
        zulip_merge_base=ZULIP_MERGE_BASE,
        zulip_feature_level=API_FEATURE_LEVEL,
        server_generation=settings.SERVER_GENERATION,
    )
    if immediate:
        event["immediate"] = True
    for client in clients.values():
        if client.accepts_event(event):
            client.add_event(event)
def setup_event_queue(port: int) -> None:
    """One-time Tornado startup: restore queues, wire up persistence and GC."""
    if not settings.TEST_SUITE:
        load_event_queues(port)
        atexit.register(dump_event_queues, port)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        add_reload_hook(lambda: dump_event_queues(port))
    # Rotate the previous dump to ".last"; it's fine if there isn't one.
    try:
        os.rename(persistent_queue_filename(port), persistent_queue_filename(port, last=True))
    except OSError:
        pass
    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(
        lambda: gc_event_queues(port), EVENT_QUEUE_GC_FREQ_MSECS, ioloop
    )
    pc.start()
    send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
    """Core of GET /events: return pending events or park the handler.

    Returns one of:
    * dict(type="response", ...) with events that can be sent immediately,
    * dict(type="error", ...) wrapping a JsonableError, or
    * dict(type="async") after attaching the handler to wait for events.
    """
    queue_id: Optional[str] = query["queue_id"]
    dont_block: bool = query["dont_block"]
    last_event_id: Optional[int] = query["last_event_id"]
    user_profile_id: int = query["user_profile_id"]
    new_queue_data: Optional[MutableMapping[str, Any]] = query.get("new_queue_data")
    client_type_name: str = query["client_type_name"]
    handler_id: int = query["handler_id"]
    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            # No queue yet: allocate one (only valid for non-blocking requests).
            if dont_block:
                assert new_queue_data is not None
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            # Existing queue: validate ownership, acknowledge (prune through)
            # last_event_id, and displace any handler already connected.
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id < client.event_queue.newest_pruned_id
            ):
                raise JsonableError(
                    _("An event newer than {event_id} has already been pruned!").format(
                        event_id=last_event_id,
                    )
                )
            client.event_queue.prune(last_event_id)
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id != client.event_queue.newest_pruned_id
            ):
                raise JsonableError(
                    _("Event {event_id} was not in this queue").format(
                        event_id=last_event_id,
                    )
                )
            was_connected = client.finish_current_handler()
        if not client.event_queue.empty() or dont_block:
            response: Dict[str, Any] = dict(
                events=client.event_queue.contents(),
            )
            if orig_queue_id is None:
                response["queue_id"] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[{}/{}/{}]".format(
                    queue_id, len(response["events"]), response["events"][0]["type"]
                )
            else:
                extra_log_data = "[{}/{}]".format(queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)
        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info(
                "Disconnected handler for queue %s (%s/%s)",
                queue_id,
                user_profile_id,
                client_type_name,
            )
    except JsonableError as e:
        return dict(type="error", exception=e)
    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
    """Build the minimal payload for a missed-message notification queue item."""
    return dict(user_profile_id=user_profile_id, message_id=message_id)
def missedmessage_hook(
    user_profile_id: int, client: ClientDescriptor, last_for_client: bool
) -> None:
    """The receiver_is_off_zulip logic used to determine whether a user
    has no active client suffers from a somewhat fundamental race
    condition. If the client is no longer on the Internet,
    receiver_is_off_zulip will still return False for
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
    garbage-collected. This would cause us to reliably miss
    push/email notifying users for messages arriving during the
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
    example). We address this by, when the queue is garbage-collected
    at the end of those 10 minutes, checking to see if it's the last
    one, and if so, potentially triggering notifications to the user
    at that time, resulting in at most a DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
    delay in the arrival of their notifications.

    As Zulip's APIs get more popular and the mobile apps start using
    long-lived event queues for perf optimization, future versions of
    this will likely need to replace checking `last_for_client` with
    something more complicated, so that we only consider clients like
    web browsers, not the mobile apps or random API scripts.
    """
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return
    for event in client.event_queue.contents(include_internal_data=True):
        if event["type"] != "message":
            continue
        # Notification decisions were precomputed when the message was
        # sent and stashed in internal_data; replay them here.
        internal_data = event.get("internal_data", {})
        sender_id = event["message"]["sender_id"]
        user_notifications_data = UserMessageNotificationsData(
            user_id=user_profile_id,
            sender_is_muted=internal_data.get("sender_is_muted", False),
            pm_push_notify=internal_data.get("pm_push_notify", False),
            pm_email_notify=internal_data.get("pm_email_notify", False),
            mention_push_notify=internal_data.get("mention_push_notify", False),
            mention_email_notify=internal_data.get("mention_email_notify", False),
            wildcard_mention_push_notify=internal_data.get("wildcard_mention_push_notify", False),
            wildcard_mention_email_notify=internal_data.get("wildcard_mention_email_notify", False),
            stream_push_notify=internal_data.get("stream_push_notify", False),
            stream_email_notify=internal_data.get("stream_email_notify", False),
            # Since one is by definition idle, we don't need to check online_push_enabled
            online_push_enabled=False,
        )
        mentioned_user_group_id = internal_data.get("mentioned_user_group_id")
        # Since we just GC'd the last event queue, the user is definitely idle.
        idle = True
        message_id = event["message"]["id"]
        # Pass on the information on whether a push or email notification was already sent.
        already_notified = dict(
            push_notified=internal_data.get("push_notified", False),
            email_notified=internal_data.get("email_notified", False),
        )
        maybe_enqueue_notifications(
            user_notifications_data=user_notifications_data,
            acting_user_id=sender_id,
            message_id=message_id,
            mentioned_user_group_id=mentioned_user_group_id,
            idle=idle,
            already_notified=already_notified,
        )
def receiver_is_off_zulip(user_profile_id: int) -> bool:
    """Return True when the user has no message-receiving event queues.

    If a user has no message-receiving event queues, they've got no
    open zulip session, so we notify them.
    """
    return not any(
        descriptor.accepts_messages()
        for descriptor in get_client_descriptors_for_user(user_profile_id)
    )
def maybe_enqueue_notifications(
    *,
    user_notifications_data: UserMessageNotificationsData,
    acting_user_id: int,
    message_id: int,
    mentioned_user_group_id: Optional[int],
    idle: bool,
    already_notified: Dict[str, bool],
) -> Dict[str, bool]:
    """Enqueue push and/or email notifications for one message and report
    which kinds were newly enqueued.

    This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here.

    See https://zulip.readthedocs.io/en/latest/subsystems/notifications.html
    for high-level design documentation.
    """
    enqueued: Dict[str, bool] = {}

    if user_notifications_data.is_push_notifiable(acting_user_id, idle):
        push_notice = build_offline_notification(user_notifications_data.user_id, message_id)
        push_notice["trigger"] = user_notifications_data.get_push_notification_trigger(
            acting_user_id, idle
        )
        push_notice["type"] = "add"
        push_notice["mentioned_user_group_id"] = mentioned_user_group_id
        # Skip the enqueue when the caller reports a push notification was
        # already sent for this message.
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", push_notice)
            enqueued["push_notified"] = True

    # Send missed_message emails if a private message or a mention.
    # Eventually, we'll add settings to allow email notifications to
    # match the model of push notifications above.
    if user_notifications_data.is_email_notifiable(acting_user_id, idle):
        email_notice = build_offline_notification(user_notifications_data.user_id, message_id)
        email_notice["trigger"] = user_notifications_data.get_email_notification_trigger(
            acting_user_id, idle
        )
        email_notice["mentioned_user_group_id"] = mentioned_user_group_id
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", email_notice, lambda notice: None)
            enqueued["email_notified"] = True

    return enqueued
class ClientInfo(TypedDict):
    # Per-client delivery record built by get_client_info_for_message_event:
    # the event-queue client itself, the usermessage flags that apply to its
    # user, and whether this client belongs to the message's sender.
    client: ClientDescriptor
    flags: Collection[str]
    is_sender: bool
def get_client_info_for_message_event(
    event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]
) -> Dict[str, ClientInfo]:
    """
    Return client info for all the clients interested in a message.

    This basically includes clients for users who are recipients
    of the message, with some nuances for bots that auto-subscribe
    to all streams, plus users who may be mentioned, etc.
    """
    info_by_queue_id: Dict[str, ClientInfo] = {}
    sender_queue_id: Optional[str] = event_template.get("sender_queue_id", None)

    def register(client: ClientDescriptor, flags: Collection[str]) -> None:
        # A client is "the sender" when its event queue id matches the
        # queue id recorded on the event template.
        sent_by_this_client = (
            sender_queue_id is not None
        ) and client.event_queue.id == sender_queue_id
        info_by_queue_id[client.event_queue.id] = dict(
            client=client,
            flags=flags,
            is_sender=sent_by_this_client,
        )

    # If we're on a public stream, look for clients (typically belonging to
    # bots) that are registered to get events for ALL streams.
    if "stream_name" in event_template and not event_template.get("invite_only"):
        for client in get_client_descriptors_for_realm_all_streams(event_template["realm_id"]):
            register(client, [])

    for user_data in users:
        for client in get_client_descriptors_for_user(user_data["id"]):
            register(client, user_data.get("flags", []))

    return info_by_queue_id
def process_message_event(
    event_template: Mapping[str, Any], users: Collection[Mapping[str, Any]]
) -> None:
    """Deliver a newly sent message to every interested client, enqueueing
    push/email notifications for recipients who appear to be offline.

    See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.
    """
    send_to_clients = get_client_info_for_message_event(event_template, users)
    # Per-event sets of user ids, precomputed upstream and attached to the
    # event template; they feed UserMessageNotificationsData below.
    presence_idle_user_ids = set(event_template.get("presence_idle_user_ids", []))
    online_push_user_ids = set(event_template.get("online_push_user_ids", []))
    pm_mention_push_disabled_user_ids = set(
        event_template.get("pm_mention_push_disabled_user_ids", [])
    )
    pm_mention_email_disabled_user_ids = set(
        event_template.get("pm_mention_email_disabled_user_ids", [])
    )
    stream_push_user_ids = set(event_template.get("stream_push_user_ids", []))
    stream_email_user_ids = set(event_template.get("stream_email_user_ids", []))
    wildcard_mention_user_ids = set(event_template.get("wildcard_mention_user_ids", []))
    muted_sender_user_ids = set(event_template.get("muted_sender_user_ids", []))
    # The "wide" message dict carries data for all possible client renderings.
    wide_dict: Dict[str, Any] = event_template["message_dict"]
    # Temporary transitional code: Zulip servers that have message
    # events in their event queues and upgrade to the new version
    # that expects sender_delivery_email in these events will
    # throw errors processing events. We can remove this block
    # once we don't expect anyone to be directly upgrading from
    # 2.0.x to the latest Zulip.
    if "sender_delivery_email" not in wide_dict: # nocoverage
        wide_dict["sender_delivery_email"] = wide_dict["sender_email"]
    sender_id: int = wide_dict["sender_id"]
    message_id: int = wide_dict["id"]
    message_type: str = wide_dict["type"]
    sending_client: str = wide_dict["client"]

    # Cached per (apply_markdown, client_gravatar) pair, since many clients
    # share the same rendering settings and finalize_payload is not cheap.
    @cachify
    def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
        return MessageDict.finalize_payload(
            wide_dict,
            apply_markdown=apply_markdown,
            client_gravatar=client_gravatar,
        )

    # Extra user-specific data to include
    extra_user_data: Dict[int, Any] = {}
    for user_data in users:
        user_profile_id: int = user_data["id"]
        flags: Collection[str] = user_data.get("flags", [])
        mentioned_user_group_id: Optional[int] = user_data.get("mentioned_user_group_id")
        # If the recipient was offline and the message was a single or group PM to them
        # or they were @-notified potentially notify more immediately
        private_message = message_type == "private"
        user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
            user_id=user_profile_id,
            flags=flags,
            private_message=private_message,
            online_push_user_ids=online_push_user_ids,
            pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
            pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            wildcard_mention_user_ids=wildcard_mention_user_ids,
            muted_sender_user_ids=muted_sender_user_ids,
        )
        internal_data = asdict(user_notifications_data)
        # Remove fields sent through other pipes to save some space.
        internal_data.pop("user_id")
        internal_data["mentioned_user_group_id"] = mentioned_user_group_id
        extra_user_data[user_profile_id] = dict(internal_data=internal_data)
        # If the message isn't notifiable had the user been idle, then the user
        # shouldn't receive notifications even if they were online. In that case we can
        # avoid the more expensive `receiver_is_off_zulip` call, and move on to process
        # the next user.
        if not user_notifications_data.is_notifiable(acting_user_id=sender_id, idle=True):
            continue
        idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
        # Record what notifications were actually enqueued, so downstream
        # consumers (and restarted queues) can avoid duplicate notifications.
        extra_user_data[user_profile_id]["internal_data"].update(
            maybe_enqueue_notifications(
                user_notifications_data=user_notifications_data,
                acting_user_id=sender_id,
                message_id=message_id,
                mentioned_user_group_id=mentioned_user_group_id,
                idle=idle,
                already_notified={},
            )
        )
    # Second pass: actually push the message event onto each client's queue.
    for client_data in send_to_clients.values():
        client = client_data["client"]
        flags = client_data["flags"]
        is_sender: bool = client_data.get("is_sender", False)
        extra_data: Optional[Mapping[str, bool]] = extra_user_data.get(client.user_profile_id, None)
        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue
        message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)
        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True
        user_event: Dict[str, Any] = dict(type="message", message=message_dict, flags=flags)
        if extra_data is not None:
            user_event.update(extra_data)
        if is_sender:
            # Echo the client-generated local id back to the sender so its
            # locally rendered message can be reconciled with the real one.
            local_message_id = event_template.get("local_id", None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id
        if not client.accepts_event(user_event):
            continue
        # The below prevents (Zephyr) mirroring loops.
        if "mirror" in sending_client and sending_client.lower() == client.client_type_name.lower():
            continue
        client.add_event(user_event)
def process_presence_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver a presence event to every accepting client of each user.

    Clients registered with slim_presence get the slim (id-only) format;
    everyone else gets the legacy format that also carries the email.
    """
    if "user_id" not in event:
        # We only recently added `user_id` to presence data.
        # Any old events in our queue can just be dropped,
        # since presence events are pretty ephemeral in nature.
        logging.warning("Dropping some obsolete presence events after upgrade.")
        # BUG FIX: the original code fell through here and immediately
        # read event["user_id"] below, raising KeyError for exactly the
        # obsolete events this branch intends to drop.
        return
    slim_event = dict(
        type="presence",
        user_id=event["user_id"],
        server_timestamp=event["server_timestamp"],
        presence=event["presence"],
    )
    legacy_event = dict(
        type="presence",
        user_id=event["user_id"],
        email=event["email"],
        server_timestamp=event["server_timestamp"],
        presence=event["presence"],
    )
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(event):
                if client.slim_presence:
                    client.add_event(slim_event)
                else:
                    client.add_event(legacy_event)
def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Fan a generic event out to every accepting client of each user."""
    for recipient_id in users:
        descriptors = get_client_descriptors_for_user(recipient_id)
        for descriptor in descriptors:
            if descriptor.accepts_event(event):
                descriptor.add_event(event)
def process_deletion_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    """Deliver a message-deletion event, splitting it into per-message
    events for clients that do not support bulk deletion."""
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if not client.accepts_event(event):
                continue
            # For clients which support message deletion in bulk, we
            # send a list of msgs_ids together, otherwise we send a
            # delete event for each message. All clients will be
            # required to support bulk_message_deletion in the future;
            # this logic is intended for backwards-compatibility only.
            if client.bulk_message_deletion:
                client.add_event(event)
                continue
            for message_id in event["message_ids"]:
                # dict(event, message_id=...) both copies and extends in one
                # step; we can't use event.copy() because the read-only
                # Mapping type doesn't support it.
                per_message_event = dict(event, message_id=message_id)
                del per_message_event["message_ids"]
                client.add_event(per_message_event)
def process_message_update_event(
    orig_event: Mapping[str, Any], users: Iterable[Mapping[str, Any]]
) -> None:
    """Deliver a message-edit event to each user's clients, possibly
    enqueueing push/email notifications for newly added mentions."""
    # Extract the parameters passed via the event object that don't
    # belong in the actual events.
    event_template = dict(orig_event)
    prior_mention_user_ids = set(event_template.pop("prior_mention_user_ids", []))
    presence_idle_user_ids = set(event_template.pop("presence_idle_user_ids", []))
    pm_mention_push_disabled_user_ids = set(
        event_template.pop("pm_mention_push_disabled_user_ids", [])
    )
    pm_mention_email_disabled_user_ids = set(
        event_template.pop("pm_mention_email_disabled_user_ids", [])
    )
    stream_push_user_ids = set(event_template.pop("stream_push_user_ids", []))
    stream_email_user_ids = set(event_template.pop("stream_email_user_ids", []))
    wildcard_mention_user_ids = set(event_template.pop("wildcard_mention_user_ids", []))
    muted_sender_user_ids = set(event_template.pop("muted_sender_user_ids", []))
    # TODO/compatibility: Translation code for the rename of
    # `push_notify_user_ids` to `online_push_user_ids`. Remove this
    # when one can no longer directly upgrade from 4.x to main.
    online_push_user_ids = set()
    if "online_push_user_ids" in event_template:
        online_push_user_ids = set(event_template.pop("online_push_user_ids"))
    elif "push_notify_user_ids" in event_template:
        online_push_user_ids = set(event_template.pop("push_notify_user_ids"))
    stream_name = event_template.get("stream_name")
    message_id = event_template["message_id"]
    for user_data in users:
        user_profile_id = user_data["id"]
        if "user_id" in event_template:
            # The user we'll get here will be the sender if the message's
            # content was edited, and the editor for topic edits. That's
            # the correct "acting_user" for both cases.
            acting_user_id = event_template["user_id"]
        else:
            # Events without a `user_id` field come from the do_update_embedded_data
            # code path, and represent just rendering previews; there should be no
            # real content changes.
            # It doesn't really matter what we set `acting_user_id` in this case,
            # because we know this event isn't meant to send notifications.
            acting_user_id = user_profile_id
        user_event = dict(event_template) # shallow copy, but deep enough for our needs
        # Merge per-user fields (e.g. flags) into the per-user event copy.
        for key in user_data.keys():
            if key != "id":
                user_event[key] = user_data[key]
        flags: Collection[str] = user_event["flags"]
        user_notifications_data = UserMessageNotificationsData.from_user_id_sets(
            user_id=user_profile_id,
            flags=flags,
            private_message=(stream_name is None),
            online_push_user_ids=online_push_user_ids,
            pm_mention_push_disabled_user_ids=pm_mention_push_disabled_user_ids,
            pm_mention_email_disabled_user_ids=pm_mention_email_disabled_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            wildcard_mention_user_ids=wildcard_mention_user_ids,
            muted_sender_user_ids=muted_sender_user_ids,
        )
        maybe_enqueue_notifications_for_message_update(
            user_notifications_data=user_notifications_data,
            message_id=message_id,
            acting_user_id=acting_user_id,
            private_message=(stream_name is None),
            presence_idle=(user_profile_id in presence_idle_user_ids),
            prior_mentioned=(user_profile_id in prior_mention_user_ids),
        )
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                # We need to do another shallow copy, or we risk
                # sending the same event to multiple clients.
                client.add_event(user_event)
def maybe_enqueue_notifications_for_message_update(
    user_notifications_data: UserMessageNotificationsData,
    message_id: int,
    acting_user_id: int,
    private_message: bool,
    presence_idle: bool,
    prior_mentioned: bool,
) -> None:
    """Decide whether a message edit warrants push/email notifications,
    and enqueue them if so. Most edits should notify nobody."""
    if user_notifications_data.sender_is_muted:
        # Never send notifications if the sender has been muted
        return

    if private_message:
        # We don't do offline notifications for PMs, because
        # we already notified the user of the original message
        return

    if prior_mentioned:
        # Don't spam people with duplicate mentions. This is
        # especially important considering that most message
        # edits are simple typo corrections.
        #
        # Note that prior_mention_user_ids contains users who received
        # a wildcard mention as well as normal mentions.
        #
        # TODO: Ideally, that would mean that we exclude here cases
        # where user_profile.wildcard_mentions_notify=False and have
        # those still send a notification. However, we don't have the
        # data to determine whether or not that was the case at the
        # time the original message was sent, so we can't do that
        # without extending the UserMessage data model.
        return

    stream_notified_already = (
        user_notifications_data.stream_push_notify
        or user_notifications_data.stream_email_notify
    )
    if stream_notified_already:
        # Currently we assume that if this flag is set to True, then
        # the user already was notified about the earlier message,
        # so we short circuit. We may handle this more rigorously
        # in the future by looking at something like an AlreadyNotified
        # model.
        return

    receiver_idle = presence_idle or receiver_is_off_zulip(user_notifications_data.user_id)
    maybe_enqueue_notifications(
        user_notifications_data=user_notifications_data,
        message_id=message_id,
        acting_user_id=acting_user_id,
        # We don't yet support custom user group mentions for message edit
        # notifications. Users will still receive notifications (because of
        # the mentioned flag), but as if they were mentioned personally.
        mentioned_user_group_id=None,
        idle=receiver_idle,
        already_notified={},
    )
def reformat_legacy_send_message_event(
    event: Mapping[str, Any], users: Union[List[int], List[Mapping[str, Any]]]
) -> Tuple[MutableMapping[str, Any], Collection[MutableMapping[str, Any]]]:
    """Convert an old-format send-message event (per-user boolean flags)
    into the modern format (per-event lists of user ids).

    do_send_messages used to send events with users in dict format, with
    the dict containing the user_id and other data. The user data was
    later trimmed down to just the user_id and usermessage flags, with
    everything else moved into the event dict as lists. This handles any
    old-format events still in the queue during an upgrade.
    """
    modern_event = cast(MutableMapping[str, Any], event)
    user_dicts = cast(List[MutableMapping[str, Any]], users)

    # Legacy per-user boolean -> modern per-event list key.
    legacy_flag_to_list = {
        "stream_push_notify": "stream_push_user_ids",
        "stream_email_notify": "stream_email_user_ids",
        "wildcard_mention_notify": "wildcard_mention_user_ids",
        "sender_is_muted": "muted_sender_user_ids",
    }

    # Back-calculate the older all-booleans data into the all-lists
    # format, attaching the lists to the event object.
    modern_event["online_push_user_ids"] = []
    for list_key in legacy_flag_to_list.values():
        modern_event[list_key] = []

    for user in user_dicts:
        user_id = user["id"]
        for legacy_flag, list_key in legacy_flag_to_list.items():
            if user.pop(legacy_flag, False):
                modern_event[list_key].append(user_id)
        # TODO/compatibility: Translation for the rename of
        # `always_push_notify` to `online_push_enabled`. Remove this
        # when one can no longer directly upgrade from 4.x to 5.0-dev.
        if user.pop("online_push_enabled", False) or user.pop("always_push_notify", False):
            modern_event["online_push_user_ids"].append(user_id)
        # `mentioned` can be derived from the usermessage flags, so drop it.
        user.pop("mentioned", False)

    return (modern_event, user_dicts)
def process_notification(notice: Mapping[str, Any]) -> None:
    """Dispatch one Tornado notice to the processor for its event type,
    logging how long delivery took."""
    event: Mapping[str, Any] = notice["event"]
    users: Union[List[int], List[Mapping[str, Any]]] = notice["users"]
    start_time = time.time()
    event_type = event["type"]

    if event_type == "message":
        has_legacy_user_dicts = (
            len(users) > 0 and isinstance(users[0], dict) and "stream_push_notify" in users[0]
        )
        if has_legacy_user_dicts:
            # TODO/compatibility: Remove this whole block once one can no
            # longer directly upgrade directly from 4.x to 5.0-dev.
            modern_event, user_dicts = reformat_legacy_send_message_event(event, users)
            process_message_event(modern_event, user_dicts)
        else:
            process_message_event(event, cast(List[Mapping[str, Any]], users))
    elif event_type == "update_message":
        process_message_update_event(event, cast(List[Mapping[str, Any]], users))
    elif event_type == "delete_message":
        if len(users) > 0 and isinstance(users[0], dict):
            # do_delete_messages used to send events with users in
            # dict format {"id": <int>} This block is here for
            # compatibility with events in that format still in the
            # queue at the time of upgrade.
            #
            # TODO/compatibility: Remove this block once you can no
            # longer directly upgrade directly from 4.x to main.
            user_ids: List[int] = [user["id"] for user in cast(List[Mapping[str, Any]], users)]
        else:
            user_ids = cast(List[int], users)
        process_deletion_event(event, user_ids)
    elif event_type == "presence":
        process_presence_event(event, cast(List[int], users))
    else:
        process_event(event, cast(List[int], users))

    logging.debug(
        "Tornado: Event %s for %s users took %sms",
        event_type,
        len(users),
        int(1000 * (time.time() - start_time)),
    )
def get_wrapped_process_notification(queue_name: str) -> Callable[[List[Dict[str, Any]]], None]:
    """Build a consumer for `queue_name` that processes each notice,
    retrying failures via retry_event and logging permanently failed ones."""
    def log_permanent_failure(notice: Dict[str, Any]) -> None:
        # Called by retry_event once a notice has exhausted its retries.
        logging.error(
            "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n",
            notice,
            traceback.format_exc(),
        )

    def consume_notifications(notices: List[Dict[str, Any]]) -> None:
        for notice in notices:
            try:
                process_notification(notice)
            except Exception:
                retry_event(queue_name, notice, log_permanent_failure)

    return consume_notifications
| en | 0.90359 | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for # high-level documentation on how this system works. # The idle timeout used to be a week, but we found that in that # situation, queues from dead browser sessions would grow quite large # due to the accumulation of message data in those queues. # We garbage-collect every minute; this is totally fine given that the # GC scan takes ~2ms with 1000 event queues. # Capped limit for how long a client can request an event queue # to live # The heartbeats effectively act as a server-side timeout for # get_events(). The actual timeout value is randomized for each # client connection based on the below value. We ensure that the # maximum timeout value is 55 seconds, to deal with crappy home # wireless routers that kill "inactive" http connections. # These objects are serialized on shutdown and restored on restart. # If fields are added or semantics are changed, temporary code must be # added to load_event_queues() to update the restored objects. # Additionally, the to_dict and from_dict methods must be updated # TODO: should be return type of ioloop.call_later # Default for lifespan_secs is DEFAULT_EVENT_QUEUE_TIMEOUT_SECS; # but users can set it as high as MAX_QUEUE_TIMEOUT_SECS. # If you add a new key to this dict, make sure you add appropriate # migration code in from_dict or load_event_queues to account for # loading event queues that lack that key. # Temporary migration for the rename of client_type to client_type_name # Temporary migration for the addition of the client_gravatar field # Typing notifications for stream messages are only # delivered if the stream_typing_notifications # client_capability is enabled, for backwards compatibility. # 'update_display_settings' and 'update_global_notifications' # events are sent only if user_settings_object is False, # otherwise only 'user_settings' event is sent. 
# TODO: Refactor so we don't need this function # All clients get heartbeat events # Before we can GC the event queue, we need to disconnect the # handler and notify the client (or connection server) so that # they can clean up their own state related to the GC'd event # queue. Finishing the handler before we GC ensures the # invariant that event queues are idle when passed to # `do_gc_event_queues` is preserved. # Put the "all" case in its own category # When extending this list of properties, one must be sure to # update to_dict and from_dict. # will only be None for migration from old versions # If you add a new key to this dict, make sure you add appropriate # migration code in from_dict or load_event_queues to account for # loading event queues that lack that key. # By default, we make a shallow copy of the event dictionary # to push into the target event queue; this allows the calling # code to send the same "event" object to multiple queues. # This behavior is important because the event_queue system is # about to mutate the event dictionary, minimally to add the # event_id attribute. # Update the virtual event with the values from the event # Note that pop ignores virtual events. This is fine in our # current usage since virtual events should always be resolved to # a real event before being given to users. # See the comment on pop; that applies here as well # Merge the virtual events into their final place in the queue Prunes the internal_data data structures, which are not intended to be exposed to API clients. # maps queue ids to client descriptors # maps user id to list of client descriptors # maps realm id to list of client descriptors with all_public_streams=True # list of registered gc hooks. # each one will be called with a user profile id, queue, and bool # last_for_client that is true if this is the last queue pertaining # to this user_profile_id # that is about to be deleted # We don't need to call e.g. 
finish_current_handler on the clients # being removed because they are guaranteed to be idle (because # they are expired) and thus not have a current handler. # Use non-port-aware, legacy version. # Put code for migrations due to event queue data format changes here # Make sure we dump event queues even if we exit via signal # Set up event queue garbage collection # After this point, dont_block=False, the queue is empty, and we # have a pre-existing queue, so we wait for new events. The receiver_is_off_zulip logic used to determine whether a user has no active client suffers from a somewhat fundamental race condition. If the client is no longer on the Internet, receiver_is_off_zulip will still return False for DEFAULT_EVENT_QUEUE_TIMEOUT_SECS, until the queue is garbage-collected. This would cause us to reliably miss push/email notifying users for messages arriving during the DEFAULT_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for example). We address this by, when the queue is garbage-collected at the end of those 10 minutes, checking to see if it's the last one, and if so, potentially triggering notifications to the user at that time, resulting in at most a DEFAULT_EVENT_QUEUE_TIMEOUT_SECS delay in the arrival of their notifications. As Zulip's APIs get more popular and the mobile apps start using long-lived event queues for perf optimization, future versions of this will likely need to replace checking `last_for_client` with something more complicated, so that we only consider clients like web browsers, not the mobile apps or random API scripts. # Only process missedmessage hook when the last queue for a # client has been garbage collected # Since one is by definition idle, we don't need to check online_push_enabled # Since we just GC'd the last event queue, the user is definitely idle. # Pass on the information on whether a push or email notification was already sent. 
# If a user has no message-receiving event queues, they've got no open zulip # session so we notify them. This function has a complete unit test suite in `test_enqueue_notifications` that should be expanded as we add more features here. See https://zulip.readthedocs.io/en/latest/subsystems/notifications.html for high-level design documentation. # Send missed_message emails if a private message or a # mention. Eventually, we'll add settings to allow email # notifications to match the model of push notifications # above. Return client info for all the clients interested in a message. This basically includes clients for users who are recipients of the message, with some nuances for bots that auto-subscribe to all streams, plus users who may be mentioned, etc. # If we're on a public stream, look for clients (typically belonging to # bots) that are registered to get events for ALL streams. See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html for high-level documentation on this subsystem. # Temporary transitional code: Zulip servers that have message # events in their event queues and upgrade to the new version # that expects sender_delivery_email in these events will # throw errors processing events. We can remove this block # once we don't expect anyone to be directly upgrading from # 2.0.x to the latest Zulip. # nocoverage # Extra user-specific data to include # If the recipient was offline and the message was a single or group PM to them # or they were @-notified potentially notify more immediately # Remove fields sent through other pipes to save some space. # If the message isn't notifiable had the user been idle, then the user # shouldn't receive notifications even if they were online. In that case we can # avoid the more expensive `receiver_is_off_zulip` call, and move on to process # the next user. 
# The actual check is the accepts_event() check below; # this line is just an optimization to avoid copying # message data unnecessarily # Make sure Zephyr mirroring bots know whether stream is invite-only # The below prevents (Zephyr) mirroring loops. # We only recently added `user_id` to presence data. # Any old events in our queue can just be dropped, # since presence events are pretty ephemeral in nature. # For clients which support message deletion in bulk, we # send a list of msgs_ids together, otherwise we send a # delete event for each message. All clients will be # required to support bulk_message_deletion in the future; # this logic is intended for backwards-compatibility only. # We use the following rather than event.copy() # because the read-only Mapping type doesn't support .copy(). # Extract the parameters passed via the event object that don't # belong in the actual events. # TODO/compatibility: Translation code for the rename of # `push_notify_user_ids` to `online_push_user_ids`. Remove this # when one can no longer directly upgrade from 4.x to main. # The user we'll get here will be the sender if the message's # content was edited, and the editor for topic edits. That's # the correct "acting_user" for both cases. # Events without a `user_id` field come from the do_update_embedded_data # code path, and represent just rendering previews; there should be no # real content changes. # It doesn't really matter what we set `acting_user_id` in this case, # becuase we know this event isn't meant to send notifications. # shallow copy, but deep enough for our needs # We need to do another shallow copy, or we risk # sending the same event to multiple clients. # Never send notifications if the sender has been muted # We don't do offline notifications for PMs, because # we already notified the user of the original message # Don't spam people with duplicate mentions. This is # especially important considering that most message # edits are simple typo corrections. 
# # Note that prior_mention_user_ids contains users who received # a wildcard mention as well as normal mentions. # # TODO: Ideally, that would mean that we exclude here cases # where user_profile.wildcard_mentions_notify=False and have # those still send a notification. However, we don't have the # data to determine whether or not that was the case at the # time the original message was sent, so we can't do that # without extending the UserMessage data model. # Currently we assume that if this flag is set to True, then # the user already was notified about the earlier message, # so we short circuit. We may handle this more rigorously # in the future by looking at something like an AlreadyNotified # model. # We don't yet support custom user group mentions for message edit notifications. # Users will still receive notifications (because of the mentioned flag), but those # will be as if they were mentioned personally. # do_send_messages used to send events with users in dict format, with the # dict containing the user_id and other data. We later trimmed down the user # data to only contain the user_id and the usermessage flags, and put everything # else in the event dict as lists. # This block handles any old-format events still in the queue during upgrade. # Back-calculate the older all-booleans format data in the `users` dicts into the newer # all-lists format, and attach the lists to the `event` object. # TODO/compatibility: Another translation code block for the rename of # `always_push_notify` to `online_push_enabled`. Remove this # when one can no longer directly upgrade from 4.x to 5.0-dev. # We can calculate `mentioned` from the usermessage flags, so just remove it # TODO/compatibility: Remove this whole block once one can no # longer directly upgrade directly from 4.x to 5.0-dev. 
# do_delete_messages used to send events with users in # dict format {"id": <int>} This block is here for # compatibility with events in that format still in the # queue at the time of upgrade. # # TODO/compatibility: Remove this block once you can no # longer directly upgrade directly from 4.x to main. | 1.67082 | 2 |
utilities/configuration.py | merretbuurman/rapydo-utils | 0 | 6624933 | # -*- coding: utf-8 -*-
# import os
from utilities import PROJECT_CONF_FILENAME, PROJECTS_DEFAULTS_FILE
from utilities.myyaml import load_yaml_file
from utilities.logs import get_logger
log = get_logger(__name__)
# SCRIPT_PATH = helpers.script_abspath(__file__)
def load_project_configuration(project_path):
    """Load the project's rapydo configuration file from `project_path`,
    preserving key order and failing loudly on errors."""
    return load_yaml_file(
        path=project_path,
        skip_error=False,
        logger=False,
        file=PROJECT_CONF_FILENAME,
        keep_order=True,
    )
def read(base_path, project_path=None, is_template=False, do_exit=True):
    """
    Read default configuration

    Loads the defaults file from *base_path* and, when *project_path* is
    given, the user's project configuration as well; the two are merged
    with mix() (custom values replace or extend the defaults).

    :param base_path: directory containing PROJECTS_DEFAULTS_FILE
    :param project_path: optional directory containing PROJECT_CONF_FILENAME
    :param is_template: when True, skip the "still the initial template?"
        checks on title/description/name
    :param do_exit: when True, exit through the logger on load errors
        instead of re-raising AttributeError
    """
    project_configuration_files = [
        # DEFAULT
        {
            # 'path': SCRIPT_PATH,
            'path': base_path,
            'skip_error': False,
            'logger': False,
            'file': PROJECTS_DEFAULTS_FILE
        }
    ]
    if project_path is not None:
        project_configuration_files.append(
            # CUSTOM FROM THE USER
            {
                'path': project_path,
                'skip_error': False,
                'logger': False,
                'file': PROJECT_CONF_FILENAME
            }
        )
    confs = {}
    for args in project_configuration_files:
        try:
            args['keep_order'] = True
            f = args['file']
            confs[f] = load_yaml_file(**args)
            log.checked("Found '%s' rapydo configuration" % f)
        # NOTE(review): load_yaml_file apparently signals failures as
        # AttributeError — confirm against utilities.myyaml
        except AttributeError as e:
            if do_exit:
                log.exit(e)
            else:
                raise AttributeError(e)
    # Recover the two options
    base_configuration = confs.get(PROJECTS_DEFAULTS_FILE)
    if project_path is None:
        return base_configuration
    custom_configuration = confs.get(PROJECT_CONF_FILENAME, {})
    # Verify custom project configuration
    prj = custom_configuration.get('project')
    if prj is None:
        raise AttributeError("Missing project configuration")
    elif not is_template:
        # Check if these three variables were changed from the initial template
        checks = {
            'title': 'My project',
            'description': 'Title of my project',
            'name': 'rapydo'
        }
        for key, value in checks.items():
            if prj.get(key, '') == value:
                # get file name with the load file utility
                # (args is reset to an empty dict: *args unpacks to nothing)
                args = {}
                kwargs = project_configuration_files.pop()
                filepath = load_yaml_file(
                    *args, return_path=True, **kwargs)
                # Abort: the user never customized the template values
                log.critical_exit(
                    "\n\nYour project is not yet configured:\n" +
                    "Please edit key '%s' in file %s" % (key, filepath)
                )
    # Mix default and custom configuration
    return mix(base_configuration, custom_configuration)
def mix(base, custom):
    """Recursively merge *custom* into *base* in place and return *base*.

    Dict values are merged recursively, list values are appended to the
    existing list, and any other value from *custom* overwrites the default.
    """
    for key, value in custom.items():
        if key in base:
            if isinstance(value, dict):
                mix(base[key], value)
            elif isinstance(value, list):
                base[key].extend(value)
            else:
                base[key] = value
        else:
            base[key] = value
    return base
| # -*- coding: utf-8 -*-
# import os
from utilities import PROJECT_CONF_FILENAME, PROJECTS_DEFAULTS_FILE
from utilities.myyaml import load_yaml_file
from utilities.logs import get_logger
log = get_logger(__name__)
# SCRIPT_PATH = helpers.script_abspath(__file__)
def load_project_configuration(project_path):
    """Load the rapydo project configuration found in *project_path*.

    Reads PROJECT_CONF_FILENAME with key order preserved; load errors are
    not skipped and the loader's own logging is disabled.
    """
    return load_yaml_file(
        path=project_path,
        skip_error=False,
        logger=False,
        file=PROJECT_CONF_FILENAME,
        keep_order=True,
    )
def read(base_path, project_path=None, is_template=False, do_exit=True):
    """
    Read default configuration

    Loads the defaults file from *base_path* and, when *project_path* is
    given, the user's project configuration as well; the two are merged
    with mix() (custom values replace or extend the defaults).

    :param base_path: directory containing PROJECTS_DEFAULTS_FILE
    :param project_path: optional directory containing PROJECT_CONF_FILENAME
    :param is_template: when True, skip the "still the initial template?"
        checks on title/description/name
    :param do_exit: when True, exit through the logger on load errors
        instead of re-raising AttributeError
    """
    project_configuration_files = [
        # DEFAULT
        {
            # 'path': SCRIPT_PATH,
            'path': base_path,
            'skip_error': False,
            'logger': False,
            'file': PROJECTS_DEFAULTS_FILE
        }
    ]
    if project_path is not None:
        project_configuration_files.append(
            # CUSTOM FROM THE USER
            {
                'path': project_path,
                'skip_error': False,
                'logger': False,
                'file': PROJECT_CONF_FILENAME
            }
        )
    confs = {}
    for args in project_configuration_files:
        try:
            args['keep_order'] = True
            f = args['file']
            confs[f] = load_yaml_file(**args)
            log.checked("Found '%s' rapydo configuration" % f)
        # NOTE(review): load_yaml_file apparently signals failures as
        # AttributeError — confirm against utilities.myyaml
        except AttributeError as e:
            if do_exit:
                log.exit(e)
            else:
                raise AttributeError(e)
    # Recover the two options
    base_configuration = confs.get(PROJECTS_DEFAULTS_FILE)
    if project_path is None:
        return base_configuration
    custom_configuration = confs.get(PROJECT_CONF_FILENAME, {})
    # Verify custom project configuration
    prj = custom_configuration.get('project')
    if prj is None:
        raise AttributeError("Missing project configuration")
    elif not is_template:
        # Check if these three variables were changed from the initial template
        checks = {
            'title': 'My project',
            'description': 'Title of my project',
            'name': 'rapydo'
        }
        for key, value in checks.items():
            if prj.get(key, '') == value:
                # get file name with the load file utility
                # (args is reset to an empty dict: *args unpacks to nothing)
                args = {}
                kwargs = project_configuration_files.pop()
                filepath = load_yaml_file(
                    *args, return_path=True, **kwargs)
                # Abort: the user never customized the template values
                log.critical_exit(
                    "\n\nYour project is not yet configured:\n" +
                    "Please edit key '%s' in file %s" % (key, filepath)
                )
    # Mix default and custom configuration
    return mix(base_configuration, custom_configuration)
def mix(base, custom):
    """Recursively merge *custom* into *base* in place and return *base*.

    Dict values are merged recursively, list values are appended to the
    existing list, and any other value from *custom* overwrites the default.
    """
    for key, value in custom.items():
        if key in base:
            if isinstance(value, dict):
                mix(base[key], value)
            elif isinstance(value, list):
                base[key].extend(value)
            else:
                base[key] = value
        else:
            base[key] = value
    return base
| en | 0.561383 | # -*- coding: utf-8 -*- # import os # SCRIPT_PATH = helpers.script_abspath(__file__) Read default configuration # DEFAULT # 'path': SCRIPT_PATH, # CUSTOM FROM THE USER # Recover the two options # Verify custom project configuration # Check if these three variables were changed from the initial template # get file name with the load file utility # Mix default and custom configuration # log.info("Adding %s to configuration" % key) # log.info("Replacing default %s in configuration" % key) | 2.427654 | 2 |
cn/study/days100/days016/05SeqSearch.py | Jasonandy/Python-X | 0 | 6624934 | <filename>cn/study/days100/days016/05SeqSearch.py<gh_stars>0
"""
顺序查找
"""
from time import time
def seq_search(items, key):
    """Sequential search: return the index of the first occurrence of
    *key* in *items*, or -1 when it is absent."""
    return next(
        (position for position, value in enumerate(items) if value == key),
        -1)
def main():
    """Time a sequential search for the value 26 in a sample list and
    report the index found (or -1)."""
    origin = [1, 22, 13, 34, 55, 26, 12, 32, 32, 23, 45, 34, 78, 89, 67, 56, 12, 32, 92]
    print(origin)
    start_time = time()
    # seq_search returns the index of the first match (or -1), not a
    # sorted list; the original message/variable wrongly said "排序" (sort).
    index = seq_search(origin, 26)
    end_time = time()
    print('查找CostTime: %.2f (s) \n查找结果(索引): %s ' % (end_time - start_time, index))
if __name__ == '__main__':
    main()
| <filename>cn/study/days100/days016/05SeqSearch.py<gh_stars>0
"""
顺序查找
"""
from time import time
def seq_search(items, key):
    """Sequential search: return the index of the first occurrence of
    *key* in *items*, or -1 when it is absent."""
    return next(
        (position for position, value in enumerate(items) if value == key),
        -1)
def main():
    """Time a sequential search for the value 26 in a sample list and
    report the index found (or -1)."""
    origin = [1, 22, 13, 34, 55, 26, 12, 32, 32, 23, 45, 34, 78, 89, 67, 56, 12, 32, 92]
    print(origin)
    start_time = time()
    # seq_search returns the index of the first match (or -1), not a
    # sorted list; the original message/variable wrongly said "排序" (sort).
    index = seq_search(origin, 26)
    end_time = time()
    print('查找CostTime: %.2f (s) \n查找结果(索引): %s ' % (end_time - start_time, index))
if __name__ == '__main__':
    main()
| none | 1 | 2.839732 | 3 | |
tags/1.0.1/ipaddr_test.py | nouiz/fredericbastien-ipaddr-py-speed-up | 2 | 6624935 | <reponame>nouiz/fredericbastien-ipaddr-py-speed-up
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import ipaddr
class IpaddrUnitTest(unittest.TestCase):
    """Unit tests for the ipaddr IPv4/IPv6 classes.

    NOTE(review): Python 2 era code (long literals such as 4294967040L,
    dict.has_key) -- this suite will not run unmodified under Python 3.
    Several address literals look anonymized by a scrubbing pass, so some
    expected values may no longer match the originally intended data.
    """

    def setUp(self):
        # Shared fixtures: a /24 IPv4, an IPv4 given with a hostmask
        # instead of a netmask, and a /64 IPv6.
        self.ipv4 = ipaddr.IPv4('1.2.3.4/24')
        self.ipv4_hostmask = ipaddr.IPv4('10.0.0.1/0.255.255.255')
        self.ipv6 = ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')

    def testGetNetwork(self):
        self.assertEqual(self.ipv4.network, 16909056)
        self.assertEqual(self.ipv4.network_ext, '1.2.3.0')
        self.assertEqual(self.ipv4_hostmask.network_ext, '10.0.0.0')
        self.assertEqual(self.ipv6.network,
                         42540616829182469433403647294022090752)
        self.assertEqual(self.ipv6.network_ext,
                         '2001:658:22A:CAFE::')
        self.assertEqual(self.ipv6.hostmask_ext,
                         '::FFFF:FFFF:FFFF:FFFF')

    def testIpFromInt(self):
        # Construction from integers, including out-of-range rejection.
        self.assertEqual(self.ipv4.ip, ipaddr.IPv4(16909060).ip)
        self.assertRaises(ipaddr.IPv4IpValidationError,
                          ipaddr.IPv4, 2**32)
        self.assertRaises(ipaddr.IPv4IpValidationError,
                          ipaddr.IPv4, -1)
        self.assertEqual(self.ipv6.ip,
                         ipaddr.IPv6(42540616829182469433547762482097946625).ip)
        self.assertRaises(ipaddr.IPv6IpValidationError,
                          ipaddr.IPv6, 2**128)
        self.assertRaises(ipaddr.IPv6IpValidationError,
                          ipaddr.IPv6, -1)
        self.assertEqual(ipaddr.IP(self.ipv4.ip).version, 4)
        self.assertEqual(ipaddr.IP(self.ipv6.ip).version, 6)

    def testGetIp(self):
        self.assertEqual(self.ipv4.ip, 16909060)
        self.assertEqual(self.ipv4.ip_ext, '1.2.3.4')
        self.assertEqual(self.ipv4.ip_ext_full, '1.2.3.4')
        self.assertEqual(self.ipv4_hostmask.ip_ext, '10.0.0.1')
        self.assertEqual(self.ipv6.ip, 42540616829182469433547762482097946625)
        self.assertEqual(self.ipv6.ip_ext,
                         'fc00:db20:35b:7399::5')
        self.assertEqual(self.ipv6.ip_ext_full,
                         'fc00:e968:6179::de52:7100')

    def testGetNetmask(self):
        self.assertEqual(self.ipv4.netmask, 4294967040L)
        self.assertEqual(self.ipv4.netmask_ext, '255.255.255.0')
        self.assertEqual(self.ipv4_hostmask.netmask_ext, '255.0.0.0')
        self.assertEqual(self.ipv6.netmask,
                         340282366920938463444927863358058659840)
        self.assertEqual(self.ipv6.netmask_ext, 64)

    def testZeroNetmask(self):
        # A /0 netmask is legal and must validate.
        ipv4_zero_netmask = ipaddr.IPv4('1.2.3.4/0')
        self.assertEqual(ipv4_zero_netmask.netmask, 0)
        self.assert_(ipv4_zero_netmask._IsValidNetmask(str(0)))
        ipv6_zero_netmask = ipaddr.IPv6('::1/0')
        self.assertEqual(ipv6_zero_netmask.netmask, 0)
        self.assert_(ipv6_zero_netmask._IsValidNetmask(str(0)))

    def testGetBroadcast(self):
        self.assertEqual(self.ipv4.broadcast, 16909311L)
        self.assertEqual(self.ipv4.broadcast_ext, '1.2.3.255')
        self.assertEqual(self.ipv6.broadcast,
                         42540616829182469451850391367731642367)
        self.assertEqual(self.ipv6.broadcast_ext,
                         'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')

    def testGetPrefixlen(self):
        self.assertEqual(self.ipv4.prefixlen, 24)
        self.assertEqual(self.ipv6.prefixlen, 64)

    def testGetSupernet(self):
        self.assertEqual(self.ipv4.Supernet().prefixlen, 23)
        self.assertEqual(self.ipv4.Supernet().network_ext, '1.2.2.0')
        # Supernet of the whole address space is itself.
        self.assertEqual(ipaddr.IPv4('0.0.0.0/0').Supernet(),
                         ipaddr.IPv4('0.0.0.0/0'))
        self.assertEqual(self.ipv6.Supernet().prefixlen, 63)
        self.assertEqual(self.ipv6.Supernet().network_ext,
                         '2001:658:22A:CAFE::')
        self.assertEqual(ipaddr.IPv6('::0/0').Supernet(), ipaddr.IPv6('::0/0'))

    def testGetSupernet3(self):
        # Supernet with an explicit prefixlen_diff of 3.
        self.assertEqual(self.ipv4.Supernet(3).prefixlen, 21)
        self.assertEqual(self.ipv4.Supernet(3).network_ext, '1.2.0.0')
        self.assertEqual(self.ipv6.Supernet(3).prefixlen, 61)
        self.assertEqual(self.ipv6.Supernet(3).network_ext,
                         '2001:658:22A:CAF8::')

    def testGetSubnet(self):
        self.assertEqual(self.ipv4.Subnet()[0].prefixlen, 25)
        self.assertEqual(self.ipv4.Subnet()[0].network_ext, '1.2.3.0')
        self.assertEqual(self.ipv4.Subnet()[1].network_ext, '192.168.127.12')
        self.assertEqual(self.ipv6.Subnet()[0].prefixlen, 65)

    def testGetSubnetForSingle32(self):
        # A /32 has no smaller subnets; Subnet() returns itself.
        ip = ipaddr.IPv4('1.2.3.4/32')
        subnets1 = [str(x) for x in ip.Subnet()]
        subnets2 = [str(x) for x in ip.Subnet(2)]
        self.assertEqual(subnets1, ['1.2.3.4/32'])
        self.assertEqual(subnets1, subnets2)

    def testGetSubnetForSingle128(self):
        # Same for an IPv6 /128.
        ip = ipaddr.IPv6('::1/128')
        subnets1 = [str(x) for x in ip.Subnet()]
        subnets2 = [str(x) for x in ip.Subnet(2)]
        self.assertEqual(subnets1, ['::1/128'])
        self.assertEqual(subnets1, subnets2)

    def testSubnet2(self):
        # prefixlen_diff=2 yields four equal subnets.
        ips = [str(x) for x in self.ipv4.Subnet(2)]
        self.assertEqual(
            ips,
            ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
        ipsv6 = [str(x) for x in self.ipv6.Subnet(2)]
        self.assertEqual(
            ipsv6,
            ['2001:658:22A:CAFE::/66',
             '2001:658:22A:CAFE:4000::/66',
             '2001:658:22A:CAFE:8000::/66',
             '2001:658:22A:CAFE:C000::/66'])

    def testSubnetFailsForLargeCidrDiff(self):
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Subnet, 9)
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Subnet, 65)

    def testSupernetFailsForLargeCidrDiff(self):
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Supernet, 25)
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Supernet, 65)

    def testSubnetFailsForNegativeCidrDiff(self):
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Subnet, -1)
        self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Subnet, -1)

    def testGetNumHosts(self):
        self.assertEqual(self.ipv4.numhosts, 256)
        self.assertEqual(self.ipv4.Subnet()[0].numhosts, 128)
        self.assertEqual(self.ipv4.Supernet().numhosts, 512)
        self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
        self.assertEqual(self.ipv6.Subnet()[0].numhosts, 9223372036854775808)
        self.assertEqual(self.ipv6.Supernet().numhosts, 36893488147419103232)

    def testContains(self):
        # Contains() and the `in` operator must agree.
        self.assertTrue(self.ipv4.Contains(ipaddr.IPv4('192.168.127.12/25')))
        self.assertTrue(ipaddr.IPv4('192.168.127.12/25') in self.ipv4)
        self.assertFalse(self.ipv4.Contains(ipaddr.IPv4('1.2.4.1/24')))
        self.assertFalse(ipaddr.IPv4('1.2.4.1/24') in self.ipv4)
        # Cross-protocol containment is always False.
        self.assertFalse(self.ipv4 in self.ipv6)
        self.assertFalse(self.ipv6 in self.ipv4)
        self.assertTrue(self.ipv4 in self.ipv4)
        self.assertTrue(self.ipv6 in self.ipv6)

    def testBadAddress(self):
        self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4, 'poop')
        self.assertRaises(ipaddr.IPv4IpValidationError,
                          ipaddr.IPv4, '1.2.3.256')
        self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6, 'poopv6')
        self.assertRaises(ipaddr.IPv4IpValidationError,
                          ipaddr.IPv4, '1.2.3.4/32/24')

    def testBadNetMask(self):
        self.assertRaises(ipaddr.IPv4NetmaskValidationError,
                          ipaddr.IPv4, '1.2.3.4/')
        self.assertRaises(ipaddr.IPv4NetmaskValidationError,
                          ipaddr.IPv4, '1.2.3.4/33')
        self.assertRaises(ipaddr.IPv4NetmaskValidationError,
                          ipaddr.IPv4, '1.2.3.4/254.254.255.256')
        self.assertRaises(ipaddr.IPv6NetmaskValidationError,
                          ipaddr.IPv6, '::1/')
        self.assertRaises(ipaddr.IPv6NetmaskValidationError,
                          ipaddr.IPv6, '::1/129')

    def testNth(self):
        # Indexing yields the nth host address as a string.
        self.assertEqual(self.ipv4[5], '1.2.3.5')
        self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
        self.assertEqual(self.ipv6[5],
                         'fc00:db20:35b:7399::5')

    def testEquals(self):
        self.assertTrue(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.4/24')))
        self.assertFalse(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.4/23')))
        self.assertFalse(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.5/24')))
        self.assertTrue(self.ipv6.__eq__(
            ipaddr.IPv6('fc00:db20:35b:7399::5/64')))
        self.assertFalse(self.ipv6.__eq__(
            ipaddr.IPv6('fc00:db20:35b:7399::5/63')))
        self.assertFalse(self.ipv6.__eq__(
            ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')))

    def testSlash32Constructor(self):
        self.assertEquals(str(ipaddr.IPv4('1.2.3.4/255.255.255.255')),
                          '1.2.3.4/32')

    def testSlash128Constructor(self):
        self.assertEquals(str(ipaddr.IPv6('::1/128')),
                          '::1/128')

    def testSlash0Constructor(self):
        self.assertEquals(str(ipaddr.IPv4('1.2.3.4/0.0.0.0')), '1.2.3.4/0')

    def testCollapsing(self):
        ip1 = ipaddr.IPv4('1.1.0.0/24')
        ip2 = ipaddr.IPv4('1.1.1.0/24')
        ip3 = ipaddr.IPv4('1.1.2.0/24')
        ip4 = ipaddr.IPv4('1.1.3.0/24')
        ip5 = ipaddr.IPv4('1.1.4.0/24')
        # stored in no particular order b/c we want CollapseAddr to call [].sort
        # and we want that sort to call ipaddr.IP.__cmp__() on our array members
        ip6 = ipaddr.IPv4('1.1.0.0/22')
        # check that addresses are subsumed properly.
        collapsed = ipaddr.CollapseAddrList([ip1, ip2, ip3, ip4, ip5, ip6])
        self.assertEqual(collapsed, [ipaddr.IPv4('1.1.0.0/22'),
                                     ipaddr.IPv4('1.1.4.0/24')])
        # test that two addresses are supernet'ed properly
        collapsed = ipaddr.CollapseAddrList([ip1, ip2])
        self.assertEqual(collapsed, [ipaddr.IPv4('1.1.0.0/23')])
        ip_same1 = ip_same2 = ipaddr.IPv4('1.1.1.1/32')
        self.assertEqual(ipaddr.CollapseAddrList([ip_same1, ip_same2]), [ip_same1])
        ip1 = ipaddr.IPv6('::2001:1/100')
        ip2 = ipaddr.IPv6('::2002:1/120')
        ip3 = ipaddr.IPv6('::2001:1/96')
        # test that ipv6 addresses are subsumed properly.
        collapsed = ipaddr.CollapseAddrList([ip1, ip2, ip3])
        self.assertEqual(collapsed, [ip3])

    def testNetworkComparison(self):
        # ip1 and ip2 have the same network address
        ip1 = ipaddr.IPv4('1.1.1.0/24')
        ip2 = ipaddr.IPv4('1.1.1.1/24')
        ip3 = ipaddr.IPv4('1.1.2.0/24')
        self.assertEquals(ip1.__cmp__(ip3), -1)
        self.assertEquals(ip3.__cmp__(ip2), 1)
        self.assertEquals(ip1.CompareNetworks(ip2), 0)
        ip1 = ipaddr.IPv6('2001::2000/96')
        ip2 = ipaddr.IPv6('2001::2001/96')
        ip3 = ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/96')
        self.assertEquals(ip1.__cmp__(ip3), -1)
        self.assertEquals(ip3.__cmp__(ip2), 1)
        self.assertEquals(ip1.CompareNetworks(ip2), 0)
        # Test comparing different protocols: IPv6 sorts after IPv4.
        ipv6 = ipaddr.IPv6('::/0')
        ipv4 = ipaddr.IPv4('0.0.0.0/0')
        self.assertEquals(ipv6.__cmp__(ipv4), 1)
        self.assertEquals(ipv4.__cmp__(ipv6), -1)

    def testEmbeddedIPv4(self):
        # v4-compatible ('::a.b.c.d') shares the integer value with the
        # plain IPv4 address; v4-mapped ('::ffff:a.b.c.d') does not.
        ipv4_string = '254.254.254.254'
        ipv4 = ipaddr.IPv4(ipv4_string)
        v4compat_ipv6 = ipaddr.IPv6('::%s' % ipv4_string)
        self.assertEquals(v4compat_ipv6.ip, ipv4.ip)
        v4mapped_ipv6 = ipaddr.IPv6('::ffff:%s' % ipv4_string)
        self.assertNotEquals(v4mapped_ipv6.ip, ipv4.ip)
        self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6,
                          '2001:1.1.1.1:1.1.1.1')

    def testIPVersion(self):
        self.assertEqual(self.ipv4.version, 4)
        self.assertEqual(self.ipv6.version, 6)

    def testIpStrFromPrefixlen(self):
        ipv4 = ipaddr.IPv4('1.2.3.4/24')
        self.assertEquals(ipv4._IpStrFromPrefixlen(), '255.255.255.0')
        self.assertEquals(ipv4._IpStrFromPrefixlen(28), '255.255.255.240')

    def testIpType(self):
        # The IP() factory must return the protocol-specific class.
        ipv4 = ipaddr.IP('1.2.3.4')
        ipv6 = ipaddr.IP('::1.2.3.4')
        self.assertEquals(ipaddr.IPv4, type(ipv4))
        self.assertEquals(ipaddr.IPv6, type(ipv6))

    def testReserved(self):
        self.assertEquals(True, ipaddr.IP('172.16.31.10/31').IsMulticast())
        self.assertEquals(True, ipaddr.IP('192.168.1.1/17').IsRFC1918())
        self.assertEquals(True, ipaddr.IP('169.254.100.200/24').IsLinkLocal())
        self.assertEquals(True, ipaddr.IP('127.100.200.254/32').IsLoopback())

    def testAddrExclude(self):
        addr1 = ipaddr.IP('10.1.1.0/24')
        addr2 = ipaddr.IP('10.1.1.0/26')
        addr3 = ipaddr.IP('10.2.1.0/24')
        self.assertEqual(addr1.AddressExclude(addr2),
                         [ipaddr.IP('10.1.1.64/26'),
                          ipaddr.IP('10.1.1.128/25')])
        # Excluding a non-contained network is an error.
        self.assertRaises(ValueError, addr1.AddressExclude, addr3)

    def testHash(self):
        # Equal networks hash equally and are usable as dict keys.
        self.assertEquals(hash(ipaddr.IP('10.1.1.0/24')),
                          hash(ipaddr.IP('10.1.1.0/24')))
        dummy = {}
        dummy[self.ipv4] = None
        dummy[self.ipv6] = None
        self.assertTrue(dummy.has_key(self.ipv4))

    def testIPv4PrefixFromInt(self):
        addr1 = ipaddr.IP('10.1.1.0/24')
        addr2 = ipaddr.IPv4(addr1.ip)  # clone prefix
        addr2.SetPrefix(addr1.prefixlen)
        addr3 = ipaddr.IP(123456)
        self.assertEqual(123456, addr3.ip)
        self.assertRaises(ipaddr.IPv4NetmaskValidationError,
                          addr2.SetPrefix, -1L)
        self.assertEqual(addr1, addr2)
        self.assertEqual(str(addr1), str(addr2))

    def testIPv6PrefixFromInt(self):
        addr1 = ipaddr.IP('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')
        addr2 = ipaddr.IPv6(addr1.ip)  # clone prefix
        addr2.SetPrefix(addr1.prefixlen)
        addr3 = ipaddr.IP(123456)
        self.assertEqual(123456, addr3.ip)
        self.assertRaises(ipaddr.IPv6NetmaskValidationError,
                          addr2.SetPrefix, -1L)
        self.assertEqual(addr1, addr2)
        self.assertEqual(str(addr1), str(addr2))

    def testCopyConstructor(self):
        addr1 = ipaddr.IP('10.1.1.0/24')
        addr2 = ipaddr.IP(addr1)
        addr3 = ipaddr.IP('fc00:db20:35b:7399::5/64')
        addr4 = ipaddr.IP(addr3)
        self.assertEqual(addr1, addr2)
        self.assertEqual(addr3, addr4)

    def testCompressIPv6Address(self):
        # Maps uncompressed input -> expected compressed str() output.
        # NOTE(review): this literal contains duplicate keys (apparently
        # from address anonymization), so later entries silently win.
        test_addresses = {
            'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128',
            '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
            '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
            '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
            '2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
            'fdf8:f53e:61e4::18': 'fc00:db20:35b:7399::5/128',
            'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b': '::4:0:0:0:FFFF/128',
            'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b': '::5:0:0:FFFF/128',
            'fc00:db20:35b:7399::5': 'fdf8:f53e:61e4::18:7:8/128',
            '0:0:0:0:0:0:0:0': '::/128',
            '0:0:0:0:0:0:0:0/0': '::/0',
            '0:0:0:0:0:0:0:1': '::1/128',
            'fdf8:f53e:61e4::18/66': '2001:658:22A:CAFE::/66',
        }
        for uncompressed, compressed in test_addresses.items():
            self.assertEquals(compressed, str(ipaddr.IPv6(uncompressed)))

    def testExplodeShortHandIpStr(self):
        addr1 = ipaddr.IPv6('2001::1')
        self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001',
                         addr1._ExplodeShortHandIpStr(addr1.ip_ext))


if __name__ == '__main__':
    unittest.main()
| #!/usr/bin/python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import ipaddr
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4 = ipaddr.IPv4('1.2.3.4/24')
self.ipv4_hostmask = ipaddr.IPv4('10.0.0.1/0.255.255.255')
self.ipv6 = ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')
def testGetNetwork(self):
self.assertEqual(self.ipv4.network, 16909056)
self.assertEqual(self.ipv4.network_ext, '1.2.3.0')
self.assertEqual(self.ipv4_hostmask.network_ext, '10.0.0.0')
self.assertEqual(self.ipv6.network,
42540616829182469433403647294022090752)
self.assertEqual(self.ipv6.network_ext,
'2001:658:22A:CAFE::')
self.assertEqual(self.ipv6.hostmask_ext,
'::FFFF:FFFF:FFFF:FFFF')
def testIpFromInt(self):
self.assertEqual(self.ipv4.ip, ipaddr.IPv4(16909060).ip)
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4, 2**32)
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4, -1)
self.assertEqual(self.ipv6.ip,
ipaddr.IPv6(42540616829182469433547762482097946625).ip)
self.assertRaises(ipaddr.IPv6IpValidationError,
ipaddr.IPv6, 2**128)
self.assertRaises(ipaddr.IPv6IpValidationError,
ipaddr.IPv6, -1)
self.assertEqual(ipaddr.IP(self.ipv4.ip).version, 4)
self.assertEqual(ipaddr.IP(self.ipv6.ip).version, 6)
def testGetIp(self):
self.assertEqual(self.ipv4.ip, 16909060)
self.assertEqual(self.ipv4.ip_ext, '1.2.3.4')
self.assertEqual(self.ipv4.ip_ext_full, '1.2.3.4')
self.assertEqual(self.ipv4_hostmask.ip_ext, '10.0.0.1')
self.assertEqual(self.ipv6.ip, 42540616829182469433547762482097946625)
self.assertEqual(self.ipv6.ip_ext,
'fc00:db20:35b:7399::5')
self.assertEqual(self.ipv6.ip_ext_full,
'fc00:e968:6179::de52:7100')
def testGetNetmask(self):
self.assertEqual(self.ipv4.netmask, 4294967040L)
self.assertEqual(self.ipv4.netmask_ext, '255.255.255.0')
self.assertEqual(self.ipv4_hostmask.netmask_ext, '255.0.0.0')
self.assertEqual(self.ipv6.netmask,
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6.netmask_ext, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddr.IPv4('1.2.3.4/0')
self.assertEqual(ipv4_zero_netmask.netmask, 0)
self.assert_(ipv4_zero_netmask._IsValidNetmask(str(0)))
ipv6_zero_netmask = ipaddr.IPv6('::1/0')
self.assertEqual(ipv6_zero_netmask.netmask, 0)
self.assert_(ipv6_zero_netmask._IsValidNetmask(str(0)))
def testGetBroadcast(self):
self.assertEqual(self.ipv4.broadcast, 16909311L)
self.assertEqual(self.ipv4.broadcast_ext, '1.2.3.255')
self.assertEqual(self.ipv6.broadcast,
42540616829182469451850391367731642367)
self.assertEqual(self.ipv6.broadcast_ext,
'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4.prefixlen, 24)
self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4.Supernet().prefixlen, 23)
self.assertEqual(self.ipv4.Supernet().network_ext, '1.2.2.0')
self.assertEqual(ipaddr.IPv4('0.0.0.0/0').Supernet(),
ipaddr.IPv4('0.0.0.0/0'))
self.assertEqual(self.ipv6.Supernet().prefixlen, 63)
self.assertEqual(self.ipv6.Supernet().network_ext,
'2001:658:22A:CAFE::')
self.assertEqual(ipaddr.IPv6('::0/0').Supernet(), ipaddr.IPv6('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4.Supernet(3).prefixlen, 21)
self.assertEqual(self.ipv4.Supernet(3).network_ext, '1.2.0.0')
self.assertEqual(self.ipv6.Supernet(3).prefixlen, 61)
self.assertEqual(self.ipv6.Supernet(3).network_ext,
'2001:658:22A:CAF8::')
def testGetSubnet(self):
self.assertEqual(self.ipv4.Subnet()[0].prefixlen, 25)
self.assertEqual(self.ipv4.Subnet()[0].network_ext, '1.2.3.0')
self.assertEqual(self.ipv4.Subnet()[1].network_ext, '192.168.127.12')
self.assertEqual(self.ipv6.Subnet()[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddr.IPv4('1.2.3.4/32')
subnets1 = [str(x) for x in ip.Subnet()]
subnets2 = [str(x) for x in ip.Subnet(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddr.IPv6('::1/128')
subnets1 = [str(x) for x in ip.Subnet()]
subnets2 = [str(x) for x in ip.Subnet(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4.Subnet(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6.Subnet(2)]
self.assertEqual(
ipsv6,
['2001:658:22A:CAFE::/66',
'2001:658:22A:CAFE:4000::/66',
'2001:658:22A:CAFE:8000::/66',
'2001:658:22A:CAFE:C000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Subnet, 9)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Subnet, 65)
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Supernet, 25)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.Subnet, -1)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.Subnet, -1)
def testGetNumHosts(self):
self.assertEqual(self.ipv4.numhosts, 256)
self.assertEqual(self.ipv4.Subnet()[0].numhosts, 128)
self.assertEqual(self.ipv4.Supernet().numhosts, 512)
self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
self.assertEqual(self.ipv6.Subnet()[0].numhosts, 9223372036854775808)
self.assertEqual(self.ipv6.Supernet().numhosts, 36893488147419103232)
def testContains(self):
self.assertTrue(self.ipv4.Contains(ipaddr.IPv4('192.168.127.12/25')))
self.assertTrue(ipaddr.IPv4('192.168.127.12/25') in self.ipv4)
self.assertFalse(self.ipv4.Contains(ipaddr.IPv4('1.2.4.1/24')))
self.assertFalse(ipaddr.IPv4('1.2.4.1/24') in self.ipv4)
self.assertFalse(self.ipv4 in self.ipv6)
self.assertFalse(self.ipv6 in self.ipv4)
self.assertTrue(self.ipv4 in self.ipv4)
self.assertTrue(self.ipv6 in self.ipv6)
def testBadAddress(self):
self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4, 'poop')
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4, '1.2.3.256')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6, 'poopv6')
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4, '1.2.3.4/32/24')
def testBadNetMask(self):
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4, '1.2.3.4/')
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4, '1.2.3.4/33')
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4, '1.2.3.4/254.254.255.256')
self.assertRaises(ipaddr.IPv6NetmaskValidationError,
ipaddr.IPv6, '::1/')
self.assertRaises(ipaddr.IPv6NetmaskValidationError,
ipaddr.IPv6, '::1/129')
def testNth(self):
self.assertEqual(self.ipv4[5], '1.2.3.5')
self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
self.assertEqual(self.ipv6[5],
'fc00:db20:35b:7399::5')
def testEquals(self):
self.assertTrue(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.4/24')))
self.assertFalse(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.4/23')))
self.assertFalse(self.ipv4.__eq__(ipaddr.IPv4('1.2.3.5/24')))
self.assertTrue(self.ipv6.__eq__(
ipaddr.IPv6('fc00:db20:35b:7399::5/64')))
self.assertFalse(self.ipv6.__eq__(
ipaddr.IPv6('fc00:db20:35b:7399::5/63')))
self.assertFalse(self.ipv6.__eq__(
ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')))
def testSlash32Constructor(self):
self.assertEquals(str(ipaddr.IPv4('1.2.3.4/255.255.255.255')),
'1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEquals(str(ipaddr.IPv6('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEquals(str(ipaddr.IPv4('1.2.3.4/0.0.0.0')), '1.2.3.4/0')
def testCollapsing(self):
ip1 = ipaddr.IPv4('1.1.0.0/24')
ip2 = ipaddr.IPv4('1.1.1.0/24')
ip3 = ipaddr.IPv4('1.1.2.0/24')
ip4 = ipaddr.IPv4('1.1.3.0/24')
ip5 = ipaddr.IPv4('1.1.4.0/24')
# stored in no particular order b/c we want CollapseAddr to call [].sort
# and we want that sort to call ipaddr.IP.__cmp__() on our array members
ip6 = ipaddr.IPv4('1.1.0.0/22')
# check that addreses are subsumed properlly.
collapsed = ipaddr.CollapseAddrList([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4('1.1.0.0/22'),
ipaddr.IPv4('1.1.4.0/24')])
# test that two addresses are supernet'ed properlly
collapsed = ipaddr.CollapseAddrList([ip1, ip2])
self.assertEqual(collapsed, [ipaddr.IPv4('1.1.0.0/23')])
ip_same1 = ip_same2 = ipaddr.IPv4('1.1.1.1/32')
self.assertEqual(ipaddr.CollapseAddrList([ip_same1, ip_same2]), [ip_same1])
ip1 = ipaddr.IPv6('::2001:1/100')
ip2 = ipaddr.IPv6('::2002:1/120')
ip3 = ipaddr.IPv6('::2001:1/96')
# test that ipv6 addresses are subsumed properlly.
collapsed = ipaddr.CollapseAddrList([ip1, ip2, ip3])
self.assertEqual(collapsed, [ip3])
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddr.IPv4('1.1.1.0/24')
ip2 = ipaddr.IPv4('1.1.1.1/24')
ip3 = ipaddr.IPv4('1.1.2.0/24')
self.assertEquals(ip1.__cmp__(ip3), -1)
self.assertEquals(ip3.__cmp__(ip2), 1)
self.assertEquals(ip1.CompareNetworks(ip2), 0)
ip1 = ipaddr.IPv6('2001::2000/96')
ip2 = ipaddr.IPv6('2001::2001/96')
ip3 = ipaddr.IPv6('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/96')
self.assertEquals(ip1.__cmp__(ip3), -1)
self.assertEquals(ip3.__cmp__(ip2), 1)
self.assertEquals(ip1.CompareNetworks(ip2), 0)
# Test comparing different protocols
ipv6 = ipaddr.IPv6('::/0')
ipv4 = ipaddr.IPv4('0.0.0.0/0')
self.assertEquals(ipv6.__cmp__(ipv4), 1)
self.assertEquals(ipv4.__cmp__(ipv6), -1)
def testEmbeddedIPv4(self):
ipv4_string = '254.254.254.254'
ipv4 = ipaddr.IPv4(ipv4_string)
v4compat_ipv6 = ipaddr.IPv6('::%s' % ipv4_string)
self.assertEquals(v4compat_ipv6.ip, ipv4.ip)
v4mapped_ipv6 = ipaddr.IPv6('::ffff:%s' % ipv4_string)
self.assertNotEquals(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6,
'2001:1.1.1.1:1.1.1.1')
def testIPVersion(self):
self.assertEqual(self.ipv4.version, 4)
self.assertEqual(self.ipv6.version, 6)
def testIpStrFromPrefixlen(self):
ipv4 = ipaddr.IPv4('1.2.3.4/24')
self.assertEquals(ipv4._IpStrFromPrefixlen(), '255.255.255.0')
self.assertEquals(ipv4._IpStrFromPrefixlen(28), '255.255.255.240')
def testIpType(self):
ipv4 = ipaddr.IP('1.2.3.4')
ipv6 = ipaddr.IP('::1.2.3.4')
self.assertEquals(ipaddr.IPv4, type(ipv4))
self.assertEquals(ipaddr.IPv6, type(ipv6))
def testReserved(self):
self.assertEquals(True, ipaddr.IP('172.16.31.10/31').IsMulticast())
self.assertEquals(True, ipaddr.IP('192.168.1.1/17').IsRFC1918())
self.assertEquals(True, ipaddr.IP('169.254.100.200/24').IsLinkLocal())
self.assertEquals(True, ipaddr.IP('127.100.200.254/32').IsLoopback())
def testAddrExclude(self):
addr1 = ipaddr.IP('10.1.1.0/24')
addr2 = ipaddr.IP('10.1.1.0/26')
addr3 = ipaddr.IP('10.2.1.0/24')
self.assertEqual(addr1.AddressExclude(addr2),
[ipaddr.IP('10.1.1.64/26'),
ipaddr.IP('10.1.1.128/25')])
self.assertRaises(ValueError, addr1.AddressExclude, addr3)
def testHash(self):
self.assertEquals(hash(ipaddr.IP('10.1.1.0/24')),
hash(ipaddr.IP('10.1.1.0/24')))
dummy = {}
dummy[self.ipv4] = None
dummy[self.ipv6] = None
self.assertTrue(dummy.has_key(self.ipv4))
def testIPv4PrefixFromInt(self):
addr1 = ipaddr.IP('10.1.1.0/24')
addr2 = ipaddr.IPv4(addr1.ip) # clone prefix
addr2.SetPrefix(addr1.prefixlen)
addr3 = ipaddr.IP(123456)
self.assertEqual(123456, addr3.ip)
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
addr2.SetPrefix, -1L)
self.assertEqual(addr1, addr2)
self.assertEqual(str(addr1), str(addr2))
def testIPv6PrefixFromInt(self):
"""IPv6 counterpart of testIPv4PrefixFromInt: integer construction + SetPrefix round-trips, negative prefix raises."""
addr1 = ipaddr.IP('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64')
addr2 = ipaddr.IPv6(addr1.ip)  # clone prefix
addr2.SetPrefix(addr1.prefixlen)
addr3 = ipaddr.IP(123456)
self.assertEqual(123456, addr3.ip)
self.assertRaises(ipaddr.IPv6NetmaskValidationError,
addr2.SetPrefix, -1L)
self.assertEqual(addr1, addr2)
self.assertEqual(str(addr1), str(addr2))
def testCopyConstructor(self):
"""ipaddr.IP accepts an existing IP object and produces an equal copy, for both protocols."""
addr1 = ipaddr.IP('10.1.1.0/24')
addr2 = ipaddr.IP(addr1)
addr3 = ipaddr.IP('fc00:db20:35b:7399::5/64')
addr4 = ipaddr.IP(addr3)
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
def testCompressIPv6Address(self):
"""str() of an IPv6 yields the compressed (::) canonical form for each uncompressed input."""
# NOTE(review): the address literals in this table look anonymized/garbled
# in this dump (many values map to unrelated addresses) -- restore from
# upstream before trusting these vectors.
test_addresses = {
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'fdf8:f53e:61e4::18': 'fc00:db20:35b:7399::5/128',
# NOTE(review): this key duplicates the first entry above, so the first
# mapping is silently overwritten and that test vector never runs.
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b': '::4:0:0:0:FFFF/128',
'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b': '::5:0:0:FFFF/128',
'fc00:db20:35b:7399::5': 'fdf8:f53e:61e4::18:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'fdf8:f53e:61e4::18/66': '2001:658:22A:CAFE::/66',
}
for uncompressed, compressed in test_addresses.items():
self.assertEquals(compressed, str(ipaddr.IPv6(uncompressed)))
def testExplodeShortHandIpStr(self):
"""_ExplodeShortHandIpStr expands '::' shorthand to the full 8-group zero-padded form."""
addr1 = ipaddr.IPv6('2001::1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001',
addr1._ExplodeShortHandIpStr(addr1.ip_ext))
if __name__ == '__main__':
unittest.main() | en | 0.820111 | #!/usr/bin/python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Unittest for ipaddr module. # stored in no particular order b/c we want CollapseAddr to call [].sort # and we want that sort to call ipaddr.IP.__cmp__() on our array members # check that addreses are subsumed properlly. # test that two addresses are supernet'ed properlly # test that ipv6 addresses are subsumed properlly. # ip1 and ip2 have the same network address # Test comparing different protocols # clone prefix # clone prefix | 2.344627 | 2 |
Main/image_net_scraping.py | DominiquePaul/iwi-image-classifier | 0 | 6624936 | import numpy as np
from preprocessing import create_imagenet_dataset, create_imagenet_dataset_random

# Number of images to pull per side (positive class and random negatives).
SIZE = 2000

# NOTE(review): both entries are the same synset id, so the "fashion" and
# "automotive" datasets are built from identical positive images -- this
# looks like a copy/paste slip; confirm the intended second synset id.
synset_ids = ["n03051540", "n03051540"]
object_names = ["fashion", "automotive"]

# Renamed the loop variable from `object` (shadowed the builtin) to `category`.
for synset_id, category in zip(synset_ids, object_names):
    print("Starting to process the {} dataset".format(category))
    # Positive examples: the synset itself plus its child synsets.
    imgs_object = create_imagenet_dataset(synset_id=synset_id, size=SIZE, use_child_synsets=True)
    # Negative examples: random images from other synsets, excluding the
    # positive synset and its children.
    imgs_random = create_imagenet_dataset_random(size=SIZE, max_synset_imgs=20, forbidden_synset=synset_id, exclude_synset_children=True)
    imgnet_imgs = np.concatenate((imgs_object, imgs_random))
    imgnet_labels = np.array([1] * len(imgs_object) + [0] * len(imgs_random))
    # Shuffle images and labels with the same permutation so pairs stay aligned.
    random_order = np.random.permutation(len(imgnet_imgs))
    imgnet_imgs = imgnet_imgs[random_order]
    imgnet_labels = imgnet_labels[random_order]
    np.save("imgnet_{}_x".format(category), imgnet_imgs)
    np.save("imgnet_{}_y".format(category), imgnet_labels)
    print("Finished processing the {} dataset".format(category))
| import numpy as np
from preprocessing import create_imagenet_dataset, create_imagenet_dataset_random
SIZE = 2000
synset_ids = ["n03051540", "n03051540"]
object_names = ["fashion","automotive"]
for synset_id, object in zip(synset_ids, object_names):
print("Starting to process the {} dataset".format(object))
imgs_object = create_imagenet_dataset(synset_id=synset_id, size=SIZE, use_child_synsets=True)
imgs_random = create_imagenet_dataset_random(size=SIZE, max_synset_imgs=20, forbidden_synset=synset_id, exclude_synset_children=True)
imgnet_imgs = np.concatenate((imgs_object, imgs_random))
imgnet_labels = np.array([1]*len(imgs_object) + [0]*len(imgs_random))
random_order = np.random.permutation(len(imgnet_imgs))
imgnet_imgs = imgnet_imgs[random_order]
imgnet_labels = imgnet_labels[random_order]
np.save("imgnet_{}_x".format(object), imgnet_imgs)
np.save("imgnet_{}_y".format(object), imgnet_labels)
print("Finished processing the {} dataset".format(object))
| none | 1 | 2.439271 | 2 | |
sumo_docker_pipeline/pipeline.py | Kensuke-Mitsuzawa/sumo_docker_pipeline | 0 | 6624937 | import typing
import joblib
from datetime import datetime
from pathlib import Path
from typing import Optional
from sumo_docker_pipeline.logger_unit import logger
from sumo_docker_pipeline.operation_module.docker_operation_module import SumoDockerController
from sumo_docker_pipeline.operation_module.local_operation_module import LocalSumoController
from sumo_docker_pipeline.commons.result_module import SumoResultObjects
from sumo_docker_pipeline.commons.sumo_config_obj import SumoConfigObject
import time
class BasePipeline(object):
    """Common base for SUMO pipeline runners.

    Holds the shared working-directory / parallelism setup; concrete
    subclasses implement `get_data_directory` and `run_simulation`.
    """

    def __init__(self,
                 path_working_dir: typing.Optional[Path] = None,
                 n_jobs: int = 1):
        """
        Args:
            path_working_dir: directory for temporary files; defaults to
                /tmp/sumo_docker_pipeline when not given.
            n_jobs: number of parallel workers.
        """
        self.n_jobs = n_jobs
        if path_working_dir is None:
            self.path_working_dir = Path('/tmp').joinpath('sumo_docker_pipeline').absolute()
        else:
            self.path_working_dir = path_working_dir.absolute()
        # end if

    def get_data_directory(self) -> Path:
        """Return the directory where simulation data lives."""
        raise NotImplementedError()

    def run_simulation(self,
                       sumo_configs: typing.List[SumoConfigObject],
                       is_overwrite: bool = False) -> typing.Dict[str, SumoResultObjects]:
        """Run all scenarios and return {scenario-name: SumoResultObjects}.

        BUGFIX: the return annotation previously claimed a bare
        `SumoResultObjects`, but both subclasses return a dict keyed by
        scenario name; the annotation now matches the implementations.
        """
        raise NotImplementedError()
class LocalSumoPipeline(BasePipeline):
    """Pipeline that executes SUMO directly on the local machine."""

    def __init__(self,
                 is_rewrite_windows_path: bool = True,
                 path_working_dir: Path = None,
                 n_jobs: int = 1,
                 sumo_command: str = '/bin/sumo'):
        """A pipeline interface that runs SUMO as a local process.

        Args:
            is_rewrite_windows_path: True rewrites paths on Windows hosts.
            path_working_dir: a path to save tmp files.
            n_jobs: the number of cores.
            sumo_command: path to the local SUMO binary.
        """
        super(LocalSumoPipeline, self).__init__(path_working_dir=path_working_dir, n_jobs=n_jobs)
        self.sumo_command = sumo_command
        # todo move Template2SuMoConfig to other module.
        self.is_rewrite_windows_path = is_rewrite_windows_path

    def one_simulation(self, sumo_config_object: SumoConfigObject) -> typing.Tuple[str, SumoResultObjects]:
        """Run a single scenario; return (scenario-name, result)."""
        runner = LocalSumoController(sumo_command=self.sumo_command)
        outcome = runner.start_job(sumo_config=sumo_config_object)
        return sumo_config_object.scenario_name, outcome

    def run_simulation(self,
                       sumo_configs: typing.List[SumoConfigObject],
                       is_overwrite: bool = False) -> typing.Dict[str, SumoResultObjects]:
        """Run SUMO simulation.

        Args:
            sumo_configs: List of SUMO Config objects.
            is_overwrite: True, then the method overwrites outputs from SUMO. False raises Exception if there is a destination directory already. Default False.

        Returns: {scenario-name: `SumoResultObjects`}
        """
        logger.info(f'running sumo simulator now...')
        # Fan the scenarios out over joblib workers; each task yields a
        # (scenario-name, result) pair.
        delayed_tasks = (joblib.delayed(self.one_simulation)(cfg) for cfg in sumo_configs)
        name_result_pairs = joblib.Parallel(n_jobs=self.n_jobs)(delayed_tasks)
        logger.info(f'done the simulation.')
        return dict(name_result_pairs)
class DockerPipeline(BasePipeline):
    """Pipeline that executes each SUMO scenario inside a docker container."""

    def __init__(self,
                 path_mount_working_dir: Optional[Path] = None,
                 docker_image_name: str = 'kensukemi/sumo-ubuntu18',
                 is_rewrite_windows_path: bool = True,
                 n_jobs: int = 1,
                 time_interval_future_check: float = 3.0,
                 limit_max_wait: float = 3600):
        """A pipeline interface to run SUMO-docker.

        Args:
            path_mount_working_dir: A path to directory where a container mounts as the shared directory.
                The other config files should be in the same directory (or under a sub-directory).
            docker_image_name: A name of docker-image that you call.
            is_rewrite_windows_path: True, then the class updates Path only when your OS is Windows.
            n_jobs: the number of parallel computations.
            time_interval_future_check: retained for backward compatibility; no
                longer used now that waiting is delegated to
                concurrent.futures.wait().
            limit_max_wait: Time limit (seconds) to force end process.
        """
        super(DockerPipeline, self).__init__(path_working_dir=path_mount_working_dir, n_jobs=n_jobs)
        self.path_mount_working_dir = self.path_working_dir
        self.docker_image_name = docker_image_name
        self.is_rewrite_windows_path = is_rewrite_windows_path
        self.time_interval_future_check = time_interval_future_check
        self.limit_max_wait = limit_max_wait

    def get_data_directory(self) -> Path:
        """Return the directory mounted into the containers."""
        return self.path_mount_working_dir

    def one_job(self, sumo_config_obj: SumoConfigObject) -> SumoResultObjects:
        """Run one scenario in its own, uniquely named container."""
        logger.debug(f'running sumo simulator now...')
        # Timestamp in the container name keeps repeated runs of the same
        # scenario from colliding.
        time_stamp_current = datetime.now().timestamp()
        sumo_controller = SumoDockerController(
            container_name_base=f'sumo-docker-{sumo_config_obj.scenario_name}-{time_stamp_current}',
            image_name=self.docker_image_name,
        )
        sumo_result_obj = sumo_controller.start_job(sumo_config=sumo_config_obj)
        logger.debug(f'done the simulation.')
        return sumo_result_obj

    def run_simulation(self,
                       sumo_configs: typing.List[SumoConfigObject],
                       is_overwrite: bool = False) -> typing.Dict[str, SumoResultObjects]:
        """Run SUMO simulation in docker containers, one per scenario.

        Args:
            sumo_configs: List of SumoConfigObject.
            is_overwrite: True, then the method overwrites outputs from SUMO.
                False raises Exception if there is a destination directory already. Default False.

        Returns:
            dict {scenario-name: `SumoResultObjects`}

        Raises:
            TimeoutError: if the jobs do not finish within `limit_max_wait` seconds.
        """
        logger.info(f'making the new config files')
        from concurrent.futures import ThreadPoolExecutor, wait
        pool = ThreadPoolExecutor(self.n_jobs)
        try:
            logger.debug(f'starting tasks...')
            s_future_pool = [pool.submit(self.one_job, conf) for conf in sumo_configs]
            logger.debug(f'submitted all tasks.')
            logger.debug(f'waiting for task-ends...')
            # BUGFIX: replaced the manual sleep/poll loop with
            # concurrent.futures.wait(), which blocks until every future
            # completes or the deadline passes.
            done, not_done = wait(s_future_pool, timeout=self.limit_max_wait)
            if not_done:
                raise TimeoutError(f'We waited {self.limit_max_wait} seconds. Not finished yet.')
            d_scenario_name2result = {}
            for f_obj in s_future_pool:
                r: SumoResultObjects = f_obj.result()
                d_scenario_name2result[r.id_scenario] = r
            return d_scenario_name2result
        finally:
            # BUGFIX: the executor was previously never shut down, leaking
            # worker threads on every call. wait=False keeps the original
            # non-blocking behaviour on the timeout path.
            pool.shutdown(wait=False)
| import typing
import joblib
from datetime import datetime
from pathlib import Path
from typing import Optional
from sumo_docker_pipeline.logger_unit import logger
from sumo_docker_pipeline.operation_module.docker_operation_module import SumoDockerController
from sumo_docker_pipeline.operation_module.local_operation_module import LocalSumoController
from sumo_docker_pipeline.commons.result_module import SumoResultObjects
from sumo_docker_pipeline.commons.sumo_config_obj import SumoConfigObject
import time
class BasePipeline(object):
def __init__(self,
path_working_dir: typing.Optional[Path] = None,
n_jobs: int = 1):
self.n_jobs = n_jobs
if path_working_dir is None:
self.path_working_dir = Path('/tmp').joinpath('sumo_docker_pipeline').absolute()
else:
self.path_working_dir = path_working_dir.absolute()
# end if
def get_data_directory(self) -> Path:
raise NotImplementedError()
def run_simulation(self,
sumo_configs: typing.List[SumoConfigObject],
is_overwrite: bool = False) -> SumoResultObjects:
raise NotImplementedError()
class LocalSumoPipeline(BasePipeline):
def __init__(self,
is_rewrite_windows_path: bool = True,
path_working_dir: Path = None,
n_jobs: int = 1,
sumo_command: str = '/bin/sumo'):
"""A pipeline interface to run SUMO-docker.
Args:
path_working_dir: a path to save tmp files.
n_jobs: the number of cores.
"""
super(LocalSumoPipeline, self).__init__(path_working_dir=path_working_dir, n_jobs=n_jobs)
self.sumo_command = sumo_command
# todo move Template2SuMoConfig to other module.
# self.template_generator = Template2SuMoConfig(path_config_file=str(path_config_file),
# path_destination_dir=str(path_destination_scenario))
self.is_rewrite_windows_path = is_rewrite_windows_path
def one_simulation(self, sumo_config_object: SumoConfigObject) -> typing.Tuple[str, SumoResultObjects]:
sumo_controller = LocalSumoController(sumo_command=self.sumo_command)
sumo_result_obj = sumo_controller.start_job(sumo_config=sumo_config_object)
return sumo_config_object.scenario_name, sumo_result_obj
def run_simulation(self,
sumo_configs: typing.List[SumoConfigObject],
is_overwrite: bool = False) -> typing.Dict[str, SumoResultObjects]:
"""Run SUMO simulation.
Args:
sumo_configs: List of SUMO Config objects.
is_overwrite: True, then the method overwrites outputs from SUMO. False raises Exception if there is a destination directory already. Default False.
Returns: {scenario-name: `SumoResultObjects`}
"""
logger.info(f'running sumo simulator now...')
sumo_result_objects = joblib.Parallel(n_jobs=self.n_jobs)(joblib.delayed(
self.one_simulation)(conf) for conf in sumo_configs)
logger.info(f'done the simulation.')
_ = dict(sumo_result_objects)
return _
class DockerPipeline(BasePipeline):
def __init__(self,
path_mount_working_dir: Optional[Path] = None,
docker_image_name: str = 'kensukemi/sumo-ubuntu18',
is_rewrite_windows_path: bool = True,
n_jobs: int = 1,
time_interval_future_check: float = 3.0,
limit_max_wait: float = 3600):
"""A pipeline interface to run SUMO-docker.
Args:
The other config files should be in the same directory (or under the sub-directory)
path_mount_working_dir: A path to directory where a container mount as the shared directory.
docker_image_name: A name of docker-image that you call.
is_rewrite_windows_path: True, then the class updates Path only when your OS is Windows.
n_jobs: the number of parallel computations.
time_interval_future_check: Time interval to check Task status.
limit_max_wait: Time limit (seconds) to force end process.
"""
super(DockerPipeline, self).__init__(path_working_dir=path_mount_working_dir, n_jobs=n_jobs)
self.path_mount_working_dir = self.path_working_dir
self.docker_image_name = docker_image_name
self.is_rewrite_windows_path = is_rewrite_windows_path
self.time_interval_future_check = time_interval_future_check
self.limit_max_wait = limit_max_wait
def get_data_directory(self) -> Path:
return self.path_mount_working_dir
def one_job(self, sumo_config_obj: SumoConfigObject) -> SumoResultObjects:
logger.debug(f'running sumo simulator now...')
time_stamp_current = datetime.now().timestamp()
sumo_controller = SumoDockerController(
container_name_base=f'sumo-docker-{sumo_config_obj.scenario_name}-{time_stamp_current}',
image_name=self.docker_image_name,
)
sumo_result_obj = sumo_controller.start_job(sumo_config=sumo_config_obj)
logger.debug(f'done the simulation.')
return sumo_result_obj
def run_simulation(self,
sumo_configs: typing.List[SumoConfigObject],
is_overwrite: bool = False) -> typing.Dict[str, SumoResultObjects]:
"""Run SUMO simulation in a docker container.
Args:
sumo_configs: List of SumoConfigObject.
is_overwrite: True, then the method overwrites outputs from SUMO.
False raises Exception if there is a destination directory already. Default False.
Returns:
dict {scenario-name: `SumoResultObjects`}
"""
logger.info(f'making the new config files')
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor(self.n_jobs)
s_future_pool = []
logger.debug(f'starting tasks...')
for conf in sumo_configs:
future = pool.submit(self.one_job, (conf))
s_future_pool.append(future)
# end for
logger.debug(f'submitted all tasks.')
time_at_start = datetime.now()
logger.debug(f'waiting for task-ends...')
while True:
is_end_all = all(f.done() is True for f in s_future_pool)
if is_end_all:
break
time.sleep(self.time_interval_future_check)
if (datetime.now() - time_at_start).total_seconds() > self.limit_max_wait:
raise TimeoutError(f'We waited {self.limit_max_wait} seconds. Not finished yet.')
# end if
# end while
d_scenario_name2result = {}
for f_obj in s_future_pool:
r: SumoResultObjects = f_obj.result()
d_scenario_name2result[r.id_scenario] = r
# end for
return d_scenario_name2result
| en | 0.566211 | # end if A pipeline interface to run SUMO-docker. Args: path_working_dir: a path to save tmp files. n_jobs: the number of cores. # todo move Template2SuMoConfig to other module. # self.template_generator = Template2SuMoConfig(path_config_file=str(path_config_file), # path_destination_dir=str(path_destination_scenario)) Run SUMO simulation. Args: sumo_configs: List of SUMO Config objects. is_overwrite: True, then the method overwrites outputs from SUMO. False raises Exception if there is a destination directory already. Default False. Returns: {scenario-name: `SumoResultObjects`} A pipeline interface to run SUMO-docker. Args: The other config files should be in the same directory (or under the sub-directory) path_mount_working_dir: A path to directory where a container mount as the shared directory. docker_image_name: A name of docker-image that you call. is_rewrite_windows_path: True, then the class updates Path only when your OS is Windows. n_jobs: the number of parallel computations. time_interval_future_check: Time interval to check Task status. limit_max_wait: Time limit (seconds) to force end process. Run SUMO simulation in a docker container. Args: sumo_configs: List of SumoConfigObject. is_overwrite: True, then the method overwrites outputs from SUMO. False raises Exception if there is a destination directory already. Default False. Returns: dict {scenario-name: `SumoResultObjects`} # end for # end if # end while # end for | 2.09809 | 2 |
Collections-a-installer/community-general-2.4.0/plugins/modules/influxdb_user.py | d-amien-b/simple-getwordpress | 22 | 6624938 | #!/usr/bin/python
# Copyright: (c) 2017, <NAME> <zhhuta () gmail.com>
# insipred by <NAME> <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users.
author: "<NAME> (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
type: str
user_password:
description:
- Password to be set for the user.
required: false
type: str
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ absent, present ]
default: present
type: str
grants:
description:
- Privileges to grant to this user.
- Takes a list of dicts containing the "database" and "privilege" keys.
- If this argument is not provided, the current grants will be left alone.
- If an empty list is provided, all grants for the user will be removed.
type: list
elements: dict
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
- name: Create a user on localhost using default login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
- name: Create a user on localhost using custom login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
- name: Create an admin user on a remote host using custom login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
- name: Create a user on localhost with privileges
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
grants:
- database: 'collectd'
privilege: 'WRITE'
- database: 'graphite'
privilege: 'READ'
- name: Destroy a user using custom login credentials
community.general.influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
state: absent
'''
RETURN = r'''
#only defaults
'''
from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.community.general.plugins.module_utils.influxdb as influx
def find_user(module, client, user_name):
    """Return the InfluxDB user dict matching *user_name*, or None.

    Fails the Ansible module on connection / client errors.
    """
    try:
        matches = (entry for entry in client.get_list_users()
                   if entry['user'] == user_name)
        return next(matches, None)
    except (ConnectionError, influx.exceptions.InfluxDBClientError) as exc:
        module.fail_json(msg=to_native(exc))
def check_user_password(module, client, user_name, user_password):
"""Return False if *user_password* is wrong for *user_name*, True otherwise.

Verifies the password by switching the client to the user and issuing a
query. Note: only a 401 yields False; any other InfluxDBClientError is
swallowed and the function still returns True.
"""
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ConnectionError as e:
module.fail_json(msg=to_native(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
    """Set *user_password* for *user_name*; a no-op in check mode."""
    if module.check_mode:
        return
    try:
        client.set_user_password(user_name, user_password)
    except ConnectionError as exc:
        module.fail_json(msg=to_native(exc))
def create_user(module, client, user_name, user_password, admin):
    """Create *user_name* with the given password/admin flag; no-op in check mode."""
    if module.check_mode:
        return
    try:
        client.create_user(user_name, user_password, admin)
    except ConnectionError as exc:
        module.fail_json(msg=to_native(exc))
def drop_user(module, client, user_name):
    """Drop *user_name* (skipped in check mode) and exit the module as changed."""
    if not module.check_mode:
        try:
            client.drop_user(user_name)
        except influx.exceptions.InfluxDBClientError as exc:
            module.fail_json(msg=exc.content)
    # Reported as changed even in check mode, mirroring upstream behaviour.
    module.exit_json(changed=True)
def set_user_grants(module, client, user_name, grants):
    """Reconcile the user's database privileges with the desired *grants*.

    Revokes privileges the user holds but *grants* does not list, then
    grants any listed privilege the user is missing. Returns True if
    anything changed (or would change, in check mode).
    """
    changed = False
    try:
        # Normalise InfluxDB's wording and drop "NO PRIVILEGES" markers.
        # BUGFIX: the previous code deleted items from the list while
        # enumerating it, which skipped the element following each removed
        # entry; building a fresh list avoids that.
        current_grants = []
        for grant in client.get_list_privileges(user_name):
            if grant['privilege'] == 'ALL PRIVILEGES':
                grant['privilege'] = 'ALL'
                current_grants.append(grant)
            elif grant['privilege'] != 'NO PRIVILEGES':
                current_grants.append(grant)

        # Revoke anything the user has that is not desired.
        for current_grant in current_grants:
            if current_grant not in grants:
                if not module.check_mode:
                    client.revoke_privilege(current_grant['privilege'],
                                            current_grant['database'],
                                            user_name)
                changed = True

        # Grant anything desired that the user does not have yet.
        for grant in grants:
            if grant not in current_grants:
                if not module.check_mode:
                    client.grant_privilege(grant['privilege'],
                                           grant['database'],
                                           user_name)
                changed = True
    except influx.exceptions.InfluxDBClientError as e:
        module.fail_json(msg=e.content)
    return changed
def main():
    """Entry point: parse module arguments and converge the InfluxDB user."""
    argument_spec = influx.InfluxDb.influxdb_argument_spec()
    argument_spec.update(
        state=dict(default='present', type='str', choices=['present', 'absent']),
        user_name=dict(required=True, type='str'),
        user_password=dict(required=False, type='str', no_log=True),
        # BUGFIX: default was the *string* 'False'; a real boolean matches
        # the documented default and the declared type.
        admin=dict(default=False, type='bool'),
        grants=dict(type='list', elements='dict'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    state = module.params['state']
    user_name = module.params['user_name']
    user_password = module.params['user_password']
    admin = module.params['admin']
    grants = module.params['grants']
    influxdb = influx.InfluxDb(module)
    client = influxdb.connect_to_influxdb()
    user = find_user(module, client, user_name)

    changed = False

    if state == 'present':
        if user:
            # BUGFIX: test user_password first so check_user_password() is
            # never called with None (the old order attempted a
            # switch_user() with a None password before short-circuiting).
            if user_password is not None and not check_user_password(module, client, user_name, user_password):
                set_user_password(module, client, user_name, user_password)
                changed = True

            try:
                if admin and not user['admin']:
                    if not module.check_mode:
                        client.grant_admin_privileges(user_name)
                    changed = True
                elif not admin and user['admin']:
                    if not module.check_mode:
                        client.revoke_admin_privileges(user_name)
                    changed = True
            except influx.exceptions.InfluxDBClientError as e:
                module.fail_json(msg=to_native(e))

        else:
            # NOTE(review): restored from a redacted line in this dump;
            # upstream defaults a missing password to the empty string.
            user_password = user_password or ''
            create_user(module, client, user_name, user_password, admin)
            changed = True

        if grants is not None:
            if set_user_grants(module, client, user_name, grants):
                changed = True

        module.exit_json(changed=changed)

    if state == 'absent':
        if user:
            drop_user(module, client, user_name)
        else:
            module.exit_json(changed=False)
if __name__ == '__main__':
main()
| #!/usr/bin/python
# Copyright: (c) 2017, <NAME> <zhhuta () gmail.com>
# insipred by <NAME> <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users.
author: "<NAME> (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
type: str
user_password:
description:
- Password to be set for the user.
required: false
type: str
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ absent, present ]
default: present
type: str
grants:
description:
- Privileges to grant to this user.
- Takes a list of dicts containing the "database" and "privilege" keys.
- If this argument is not provided, the current grants will be left alone.
- If an empty list is provided, all grants for the user will be removed.
type: list
elements: dict
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
- name: Create a user on localhost using default login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
- name: Create a user on localhost using custom login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
- name: Create an admin user on a remote host using custom login credentials
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
- name: Create a user on localhost with privileges
community.general.influxdb_user:
user_name: john
user_password: <PASSWORD>
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
grants:
- database: 'collectd'
privilege: 'WRITE'
- database: 'graphite'
privilege: 'READ'
- name: Destroy a user using custom login credentials
community.general.influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ <PASSWORD> }}"
state: absent
'''
RETURN = r'''
#only defaults
'''
from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.community.general.plugins.module_utils.influxdb as influx
def find_user(module, client, user_name):
user_result = None
try:
users = client.get_list_users()
for user in users:
if user['user'] == user_name:
user_result = user
break
except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
module.fail_json(msg=to_native(e))
return user_result
def check_user_password(module, client, user_name, user_password):
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ConnectionError as e:
module.fail_json(msg=to_native(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def set_user_grants(module, client, user_name, grants):
changed = False
try:
current_grants = client.get_list_privileges(user_name)
# Fix privileges wording
for i, v in enumerate(current_grants):
if v['privilege'] == 'ALL PRIVILEGES':
v['privilege'] = 'ALL'
current_grants[i] = v
elif v['privilege'] == 'NO PRIVILEGES':
del(current_grants[i])
# check if the current grants are included in the desired ones
for current_grant in current_grants:
if current_grant not in grants:
if not module.check_mode:
client.revoke_privilege(current_grant['privilege'],
current_grant['database'],
user_name)
changed = True
# check if the desired grants are included in the current ones
for grant in grants:
if grant not in current_grants:
if not module.check_mode:
client.grant_privilege(grant['privilege'],
grant['database'],
user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
return changed
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
admin=dict(default='False', type='bool'),
grants=dict(type='list', elements='dict'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
grants = module.params['grants']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
changed = False
if state == 'present':
if user:
if not check_user_password(module, client, user_name, user_password) and user_password is not None:
set_user_password(module, client, user_name, user_password)
changed = True
try:
if admin and not user['admin']:
if not module.check_mode:
client.grant_admin_privileges(user_name)
changed = True
elif not admin and user['admin']:
if not module.check_mode:
client.revoke_admin_privileges(user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=to_native(e))
else:
user_password = <PASSWORD> or ''
create_user(module, client, user_name, user_password, admin)
changed = True
if grants is not None:
if set_user_grants(module, client, user_name, grants):
changed = True
module.exit_json(changed=changed)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| en | 0.609968 | #!/usr/bin/python # Copyright: (c) 2017, <NAME> <zhhuta () gmail.com> # insipred by <NAME> <kamil.szczygiel () intel.com> influxdb_database module # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- module: influxdb_user short_description: Manage InfluxDB users description: - Manage InfluxDB users. author: "<NAME> (@zhhuta)" requirements: - "python >= 2.6" - "influxdb >= 0.9" options: user_name: description: - Name of the user. required: True type: str user_password: description: - Password to be set for the user. required: false type: str admin: description: - Whether the user should be in the admin role or not. - Since version 2.8, the role will also be updated. default: no type: bool state: description: - State of the user. choices: [ absent, present ] default: present type: str grants: description: - Privileges to grant to this user. - Takes a list of dicts containing the "database" and "privilege" keys. - If this argument is not provided, the current grants will be left alone. - If an empty list is provided, all grants for the user will be removed. 
type: list elements: dict extends_documentation_fragment: - community.general.influxdb - name: Create a user on localhost using default login credentials community.general.influxdb_user: user_name: john user_password: <PASSWORD> - name: Create a user on localhost using custom login credentials community.general.influxdb_user: user_name: john user_password: <PASSWORD> login_username: "{{ influxdb_username }}" login_password: "{{ <PASSWORD> }}" - name: Create an admin user on a remote host using custom login credentials community.general.influxdb_user: user_name: john user_password: <PASSWORD> admin: yes hostname: "{{ influxdb_hostname }}" login_username: "{{ influxdb_username }}" login_password: "{{ <PASSWORD> }}" - name: Create a user on localhost with privileges community.general.influxdb_user: user_name: john user_password: <PASSWORD> login_username: "{{ influxdb_username }}" login_password: "{{ <PASSWORD> }}" grants: - database: 'collectd' privilege: 'WRITE' - database: 'graphite' privilege: 'READ' - name: Destroy a user using custom login credentials community.general.influxdb_user: user_name: john login_username: "{{ influxdb_username }}" login_password: "{{ <PASSWORD> }}" state: absent #only defaults # restore previous user # Fix privileges wording # check if the current grants are included in the desired ones # check if the desired grants are included in the current ones | 2.164463 | 2 |
EyeReader.py | dupriest/EyeReader | 0 | 6624939 | <filename>EyeReader.py<gh_stars>0
from __future__ import print_function
import json
import urlparse
import time
from datetime import datetime
from eyex.api import EyeXInterface, SampleGaze, SampleFixation
from pywinauto import Application
import Queue
import webbrowser
from collections import namedtuple
import pywinauto
from pywinauto import Application
import os.path
import win32api
import win32net
import signal, sys
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
import easygui as eg
import threading
from threading import Timer, Thread
# NAMED TUPLES ###############################################################################################
PageTurn = namedtuple('PageTurn', ['slug', 'page', 'choice'])
Picture = namedtuple('Picture', ['pt', 'pb', 'pl', 'pr'])
Text = namedtuple('Text', ['tt','tb','tl','tr'])
# GLOBAL VARIABLES ###########################################################################################
path = os.getcwd() # Return program start in path
bookshelf = open(path + '\\Bookshelf.txt', 'r').read() # Opens Bookshelf.txt
if path[-14:-1] == '\\program file': # path goes up a level from program files folder
path = path[0:len(path)-14]
eyetrack_on = False # True: Tobii EyeGo is recording data, False: Tobii EyeGo is not recording data
data_saved = False # Determines if you've reached end of book and have saved all content (vdieo, data)
t_length = 120.0 # How long the program waits until it does a timeout save on a page
alldata = Queue.Queue() # Holds SampleGaze and SampleFixation data taken from Tobii EyeGo
funcQ = Queue.Queue() # Handles 'events' from the three threads marked in Main
# F UNCTION DEFINITION(5) #####################################################################################
## 1 ##
def OpenPrograms():
"""Starts Tobii Dynavox Gaze Viewer and Internet Explorer with Tarheel Reader.
app - the pywinauto Application associated with Tobii Dynavox Gaze Viewer
tw - main window of app upon startup
window - main window of IE upon startup"""
try: # Connect/start Tobii Gaze Viewer regardless if its already running or not
app = Application().connect_(path="C:\\Program Files (x86)\\Tobii Dynavox\\Gaze Viewer\\Tobii.GazeViewer.Startup.exe")
except pywinauto.application.ProcessNotFoundError:
app = Application.start("C:\\Program Files (x86)\\Tobii Dynavox\\Gaze Viewer\\Tobii.GazeViewer.Startup.exe")
tw = app.Window_().Wait('visible', timeout=60, retry_interval=0.1)
tw.Minimize()
if bookshelf == 'A':
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-a&eyetracker=1')
else:
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-b&eyetracker=1')
c = pywinauto.findwindows.find_windows(title_re="Tar Heel Reader|Favorites")
while len(c) == 0:
c = pywinauto.findwindows.find_windows(title_re="Tar Heel Reader|Favorites")
window = pywinauto.controls.HwndWrapper.HwndWrapper(c[0])
time.sleep(1)
window.SetFocus()
window.TypeKeys('{F11}')
return [app, tw, window]
## 2 ##
def SaveData(datetime, timeout):
"""Empties contents of queue (SampleGaze and SampleFixation points, PageTurn, Picture and
Text namedtuples) into a json file in the folder 'data'."""
data = []
suffix = ''
while True:
try:
r = alldata.get(False)
except Queue.Empty:
break
if isinstance(r, PageTurn):
d = {
'type': 'PageTurn',
'slug': r.slug,
'page': r.page
}
data.append(d)
elif isinstance(r, SampleGaze):
d = {
'type': 'SampleGaze',
'timestamp': r.timestamp,
'x': r.x,
'y': r.y
}
data.append(d)
elif isinstance(r, SampleFixation):
d = {
'type': 'SampleFixation',
'event_type': r.event_type,
'timestamp': r.timestamp,
'x': r.x,
'y': r.y
}
data.append(d)
elif isinstance(r, Picture):
d = {
'type': 'Picture',
'pt': r.pt,
'pb': r.pb,
'pl': r.pl,
'pr': r.pr
}
data.append(d)
elif isinstance(r, Text):
d = {
'type': 'Text',
'tt': r.tt,
'tb': r.tb,
'tl': r.tl,
'tr': r.tr
}
data.append(d)
if timeout:
suffix = '_timeout'
if bookshelf == 'A':
filename = path +"\\data\\Bookshelf A\\eyegazedata_" + datetime.strftime('%Y-%m-%d-%H-%M-%S') + suffix + '.json'
else:
filename = path +"\\data\\Bookshelf B\\eyegazedata_" + datetime.strftime('%Y-%m-%d-%H-%M-%S') + suffix + '.json'
json.dump(data, file(filename, 'w'))
## 3 ##
def SaveVid(datetime, timeout):
"""Saves a video that includes a screen capture, heat map and gaze plot by interacting with
Tobii Gaze Viewer. Videos are stored as mp4 files in folder 'videos' in subfolder corresponding
to the bookshelf read from.
tw - the original GV window that allows you to start/stop recording.
tw1 - window that allows you to edit/save the video you've recorded.
tw2 - popup for naming video/file explorer."""
global timer
global addres_bar
eg.msgbox(msg="The video of this book reading will now be saved. While saving, please refrain from moving the cursor. Click the button below to continue.", title="TAR HEEL READER - START VIDEO SAVING", ok_button="Start")
time.sleep(1)
window.Minimize()
time.sleep(0.5)
w = app.Windows_()
tw1 = tw
while tw1 == tw: # Common way this program attempts to switch to a newly appeared window by waiting for it to be visible and enabled
for i,d in enumerate(w):
if d.IsVisible() and d.IsEnabled():
tw1 = w[i]
w = app.Windows_()
tw1.Maximize()
tw1.ClickInput(coords=(1700,65)) # Dependent on screen resolution
time.sleep(1)
w = app.Windows_()
tw2 = tw1
while tw2 == tw1:
for i,d in enumerate(w):
if d.IsVisible() and d.IsEnabled():
tw2 = w[i]
break
w = app.Windows_()
tw2.SetFocus()
if bookshelf == 'A':
date = path + "\\videos\\Bookshelf A\\" + 'eyex_' + datetime.strftime('%Y-%m-%d-%H-%M-%S')
else:
date = path + "\\videos\\Bookshelf B\\" + 'eyex_' + datetime.strftime('%Y-%m-%d-%H-%M-%S')
d = ""
for char in date:
d = d + "{" + char + "}"
filename = "{BACK}" + d
if timeout == True:
filename = filename + "{_}{t}{i}{m}{e}{o}{u}{t}"
date = date + "_timeout"
time.sleep(0.1)
tw2.TypeKeys(filename)
tw2.TypeKeys("{ENTER}")
time.sleep(1)
tw1.SetFocus()
filename = date + '.mp4'
print('Before filename loop')
while os.path.isfile(filename) == False: # Waits for video to exist before trying to exit
time.sleep(0.1)
time.sleep(5)
tw1.ClickInput(coords=(1870, 65)) # Dependent on screen resolution; tries to exit, but even if video file exists GazeViewer might still not be done saving
while tw1.IsVisible() and tw.IsVisible() == False:
tw1.ClickInput(coords=(1870, 65)) # Dependent on screen resolution; Exit Button
time.sleep(1)
if tw1.IsVisible() and tw.IsVisible() == False:
tw1.ClickInput(coords=(1100, 570)) # Depdendent on screen resolution; Popup that appears if its not done saving
tw.Minimize()
window.Maximize()
window.SetFocus()
if timeout == True:
eg.msgbox(msg="Saving complete upon timeout. To restart recording, start a new book.", title="TAR HEEL READER - SAVING COMPLETE TIMEOUT", ok_button="Read Another Book")
window.TypeKeys('{F11}')
time.sleep(0.1)
if bookshelf == 'A':
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-a&eyetracker=1')
else:
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-b&eyetracker=1')
time.sleep(1)
window.TypeKeys('{F11}')
else:
eg.msgbox(msg="Saving complete!", title="TAR HEEL READER - SAVING COMPLETE", ok_button="Continue")
timer = threading.Timer(t_length, timeoutHandler)
## 4 ##
def timeoutHandler():
funcQ.put([timeoutHandlerHelper])
## 5 ##
def timeoutHandlerHelper():
"""Function called when timer reaches t_length seconds. Force saves video and data."""
global eyetrack_on
tw.TypeKeys("{F7}")
eyetrack_on = False
data_saved = True
date = datetime.now()
SaveVid(date, True)
SaveData(date, True)
# CLASS DEFINITION(2) ##########################################################################################
## 1 ##
class Logger(WebSocket):
def handleMessage(self):
"""Function called to handle a "PageTurn" event."""
query = json.loads(self.data)
funcQ.put([self.handleMessageHelper, query])
def handleMessageHelper(self, query):
global eyetrack_on
global data_saved
global timer
# Optional print statements of query values from Tar Heel Reader
#print ('got', query)
#print ('choice is type ', query['choice'])
#print('')
choice = query['choice']
if int(query['page']) == 1: # Resets data_saved upon the start of a new book, start timer
data_saved = False
timer.start()
if choice == True and data_saved == False: # Executed on last page of a book
timer.cancel()
tw.TypeKeys("{F7}")
eyetrack_on = False
data_saved = True # Prevents rate pages still considered part of the book from causing trouble afterwards
date = datetime.now()
SaveVid(date, False) # See ## 2 ##
SaveData(date, False) # See ## 3 ##
elif choice == False and data_saved == False: # Executed on any page but the last page
pt = PageTurn(query['slug'], query['page'], query['choice'])
pic = Picture(query['pt'], query['pb'], query['pl'], query['pr'])
text = Text(query['tt'], query['tb'], query['tl'], query['tr'])
alldata.put(pt)
alldata.put(pic)
alldata.put(text)
if int(query['page']) != 1: # Reset timer upon reaching new page
timer.cancel()
timer = Timer(t_length, timeoutHandler)
timer.start()
if eyetrack_on == False: # If recording video isn't on, turn it on
tw.SetFocus()
tw.TypeKeys("{F7}")
eyetrack_on = True # Turn on data recording
def handleConnected(self):
"""Function called when Web Socket connects to Tar Heel Reader"""
print('connected')
pass
def handleClose(self):
"""Function called when Web Socket becomes disconnected from Tar Heel Reader"""
print('disconnected')
pass
## 2 ##
class Server(Thread):
def run(self):
"""Starts the thread for the Web Socket Server"""
host = ''
port = 8008
self.server = SimpleWebSocketServer(host, port, Logger)
print ('serving')
self.server.serveforever()
def close_sig_handler(self, signal, frame):
"""Function called upon Ctrl+C that kills the program"""
print ("closing")
self.server.close()
#app.kill_()
#sys.exit()
## MAIN ###################################################################################
answer = eg.buttonbox(msg="Ready to Start?", choices=["Yes","No"])
if answer == "Yes":
app, tw, window = OpenPrograms()
def handle_data(data):
"""Function called to handle EyeX SampleGaze and SampleFixation events"""
global eyetrack_on
if eyetrack_on:
alldata.put(data)
eye_api = EyeXInterface() # Thread one
eye_api_f = EyeXInterface(fixation=True)
eye_api.on_event += [handle_data]
serverThread = Server() # Thread two
serverThread.start()
signal.signal(signal.SIGTERM, serverThread.close_sig_handler)
timer = threading.Timer(t_length, timeoutHandler) # Thread three
while True:
func = funcQ.get()
print ('calling', func)
func[0](*func[1:])
else:
# Program ends and closes
pass
| <filename>EyeReader.py<gh_stars>0
from __future__ import print_function
import json
import urlparse
import time
from datetime import datetime
from eyex.api import EyeXInterface, SampleGaze, SampleFixation
from pywinauto import Application
import Queue
import webbrowser
from collections import namedtuple
import pywinauto
from pywinauto import Application
import os.path
import win32api
import win32net
import signal, sys
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
import easygui as eg
import threading
from threading import Timer, Thread
# NAMED TUPLES ###############################################################################################
PageTurn = namedtuple('PageTurn', ['slug', 'page', 'choice'])
Picture = namedtuple('Picture', ['pt', 'pb', 'pl', 'pr'])
Text = namedtuple('Text', ['tt','tb','tl','tr'])
# GLOBAL VARIABLES ###########################################################################################
path = os.getcwd() # Return program start in path
bookshelf = open(path + '\\Bookshelf.txt', 'r').read() # Opens Bookshelf.txt
if path[-14:-1] == '\\program file': # path goes up a level from program files folder
path = path[0:len(path)-14]
eyetrack_on = False # True: Tobii EyeGo is recording data, False: Tobii EyeGo is not recording data
data_saved = False # Determines if you've reached end of book and have saved all content (vdieo, data)
t_length = 120.0 # How long the program waits until it does a timeout save on a page
alldata = Queue.Queue() # Holds SampleGaze and SampleFixation data taken from Tobii EyeGo
funcQ = Queue.Queue() # Handles 'events' from the three threads marked in Main
# F UNCTION DEFINITION(5) #####################################################################################
## 1 ##
def OpenPrograms():
"""Starts Tobii Dynavox Gaze Viewer and Internet Explorer with Tarheel Reader.
app - the pywinauto Application associated with Tobii Dynavox Gaze Viewer
tw - main window of app upon startup
window - main window of IE upon startup"""
try: # Connect/start Tobii Gaze Viewer regardless if its already running or not
app = Application().connect_(path="C:\\Program Files (x86)\\Tobii Dynavox\\Gaze Viewer\\Tobii.GazeViewer.Startup.exe")
except pywinauto.application.ProcessNotFoundError:
app = Application.start("C:\\Program Files (x86)\\Tobii Dynavox\\Gaze Viewer\\Tobii.GazeViewer.Startup.exe")
tw = app.Window_().Wait('visible', timeout=60, retry_interval=0.1)
tw.Minimize()
if bookshelf == 'A':
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-a&eyetracker=1')
else:
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-b&eyetracker=1')
c = pywinauto.findwindows.find_windows(title_re="Tar Heel Reader|Favorites")
while len(c) == 0:
c = pywinauto.findwindows.find_windows(title_re="Tar Heel Reader|Favorites")
window = pywinauto.controls.HwndWrapper.HwndWrapper(c[0])
time.sleep(1)
window.SetFocus()
window.TypeKeys('{F11}')
return [app, tw, window]
## 2 ##
def SaveData(datetime, timeout):
"""Empties contents of queue (SampleGaze and SampleFixation points, PageTurn, Picture and
Text namedtuples) into a json file in the folder 'data'."""
data = []
suffix = ''
while True:
try:
r = alldata.get(False)
except Queue.Empty:
break
if isinstance(r, PageTurn):
d = {
'type': 'PageTurn',
'slug': r.slug,
'page': r.page
}
data.append(d)
elif isinstance(r, SampleGaze):
d = {
'type': 'SampleGaze',
'timestamp': r.timestamp,
'x': r.x,
'y': r.y
}
data.append(d)
elif isinstance(r, SampleFixation):
d = {
'type': 'SampleFixation',
'event_type': r.event_type,
'timestamp': r.timestamp,
'x': r.x,
'y': r.y
}
data.append(d)
elif isinstance(r, Picture):
d = {
'type': 'Picture',
'pt': r.pt,
'pb': r.pb,
'pl': r.pl,
'pr': r.pr
}
data.append(d)
elif isinstance(r, Text):
d = {
'type': 'Text',
'tt': r.tt,
'tb': r.tb,
'tl': r.tl,
'tr': r.tr
}
data.append(d)
if timeout:
suffix = '_timeout'
if bookshelf == 'A':
filename = path +"\\data\\Bookshelf A\\eyegazedata_" + datetime.strftime('%Y-%m-%d-%H-%M-%S') + suffix + '.json'
else:
filename = path +"\\data\\Bookshelf B\\eyegazedata_" + datetime.strftime('%Y-%m-%d-%H-%M-%S') + suffix + '.json'
json.dump(data, file(filename, 'w'))
## 3 ##
def SaveVid(datetime, timeout):
"""Saves a video that includes a screen capture, heat map and gaze plot by interacting with
Tobii Gaze Viewer. Videos are stored as mp4 files in folder 'videos' in subfolder corresponding
to the bookshelf read from.
tw - the original GV window that allows you to start/stop recording.
tw1 - window that allows you to edit/save the video you've recorded.
tw2 - popup for naming video/file explorer."""
global timer
global addres_bar
eg.msgbox(msg="The video of this book reading will now be saved. While saving, please refrain from moving the cursor. Click the button below to continue.", title="TAR HEEL READER - START VIDEO SAVING", ok_button="Start")
time.sleep(1)
window.Minimize()
time.sleep(0.5)
w = app.Windows_()
tw1 = tw
while tw1 == tw: # Common way this program attempts to switch to a newly appeared window by waiting for it to be visible and enabled
for i,d in enumerate(w):
if d.IsVisible() and d.IsEnabled():
tw1 = w[i]
w = app.Windows_()
tw1.Maximize()
tw1.ClickInput(coords=(1700,65)) # Dependent on screen resolution
time.sleep(1)
w = app.Windows_()
tw2 = tw1
while tw2 == tw1:
for i,d in enumerate(w):
if d.IsVisible() and d.IsEnabled():
tw2 = w[i]
break
w = app.Windows_()
tw2.SetFocus()
if bookshelf == 'A':
date = path + "\\videos\\Bookshelf A\\" + 'eyex_' + datetime.strftime('%Y-%m-%d-%H-%M-%S')
else:
date = path + "\\videos\\Bookshelf B\\" + 'eyex_' + datetime.strftime('%Y-%m-%d-%H-%M-%S')
d = ""
for char in date:
d = d + "{" + char + "}"
filename = "{BACK}" + d
if timeout == True:
filename = filename + "{_}{t}{i}{m}{e}{o}{u}{t}"
date = date + "_timeout"
time.sleep(0.1)
tw2.TypeKeys(filename)
tw2.TypeKeys("{ENTER}")
time.sleep(1)
tw1.SetFocus()
filename = date + '.mp4'
print('Before filename loop')
while os.path.isfile(filename) == False: # Waits for video to exist before trying to exit
time.sleep(0.1)
time.sleep(5)
tw1.ClickInput(coords=(1870, 65)) # Dependent on screen resolution; tries to exit, but even if video file exists GazeViewer might still not be done saving
while tw1.IsVisible() and tw.IsVisible() == False:
tw1.ClickInput(coords=(1870, 65)) # Dependent on screen resolution; Exit Button
time.sleep(1)
if tw1.IsVisible() and tw.IsVisible() == False:
tw1.ClickInput(coords=(1100, 570)) # Depdendent on screen resolution; Popup that appears if its not done saving
tw.Minimize()
window.Maximize()
window.SetFocus()
if timeout == True:
eg.msgbox(msg="Saving complete upon timeout. To restart recording, start a new book.", title="TAR HEEL READER - SAVING COMPLETE TIMEOUT", ok_button="Read Another Book")
window.TypeKeys('{F11}')
time.sleep(0.1)
if bookshelf == 'A':
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-a&eyetracker=1')
else:
webbrowser.open('http://test.tarheelreader.org/favorites/?collection=vap-study-bookshelf-b&eyetracker=1')
time.sleep(1)
window.TypeKeys('{F11}')
else:
eg.msgbox(msg="Saving complete!", title="TAR HEEL READER - SAVING COMPLETE", ok_button="Continue")
timer = threading.Timer(t_length, timeoutHandler)
## 4 ##
def timeoutHandler():
funcQ.put([timeoutHandlerHelper])
## 5 ##
def timeoutHandlerHelper():
"""Function called when timer reaches t_length seconds. Force saves video and data."""
global eyetrack_on
tw.TypeKeys("{F7}")
eyetrack_on = False
data_saved = True
date = datetime.now()
SaveVid(date, True)
SaveData(date, True)
# CLASS DEFINITION(2) ##########################################################################################
## 1 ##
class Logger(WebSocket):
def handleMessage(self):
"""Function called to handle a "PageTurn" event."""
query = json.loads(self.data)
funcQ.put([self.handleMessageHelper, query])
def handleMessageHelper(self, query):
global eyetrack_on
global data_saved
global timer
# Optional print statements of query values from Tar Heel Reader
#print ('got', query)
#print ('choice is type ', query['choice'])
#print('')
choice = query['choice']
if int(query['page']) == 1: # Resets data_saved upon the start of a new book, start timer
data_saved = False
timer.start()
if choice == True and data_saved == False: # Executed on last page of a book
timer.cancel()
tw.TypeKeys("{F7}")
eyetrack_on = False
data_saved = True # Prevents rate pages still considered part of the book from causing trouble afterwards
date = datetime.now()
SaveVid(date, False) # See ## 2 ##
SaveData(date, False) # See ## 3 ##
elif choice == False and data_saved == False: # Executed on any page but the last page
pt = PageTurn(query['slug'], query['page'], query['choice'])
pic = Picture(query['pt'], query['pb'], query['pl'], query['pr'])
text = Text(query['tt'], query['tb'], query['tl'], query['tr'])
alldata.put(pt)
alldata.put(pic)
alldata.put(text)
if int(query['page']) != 1: # Reset timer upon reaching new page
timer.cancel()
timer = Timer(t_length, timeoutHandler)
timer.start()
if eyetrack_on == False: # If recording video isn't on, turn it on
tw.SetFocus()
tw.TypeKeys("{F7}")
eyetrack_on = True # Turn on data recording
def handleConnected(self):
"""Function called when Web Socket connects to Tar Heel Reader"""
print('connected')
pass
def handleClose(self):
"""Function called when Web Socket becomes disconnected from Tar Heel Reader"""
print('disconnected')
pass
## 2 ##
class Server(Thread):
def run(self):
"""Starts the thread for the Web Socket Server"""
host = ''
port = 8008
self.server = SimpleWebSocketServer(host, port, Logger)
print ('serving')
self.server.serveforever()
def close_sig_handler(self, signal, frame):
"""Function called upon Ctrl+C that kills the program"""
print ("closing")
self.server.close()
#app.kill_()
#sys.exit()
## MAIN ###################################################################################
answer = eg.buttonbox(msg="Ready to Start?", choices=["Yes","No"])
if answer == "Yes":
app, tw, window = OpenPrograms()
def handle_data(data):
"""Function called to handle EyeX SampleGaze and SampleFixation events"""
global eyetrack_on
if eyetrack_on:
alldata.put(data)
eye_api = EyeXInterface() # Thread one
eye_api_f = EyeXInterface(fixation=True)
eye_api.on_event += [handle_data]
serverThread = Server() # Thread two
serverThread.start()
signal.signal(signal.SIGTERM, serverThread.close_sig_handler)
timer = threading.Timer(t_length, timeoutHandler) # Thread three
while True:
func = funcQ.get()
print ('calling', func)
func[0](*func[1:])
else:
# Program ends and closes
pass
| en | 0.68934 | # NAMED TUPLES ############################################################################################### # GLOBAL VARIABLES ########################################################################################### # Return program start in path # Opens Bookshelf.txt # path goes up a level from program files folder # True: Tobii EyeGo is recording data, False: Tobii EyeGo is not recording data # Determines if you've reached end of book and have saved all content (vdieo, data) # How long the program waits until it does a timeout save on a page # Holds SampleGaze and SampleFixation data taken from Tobii EyeGo # Handles 'events' from the three threads marked in Main # F UNCTION DEFINITION(5) ##################################################################################### ## 1 ## Starts Tobii Dynavox Gaze Viewer and Internet Explorer with Tarheel Reader. app - the pywinauto Application associated with Tobii Dynavox Gaze Viewer tw - main window of app upon startup window - main window of IE upon startup # Connect/start Tobii Gaze Viewer regardless if its already running or not ## 2 ## Empties contents of queue (SampleGaze and SampleFixation points, PageTurn, Picture and Text namedtuples) into a json file in the folder 'data'. ## 3 ## Saves a video that includes a screen capture, heat map and gaze plot by interacting with Tobii Gaze Viewer. Videos are stored as mp4 files in folder 'videos' in subfolder corresponding to the bookshelf read from. tw - the original GV window that allows you to start/stop recording. tw1 - window that allows you to edit/save the video you've recorded. tw2 - popup for naming video/file explorer. 
# Common way this program attempts to switch to a newly appeared window by waiting for it to be visible and enabled # Dependent on screen resolution # Waits for video to exist before trying to exit # Dependent on screen resolution; tries to exit, but even if video file exists GazeViewer might still not be done saving # Dependent on screen resolution; Exit Button # Depdendent on screen resolution; Popup that appears if its not done saving ## 4 ## ## 5 ## Function called when timer reaches t_length seconds. Force saves video and data. # CLASS DEFINITION(2) ########################################################################################## ## 1 ## Function called to handle a "PageTurn" event. # Optional print statements of query values from Tar Heel Reader #print ('got', query) #print ('choice is type ', query['choice']) #print('') # Resets data_saved upon the start of a new book, start timer # Executed on last page of a book # Prevents rate pages still considered part of the book from causing trouble afterwards # See ## 2 ## # See ## 3 ## # Executed on any page but the last page # Reset timer upon reaching new page # If recording video isn't on, turn it on # Turn on data recording Function called when Web Socket connects to Tar Heel Reader Function called when Web Socket becomes disconnected from Tar Heel Reader ## 2 ## Starts the thread for the Web Socket Server Function called upon Ctrl+C that kills the program #app.kill_() #sys.exit() ## MAIN ################################################################################### Function called to handle EyeX SampleGaze and SampleFixation events # Thread one # Thread two # Thread three # Program ends and closes | 2.314481 | 2 |
eekhoorn/tests/test_sql.py | aether-space/eekhoorn | 0 | 6624940 | <reponame>aether-space/eekhoorn
# encoding: utf-8
try:
import unittest2 as unittest
except ImportError:
import unittest
from eekhoorn.sql import statement_finished
class SqlTest(unittest.TestCase):
def test_finished(self):
sql = "SELECT * FROM spam WHERE eggs LIKE '%';"
self.assertTrue(statement_finished(sql))
def test_unfinished_string(self):
sql = "SELECT ';"
self.assertFalse(statement_finished(sql))
def test_unfinished_string_with_newline(self):
sql = "SELECT '\n;"
self.assertFalse(statement_finished(sql))
@unittest.skip("Bug in sqlparse")
def test_unfinished(self):
sql = "SELECT '\n;\n'"
self.assertFalse(statement_finished(sql))
def test_finished_multiline_string(self):
sql = "SELECT '\n;\n';"
self.assertTrue(statement_finished(sql))
| # encoding: utf-8
try:
import unittest2 as unittest
except ImportError:
import unittest
from eekhoorn.sql import statement_finished
class SqlTest(unittest.TestCase):
def test_finished(self):
sql = "SELECT * FROM spam WHERE eggs LIKE '%';"
self.assertTrue(statement_finished(sql))
def test_unfinished_string(self):
sql = "SELECT ';"
self.assertFalse(statement_finished(sql))
def test_unfinished_string_with_newline(self):
sql = "SELECT '\n;"
self.assertFalse(statement_finished(sql))
@unittest.skip("Bug in sqlparse")
def test_unfinished(self):
sql = "SELECT '\n;\n'"
self.assertFalse(statement_finished(sql))
def test_finished_multiline_string(self):
sql = "SELECT '\n;\n';"
self.assertTrue(statement_finished(sql)) | en | 0.83829 | # encoding: utf-8 | 3.146009 | 3 |
tests/my_test.py | dosemeion/mmdetection-hqd | 0 | 6624941 | import torch
# import numpy as np
from mmdet.models.necks.yolo_neck_shuffle_cat import ShuffleCatNeck
device = torch.device('cuda:0')
x1 = torch.rand(1, 1024, 20, 20).to(device)
x2 = torch.rand(1, 512, 40, 40).to(device)
x3 = torch.rand(1, 256, 80, 80).to(device)
in_channels = [1024, 512, 256]
out_channels = [512, 256, 128]
model = ShuffleCatNeck(3, in_channels, out_channels).to(device)
y = model([x3, x2, x1])
print(y[0].shape) | import torch
# import numpy as np
from mmdet.models.necks.yolo_neck_shuffle_cat import ShuffleCatNeck
device = torch.device('cuda:0')
x1 = torch.rand(1, 1024, 20, 20).to(device)
x2 = torch.rand(1, 512, 40, 40).to(device)
x3 = torch.rand(1, 256, 80, 80).to(device)
in_channels = [1024, 512, 256]
out_channels = [512, 256, 128]
model = ShuffleCatNeck(3, in_channels, out_channels).to(device)
y = model([x3, x2, x1])
print(y[0].shape) | en | 0.786256 | # import numpy as np | 2.559075 | 3 |
src/py/devops_tools/__init__.py | StatisKit/license | 6 | 6624942 | ## Copyright [2017] UMR MISTEA INRA, UMR LEPSE INRA, UMR <NAME>, ##
## EPI Virtual Plants Inria ##
## ##
## This file is part of the StatisKit project. More information can be ##
## found at ##
## ##
## http://statiskit.rtfd.io ##
## ##
## The Apache Software Foundation (ASF) licenses this file to you under ##
## the Apache License, Version 2.0 (the "License"); you may not use this ##
## file except in compliance with the License. You should have received ##
## a copy of the Apache License, Version 2.0 along with this file; see ##
## the file LICENSE. If not, you may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ##
## mplied. See the License for the specific language governing ##
## permissions and limitations under the License. ##
from .__version__ import __version__ | ## Copyright [2017] UMR MISTEA INRA, UMR LEPSE INRA, UMR <NAME>, ##
## EPI Virtual Plants Inria ##
## ##
## This file is part of the StatisKit project. More information can be ##
## found at ##
## ##
## http://statiskit.rtfd.io ##
## ##
## The Apache Software Foundation (ASF) licenses this file to you under ##
## the Apache License, Version 2.0 (the "License"); you may not use this ##
## file except in compliance with the License. You should have received ##
## a copy of the Apache License, Version 2.0 along with this file; see ##
## the file LICENSE. If not, you may obtain a copy of the License at ##
## ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
## Unless required by applicable law or agreed to in writing, software ##
## distributed under the License is distributed on an "AS IS" BASIS, ##
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ##
## mplied. See the License for the specific language governing ##
## permissions and limitations under the License. ##
from .__version__ import __version__ | en | 0.719173 | ## Copyright [2017] UMR MISTEA INRA, UMR LEPSE INRA, UMR <NAME>, ## ## EPI Virtual Plants Inria ## ## ## ## This file is part of the StatisKit project. More information can be ## ## found at ## ## ## ## http://statiskit.rtfd.io ## ## ## ## The Apache Software Foundation (ASF) licenses this file to you under ## ## the Apache License, Version 2.0 (the "License"); you may not use this ## ## file except in compliance with the License. You should have received ## ## a copy of the Apache License, Version 2.0 along with this file; see ## ## the file LICENSE. If not, you may obtain a copy of the License at ## ## ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## ## ## Unless required by applicable law or agreed to in writing, software ## ## distributed under the License is distributed on an "AS IS" BASIS, ## ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ## ## mplied. See the License for the specific language governing ## ## permissions and limitations under the License. ## | 1.174388 | 1 |
0x0F-python-object_relational_mapping/13-model_state_delete_a.py | darkares23/holbertonschool-higher_level_programming | 0 | 6624943 | <filename>0x0F-python-object_relational_mapping/13-model_state_delete_a.py<gh_stars>0
#!/usr/bin/python3
"""
script that lists all State objects from the database hbtn_0e_6_usa
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
if __name__ == '__main__':
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
.format(sys.argv[1], sys.argv[2], sys.argv[3]),
pool_pre_ping=True)
Session = sessionmaker(bind=engine)
session = Session()
ocurrency = session.query(State).filter(
State.name.contains("a"))
for st in ocurrency:
session.delete(st)
session.commit()
| <filename>0x0F-python-object_relational_mapping/13-model_state_delete_a.py<gh_stars>0
#!/usr/bin/python3
"""
script that lists all State objects from the database hbtn_0e_6_usa
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
if __name__ == '__main__':
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
.format(sys.argv[1], sys.argv[2], sys.argv[3]),
pool_pre_ping=True)
Session = sessionmaker(bind=engine)
session = Session()
ocurrency = session.query(State).filter(
State.name.contains("a"))
for st in ocurrency:
session.delete(st)
session.commit()
| en | 0.619502 | #!/usr/bin/python3 script that lists all State objects from the database hbtn_0e_6_usa | 2.502853 | 3 |
fourpisky/utils.py | 4pisky/fourpisky-core | 2 | 6624944 | """
Misc. convenience routines.
"""
import os
import string
from collections import Sequence
from ephem import Equatorial, J2000
from fourpisky.visibility import DEG_PER_RADIAN
import voeventparse
import logging
logger = logging.getLogger(__name__)
def listify(x):
"""
Returns [x] if x is not already a list.
Used to make functions accept either scalar or array inputs -
simply `listify` a variable to make sure it's in list format.
"""
if (not isinstance(x, str)) and isinstance(x, Sequence):
return x
else:
return [x]
def ensure_dir(filename):
"""Ensure parent directory exists, so you can write to `filename`."""
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
def convert_voe_coords_to_eqposn(c):
"""Unit-checked conversion from voeventparse.Position2D -> astropysics FK5"""
acceptable_coord_sys = (
voeventparse.definitions.sky_coord_system.utc_fk5_geo,
voeventparse.definitions.sky_coord_system.utc_icrs_geo
)
if (c.system not in acceptable_coord_sys
or c.units != 'deg'):
raise ValueError(
"Unrecognised Coords type: %s, %s" % (c.system, c.units))
return Equatorial(c.ra / DEG_PER_RADIAN, c.dec / DEG_PER_RADIAN,
epoch=J2000)
def namedtuple_to_dict(nt):
return {key: nt[i] for i, key in enumerate(nt._fields)}
def sanitise_string_for_stream_id(unsafe_string):
"""
Removes any unhelpful characters (e.g. #,/,<space>,%) from a string.
We pass:
-Letters ([A-Za-z])
-digits ([0-9]),
-hyphens ("-"),underscores ("_"), colons (":"), and periods (".")
-Plus symbol ('+')
We replace '\', '/','#', and <space> by underscore.
"""
s = unsafe_string
if unsafe_string[0] == ".":
s = unsafe_string[1:]
return "".join(c for c in
s.replace('/', '_').replace('\\', '_'). \
replace('#', '_').replace(' ', '_')
if c in string.ascii_letters + string.digits + '_-:.+')
def archive_voevent_to_file(v, rootdir):
relpath, filename = v.attrib['ivorn'].split('//')[1].split('#')
filename += ".xml"
fullpath = os.path.sep.join((rootdir, relpath, filename))
ensure_dir(fullpath)
with open(fullpath, 'wb') as f:
voeventparse.dump(v, f)
logger.debug("Wrote voevent {} to {}".format(
v.attrib['ivorn'], fullpath
)) | """
Misc. convenience routines.
"""
import os
import string
from collections import Sequence
from ephem import Equatorial, J2000
from fourpisky.visibility import DEG_PER_RADIAN
import voeventparse
import logging
logger = logging.getLogger(__name__)
def listify(x):
"""
Returns [x] if x is not already a list.
Used to make functions accept either scalar or array inputs -
simply `listify` a variable to make sure it's in list format.
"""
if (not isinstance(x, str)) and isinstance(x, Sequence):
return x
else:
return [x]
def ensure_dir(filename):
"""Ensure parent directory exists, so you can write to `filename`."""
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
def convert_voe_coords_to_eqposn(c):
"""Unit-checked conversion from voeventparse.Position2D -> astropysics FK5"""
acceptable_coord_sys = (
voeventparse.definitions.sky_coord_system.utc_fk5_geo,
voeventparse.definitions.sky_coord_system.utc_icrs_geo
)
if (c.system not in acceptable_coord_sys
or c.units != 'deg'):
raise ValueError(
"Unrecognised Coords type: %s, %s" % (c.system, c.units))
return Equatorial(c.ra / DEG_PER_RADIAN, c.dec / DEG_PER_RADIAN,
epoch=J2000)
def namedtuple_to_dict(nt):
return {key: nt[i] for i, key in enumerate(nt._fields)}
def sanitise_string_for_stream_id(unsafe_string):
"""
Removes any unhelpful characters (e.g. #,/,<space>,%) from a string.
We pass:
-Letters ([A-Za-z])
-digits ([0-9]),
-hyphens ("-"),underscores ("_"), colons (":"), and periods (".")
-Plus symbol ('+')
We replace '\', '/','#', and <space> by underscore.
"""
s = unsafe_string
if unsafe_string[0] == ".":
s = unsafe_string[1:]
return "".join(c for c in
s.replace('/', '_').replace('\\', '_'). \
replace('#', '_').replace(' ', '_')
if c in string.ascii_letters + string.digits + '_-:.+')
def archive_voevent_to_file(v, rootdir):
relpath, filename = v.attrib['ivorn'].split('//')[1].split('#')
filename += ".xml"
fullpath = os.path.sep.join((rootdir, relpath, filename))
ensure_dir(fullpath)
with open(fullpath, 'wb') as f:
voeventparse.dump(v, f)
logger.debug("Wrote voevent {} to {}".format(
v.attrib['ivorn'], fullpath
)) | en | 0.571937 | Misc. convenience routines. Returns [x] if x is not already a list. Used to make functions accept either scalar or array inputs - simply `listify` a variable to make sure it's in list format. Ensure parent directory exists, so you can write to `filename`. Unit-checked conversion from voeventparse.Position2D -> astropysics FK5 Removes any unhelpful characters (e.g. #,/,<space>,%) from a string. We pass: -Letters ([A-Za-z]) -digits ([0-9]), -hyphens ("-"),underscores ("_"), colons (":"), and periods (".") -Plus symbol ('+') We replace '\', '/','#', and <space> by underscore. | 2.823692 | 3 |
KerasManip/ex-datagen-std.py | shamanenas/learning-cnn | 0 | 6624945 | <filename>KerasManip/ex-datagen-std.py
# The three main types of pixel scaling techniques supported by the ImageDataGenerator class are as follows:
# - Pixel Normalization: scale pixel values to the range 0-1.
# - Pixel Centering: scale pixel values to have a zero mean.
# - Pixel Standardization: scale pixel values to have a zero mean and unit variance.
# Standardizing a image dataset
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print('-- Loaded dataset ----------------------------')
print('Train', train_images.shape, train_labels.shape)
print('Test', (test_images.shape, test_labels.shape))
print('Train Min=%.3f, Max=%.3f, Mean=%.5f, Std=%.5f' % (train_images.min(), train_images.max(), train_images.mean(), train_images.std()))
print('Test Min=%.3f, Max=%.3f, Mean=%.5f, Std=%.5f' % (test_images.min(), test_images.max(), test_images.mean(), test_images.std()))
print('---------------------------------------------')
# reshape dataset to have a single channel
width, height, channels = train_images.shape[1], train_images.shape[2], 1
train_images = train_images.reshape((train_images.shape[0], width, height, channels))
test_images = test_images.reshape((test_images.shape[0], width, height, channels))
print('Per image means')
print('Mean train=%.3f, test=%.3f' % (train_images.mean(), test_images.mean()))
# create data generator that centers pixel values and normalizes
datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
# calculate the mean of the training dataset
datagen.fit(train_images)
print('Data generator mean=%.3f, std=%.3f' % (datagen.mean, datagen.std))
# demonstrate effect on a single batch of samples
iterator = datagen.flow(train_images, train_labels, batch_size=64)
# get a batch
batchX, batchY = iterator.next()
print('Mean pixel value in the batch')
print('Shape %s, Mean=%.5f, Std=%.5f' % (batchX.shape, batchX.mean(), batchX.std()))
# demonstrate effect on entire training dataset
iterator = datagen.flow(train_images, train_labels, batch_size=len(train_images), shuffle=False)
# get a batch
batchX, batchY = iterator.next()
print('Mean pixel value in the Entire Dataset')
print('Shape %s, Mean=%.5f, Std=%.5f' % (batchX.shape, batchX.mean(), batchX.std()))
# zilvinas@zilvinas KerasManip % python ex-datagen-std.py
# -- Loaded dataset ----------------------------
# Train (60000, 28, 28) (60000,)
# Test ((10000, 28, 28), (10000,))
# Train Min=0.000, Max=255.000, Mean=33.31842, Std=78.56749
# Test Min=0.000, Max=255.000, Mean=33.79122, Std=79.17246
# ---------------------------------------------
# Per image means
# Mean train=33.318, test=33.791
# Data generator mean=33.318, std=78.567
# Mean pixel value in the batch
# Shape (64, 28, 28, 1), Mean=-0.02012, Std=0.97885
# Mean pixel value in the Entire Dataset
# Shape (60000, 28, 28, 1), Mean=-0.00000, Std=1.00000
# zilvinas@zilvinas KerasManip %
| <filename>KerasManip/ex-datagen-std.py
# The three main types of pixel scaling techniques supported by the ImageDataGenerator class are as follows:
# - Pixel Normalization: scale pixel values to the range 0-1.
# - Pixel Centering: scale pixel values to have a zero mean.
# - Pixel Standardization: scale pixel values to have a zero mean and unit variance.
# Standardizing a image dataset
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print('-- Loaded dataset ----------------------------')
print('Train', train_images.shape, train_labels.shape)
print('Test', (test_images.shape, test_labels.shape))
print('Train Min=%.3f, Max=%.3f, Mean=%.5f, Std=%.5f' % (train_images.min(), train_images.max(), train_images.mean(), train_images.std()))
print('Test Min=%.3f, Max=%.3f, Mean=%.5f, Std=%.5f' % (test_images.min(), test_images.max(), test_images.mean(), test_images.std()))
print('---------------------------------------------')
# reshape dataset to have a single channel
width, height, channels = train_images.shape[1], train_images.shape[2], 1
train_images = train_images.reshape((train_images.shape[0], width, height, channels))
test_images = test_images.reshape((test_images.shape[0], width, height, channels))
print('Per image means')
print('Mean train=%.3f, test=%.3f' % (train_images.mean(), test_images.mean()))
# create data generator that centers pixel values and normalizes
datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
# calculate the mean of the training dataset
datagen.fit(train_images)
print('Data generator mean=%.3f, std=%.3f' % (datagen.mean, datagen.std))
# demonstrate effect on a single batch of samples
iterator = datagen.flow(train_images, train_labels, batch_size=64)
# get a batch
batchX, batchY = iterator.next()
print('Mean pixel value in the batch')
print('Shape %s, Mean=%.5f, Std=%.5f' % (batchX.shape, batchX.mean(), batchX.std()))
# demonstrate effect on entire training dataset
iterator = datagen.flow(train_images, train_labels, batch_size=len(train_images), shuffle=False)
# get a batch
batchX, batchY = iterator.next()
print('Mean pixel value in the Entire Dataset')
print('Shape %s, Mean=%.5f, Std=%.5f' % (batchX.shape, batchX.mean(), batchX.std()))
# zilvinas@zilvinas KerasManip % python ex-datagen-std.py
# -- Loaded dataset ----------------------------
# Train (60000, 28, 28) (60000,)
# Test ((10000, 28, 28), (10000,))
# Train Min=0.000, Max=255.000, Mean=33.31842, Std=78.56749
# Test Min=0.000, Max=255.000, Mean=33.79122, Std=79.17246
# ---------------------------------------------
# Per image means
# Mean train=33.318, test=33.791
# Data generator mean=33.318, std=78.567
# Mean pixel value in the batch
# Shape (64, 28, 28, 1), Mean=-0.02012, Std=0.97885
# Mean pixel value in the Entire Dataset
# Shape (60000, 28, 28, 1), Mean=-0.00000, Std=1.00000
# zilvinas@zilvinas KerasManip %
| en | 0.655616 | # The three main types of pixel scaling techniques supported by the ImageDataGenerator class are as follows: # - Pixel Normalization: scale pixel values to the range 0-1. # - Pixel Centering: scale pixel values to have a zero mean. # - Pixel Standardization: scale pixel values to have a zero mean and unit variance. # Standardizing a image dataset # reshape dataset to have a single channel # create data generator that centers pixel values and normalizes # calculate the mean of the training dataset # demonstrate effect on a single batch of samples # get a batch # demonstrate effect on entire training dataset # get a batch # zilvinas@zilvinas KerasManip % python ex-datagen-std.py # -- Loaded dataset ---------------------------- # Train (60000, 28, 28) (60000,) # Test ((10000, 28, 28), (10000,)) # Train Min=0.000, Max=255.000, Mean=33.31842, Std=78.56749 # Test Min=0.000, Max=255.000, Mean=33.79122, Std=79.17246 # --------------------------------------------- # Per image means # Mean train=33.318, test=33.791 # Data generator mean=33.318, std=78.567 # Mean pixel value in the batch # Shape (64, 28, 28, 1), Mean=-0.02012, Std=0.97885 # Mean pixel value in the Entire Dataset # Shape (60000, 28, 28, 1), Mean=-0.00000, Std=1.00000 # zilvinas@zilvinas KerasManip % | 3.584901 | 4 |
docs/conf.py | jmarrec/kiva | 10 | 6624946 | import sys
import os
import shlex
from recommonmark.parser import CommonMarkParser
from datetime import datetime
from subprocess import Popen, PIPE
def get_version():
"""
Returns project version as string from 'git describe' command.
"""
pipe = Popen('git describe --tags --always', stdout=PIPE, shell=True)
version = pipe.stdout.read()
if version:
return version.rstrip().lstrip('v')
else:
return 'X.Y'
extensions = [
'sphinx.ext.mathjax',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Kiva'
copyright = u'2012-' + str(datetime.now().year) + u', Big Ladder Software'
author = u'<NAME>'
version = get_version()
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
language = 'en'
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'Kivadoc'
#html_split_index = True
#html_theme_options = {'collapsiblesidebar': True}
latex_elements = {}
latex_documents = [
(master_doc, 'Kiva.tex', u'Kiva Documentation',
u'Neal Kruis', 'manual'),
]
man_pages = [
(master_doc, 'kiva', u'Kiva Documentation',
[author], 1)
]
| import sys
import os
import shlex
from recommonmark.parser import CommonMarkParser
from datetime import datetime
from subprocess import Popen, PIPE
def get_version():
"""
Returns project version as string from 'git describe' command.
"""
pipe = Popen('git describe --tags --always', stdout=PIPE, shell=True)
version = pipe.stdout.read()
if version:
return version.rstrip().lstrip('v')
else:
return 'X.Y'
extensions = [
'sphinx.ext.mathjax',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Kiva'
copyright = u'2012-' + str(datetime.now().year) + u', Big Ladder Software'
author = u'<NAME>'
version = get_version()
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
language = 'en'
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'Kivadoc'
#html_split_index = True
#html_theme_options = {'collapsiblesidebar': True}
latex_elements = {}
latex_documents = [
(master_doc, 'Kiva.tex', u'Kiva Documentation',
u'Neal Kruis', 'manual'),
]
man_pages = [
(master_doc, 'kiva', u'Kiva Documentation',
[author], 1)
]
| en | 0.597564 | Returns project version as string from 'git describe' command. #html_split_index = True #html_theme_options = {'collapsiblesidebar': True} | 2.175089 | 2 |
tests/utilities/test_print_schema.py | jhgg/graphql-core | 1 | 6624947 | <gh_stars>1-10
from typing import cast, Any, Dict
from graphql.language import DirectiveLocation
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLField,
GraphQLFloat,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
GraphQLInputField,
GraphQLDirective,
)
from graphql.utilities import (
build_schema,
print_schema,
print_introspection_schema,
print_value,
)
from ..utils import dedent
def expect_printed_schema(schema: GraphQLSchema) -> str:
    """Print *schema* as SDL and verify the text round-trips.

    The printed SDL is fed back through ``build_schema`` and printed again;
    both renderings must match, keeping ``print_schema`` and ``build_schema``
    in sync.
    """
    printed = print_schema(schema)
    rebuilt = build_schema(printed)
    assert print_schema(rebuilt) == printed
    return printed
def build_single_field_schema(field: GraphQLField):
    """Wrap *field* in a minimal schema whose Query type has one field."""
    root_fields = {"singleField": field}
    root = GraphQLObjectType(name="Query", fields=root_fields)
    return GraphQLSchema(query=root)
def describe_type_system_printer():
    """Tests for rendering a GraphQLSchema as SDL text via print_schema."""

    def prints_string_field():
        # A bare String field prints with no wrapper types.
        schema = build_single_field_schema(GraphQLField(GraphQLString))
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: String
            }
            """
        )
    def prints_list_of_string_field():
        # A List wrapper prints as [String].
        schema = build_single_field_schema(GraphQLField(GraphQLList(GraphQLString)))
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: [String]
            }
            """
        )
    def prints_non_null_string_field():
        # A NonNull wrapper prints as a trailing "!".
        schema = build_single_field_schema(GraphQLField(GraphQLNonNull(GraphQLString)))
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: String!
            }
            """
        )
    def prints_non_null_list_of_string_field():
        # NonNull(List(String)) prints as [String]!.
        schema = build_single_field_schema(
            GraphQLField(GraphQLNonNull(GraphQLList(GraphQLString)))
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: [String]!
            }
            """
        )
    def prints_list_of_non_null_string_field():
        # List(NonNull(String)) prints as [String!].
        schema = build_single_field_schema(
            GraphQLField((GraphQLList(GraphQLNonNull(GraphQLString))))
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: [String!]
            }
            """
        )
    def prints_non_null_list_of_non_null_string_field():
        # Both wrappers combined print as [String!]!.
        schema = build_single_field_schema(
            GraphQLField(GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLString))))
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: [String!]!
            }
            """
        )
    def prints_object_field():
        # An object type reachable only via the `types` list is still printed.
        foo_type = GraphQLObjectType(
            name="Foo", fields={"str": GraphQLField(GraphQLString)}
        )
        schema = GraphQLSchema(types=[foo_type])
        assert expect_printed_schema(schema) == dedent(
            """
            type Foo {
              str: String
            }
            """
        )
    def prints_string_field_with_int_arg():
        # A field argument prints inside parentheses after the field name.
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString, args={"argOne": GraphQLArgument(GraphQLInt)}
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int): String
            }
            """
        )
    def prints_string_field_with_int_arg_with_default():
        # An argument default value is rendered after "=".
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={"argOne": GraphQLArgument(GraphQLInt, default_value=2)},
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int = 2): String
            }
            """
        )
    def prints_string_field_with_string_arg_with_default():
        # Control characters in a string default are escaped in the SDL;
        # the raw-string literal below keeps \t and \f escapes verbatim.
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={
                    "argOne": GraphQLArgument(
                        GraphQLString, default_value="tes\t de\fault"
                    )
                },
            )
        )
        assert expect_printed_schema(schema) == dedent(
            r"""
            type Query {
              singleField(argOne: String = "tes\t de\fault"): String
            }
            """
        )
    def prints_string_field_with_int_arg_with_default_null():
        # An explicit None default prints as the literal "null".
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={"argOne": GraphQLArgument(GraphQLInt, default_value=None)},
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int = null): String
            }
            """
        )
    def prints_string_field_with_non_null_int_arg():
        # A non-null argument type prints with a trailing "!".
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={"argOne": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int!): String
            }
            """
        )
    def prints_string_field_with_multiple_args():
        # Multiple arguments print comma-separated, in declaration order.
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={
                    "argOne": GraphQLArgument(GraphQLInt),
                    "argTwo": GraphQLArgument(GraphQLString),
                },
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int, argTwo: String): String
            }
            """
        )
    def prints_string_field_with_multiple_args_first_is_default():
        # A default on the first argument does not affect the others.
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={
                    "argOne": GraphQLArgument(GraphQLInt, default_value=1),
                    "argTwo": GraphQLArgument(GraphQLString),
                    "argThree": GraphQLArgument(GraphQLBoolean),
                },
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int = 1, argTwo: String, argThree: Boolean): String
            }
            """
        )
    def prints_string_field_with_multiple_args_second_is_default():
        # A default on a middle argument prints in place.
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={
                    "argOne": GraphQLArgument(GraphQLInt),
                    "argTwo": GraphQLArgument(GraphQLString, default_value="foo"),
                    "argThree": GraphQLArgument(GraphQLBoolean),
                },
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int, argTwo: String = "foo", argThree: Boolean): String
            }
            """  # noqa: E501
        )
    def prints_string_field_with_multiple_args_last_is_default():
        # A boolean default prints as the lowercase SDL literal "false".
        schema = build_single_field_schema(
            GraphQLField(
                type_=GraphQLString,
                args={
                    "argOne": GraphQLArgument(GraphQLInt),
                    "argTwo": GraphQLArgument(GraphQLString),
                    "argThree": GraphQLArgument(GraphQLBoolean, default_value=False),
                },
            )
        )
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField(argOne: Int, argTwo: String, argThree: Boolean = false): String
            }
            """  # noqa: E501
        )
    def prints_schema_with_description():
        # A schema description forces an explicit `schema { ... }` block.
        schema = GraphQLSchema(
            description="Schema description.", query=GraphQLObjectType("Query", {})
        )
        assert expect_printed_schema(schema) == dedent(
            '''
            """Schema description."""
            schema {
              query: Query
            }

            type Query
            '''
        )
    def prints_custom_query_root_types():
        # A non-standard query root name requires an explicit schema block.
        schema = GraphQLSchema(query=GraphQLObjectType("CustomType", {}))
        assert expect_printed_schema(schema) == dedent(
            """
            schema {
              query: CustomType
            }

            type CustomType
            """
        )
    def prints_custom_mutation_root_types():
        # A non-standard mutation root name requires an explicit schema block.
        schema = GraphQLSchema(mutation=GraphQLObjectType("CustomType", {}))
        assert expect_printed_schema(schema) == dedent(
            """
            schema {
              mutation: CustomType
            }

            type CustomType
            """
        )
    def prints_custom_subscription_root_types():
        # A non-standard subscription root name requires an explicit schema block.
        schema = GraphQLSchema(subscription=GraphQLObjectType("CustomType", {}))
        assert expect_printed_schema(schema) == dedent(
            """
            schema {
              subscription: CustomType
            }

            type CustomType
            """
        )
    def prints_interface():
        # An implemented interface shows up in the `implements` clause and
        # is itself printed as a separate definition.
        foo_type = GraphQLInterfaceType(
            name="Foo", fields={"str": GraphQLField(GraphQLString)}
        )
        bar_type = GraphQLObjectType(
            name="Bar",
            fields={"str": GraphQLField(GraphQLString)},
            interfaces=[foo_type],
        )
        schema = GraphQLSchema(types=[bar_type])
        assert expect_printed_schema(schema) == dedent(
            """
            type Bar implements Foo {
              str: String
            }

            interface Foo {
              str: String
            }
            """
        )
    def prints_multiple_interfaces():
        # Multiple implemented interfaces are joined with " & ".
        foo_type = GraphQLInterfaceType(
            name="Foo", fields={"str": GraphQLField(GraphQLString)}
        )
        baz_type = GraphQLInterfaceType(
            name="Baz", fields={"int": GraphQLField(GraphQLInt)}
        )
        bar_type = GraphQLObjectType(
            name="Bar",
            fields={
                "str": GraphQLField(GraphQLString),
                "int": GraphQLField(GraphQLInt),
            },
            interfaces=[foo_type, baz_type],
        )
        schema = GraphQLSchema(types=[bar_type])
        assert expect_printed_schema(schema) == dedent(
            """
            type Bar implements Foo & Baz {
              str: String
              int: Int
            }

            interface Foo {
              str: String
            }

            interface Baz {
              int: Int
            }
            """
        )
    def prints_hierarchical_interface():
        # Interfaces implementing other interfaces also get an
        # `implements` clause in their own definition.
        foo_type = GraphQLInterfaceType(
            name="Foo", fields={"str": GraphQLField(GraphQLString)}
        )
        baz_type = GraphQLInterfaceType(
            name="Baz",
            interfaces=[foo_type],
            fields={
                "int": GraphQLField(GraphQLInt),
                "str": GraphQLField(GraphQLString),
            },
        )
        bar_type = GraphQLObjectType(
            name="Bar",
            fields={
                "str": GraphQLField(GraphQLString),
                "int": GraphQLField(GraphQLInt),
            },
            interfaces=[foo_type, baz_type],
        )
        query = GraphQLObjectType(name="Query", fields={"bar": GraphQLField(bar_type)})
        schema = GraphQLSchema(query, types=[bar_type])
        assert expect_printed_schema(schema) == dedent(
            """
            type Bar implements Foo & Baz {
              str: String
              int: Int
            }

            interface Foo {
              str: String
            }

            interface Baz implements Foo {
              int: Int
              str: String
            }

            type Query {
              bar: Bar
            }
            """
        )
    def prints_unions():
        # Union member types are joined with " | "; a single-member union
        # prints without a separator.
        foo_type = GraphQLObjectType(
            name="Foo", fields={"bool": GraphQLField(GraphQLBoolean)}
        )
        bar_type = GraphQLObjectType(
            name="Bar", fields={"str": GraphQLField(GraphQLString)}
        )
        single_union = GraphQLUnionType(name="SingleUnion", types=[foo_type])
        multiple_union = GraphQLUnionType(
            name="MultipleUnion", types=[foo_type, bar_type]
        )
        schema = GraphQLSchema(types=[single_union, multiple_union])
        assert expect_printed_schema(schema) == dedent(
            """
            union SingleUnion = Foo

            type Foo {
              bool: Boolean
            }

            union MultipleUnion = Foo | Bar

            type Bar {
              str: String
            }
            """
        )
    def prints_input_type():
        # Input object types print with the `input` keyword.
        input_type = GraphQLInputObjectType(
            name="InputType", fields={"int": GraphQLInputField(GraphQLInt)}
        )
        schema = GraphQLSchema(types=[input_type])
        assert expect_printed_schema(schema) == dedent(
            """
            input InputType {
              int: Int
            }
            """
        )
    def prints_custom_scalar():
        # A custom scalar prints as a bare `scalar` definition.
        odd_type = GraphQLScalarType(name="Odd")
        schema = GraphQLSchema(types=[odd_type])
        assert expect_printed_schema(schema) == dedent(
            """
            scalar Odd
            """
        )
    def prints_custom_scalar_with_speicified_by_url():
        # NOTE(review): "speicified" in this function name is a typo for
        # "specified"; renaming would only change the reported test ID,
        # so it is flagged here rather than changed.
        # A specified_by_url renders as the @specifiedBy directive.
        foo_type = GraphQLScalarType(
            name="Foo", specified_by_url="https://example.com/foo_spec"
        )
        schema = GraphQLSchema(types=[foo_type])
        assert expect_printed_schema(schema) == dedent(
            """
            scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
            """
        )
    def prints_enum():
        # Enum values print one per line, in declaration order.
        rgb_type = GraphQLEnumType(
            name="RGB", values=dict.fromkeys(("RED", "GREEN", "BLUE"))
        )
        schema = GraphQLSchema(types=[rgb_type])
        assert expect_printed_schema(schema) == dedent(
            """
            enum RGB {
              RED
              GREEN
              BLUE
            }
            """
        )
    def prints_empty_types():
        # Types without any fields/values/members print without braces.
        schema = GraphQLSchema(
            types=[
                GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})),
                GraphQLInputObjectType("SomeInputObject", {}),
                GraphQLInterfaceType("SomeInterface", {}),
                GraphQLObjectType("SomeObject", {}),
                GraphQLUnionType("SomeUnion", []),
            ]
        )
        assert expect_printed_schema(schema) == dedent(
            """
            enum SomeEnum

            input SomeInputObject

            interface SomeInterface

            type SomeObject

            union SomeUnion
            """
        )
    def prints_custom_directives():
        # Custom directives print with description, args, repeatability,
        # and their location list.
        simple_directive = GraphQLDirective(
            "simpleDirective", [DirectiveLocation.FIELD]
        )
        complex_directive = GraphQLDirective(
            "complexDirective",
            [DirectiveLocation.FIELD, DirectiveLocation.QUERY],
            description="Complex Directive",
            args={
                "stringArg": GraphQLArgument(GraphQLString),
                "intArg": GraphQLArgument(GraphQLInt, default_value=-1),
            },
            is_repeatable=True,
        )
        schema = GraphQLSchema(directives=[simple_directive, complex_directive])
        assert expect_printed_schema(schema) == dedent(
            '''
            directive @simpleDirective on FIELD

            """Complex Directive"""
            directive @complexDirective(stringArg: String, intArg: Int = -1) repeatable on FIELD | QUERY
            '''  # noqa: E501
        )
    def prints_an_empty_description():
        # An empty (but present) description still prints as """""".
        schema = build_single_field_schema(GraphQLField(GraphQLString, description=""))
        assert expect_printed_schema(schema) == dedent(
            '''
            type Query {
              """"""
              singleField: String
            }
            '''
        )
    def one_line_prints_a_short_description():
        # Short descriptions fit on one line inside the block quotes.
        schema = build_single_field_schema(
            GraphQLField(GraphQLString, description="This field is awesome")
        )
        assert expect_printed_schema(schema) == dedent(
            '''
            type Query {
              """This field is awesome"""
              singleField: String
            }
            '''
        )
    def prints_introspection_schema():
        # Golden test of the full introspection SDL (built-in directives
        # and all __* meta types) for an otherwise empty schema.
        schema = GraphQLSchema()
        output = print_introspection_schema(schema)
        assert output == dedent(
            '''
            """
            Directs the executor to include this field or fragment only when the `if` argument is true.
            """
            directive @include(
              """Included when true."""
              if: Boolean!
            ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT

            """
            Directs the executor to skip this field or fragment when the `if` argument is true.
            """
            directive @skip(
              """Skipped when true."""
              if: Boolean!
            ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT

            """Marks an element of a GraphQL schema as no longer supported."""
            directive @deprecated(
              """
              Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/).
              """
              reason: String = "No longer supported"
            ) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE

            """Exposes a URL that specifies the behaviour of this scalar."""
            directive @specifiedBy(
              """The URL that specifies the behaviour of this scalar."""
              url: String!
            ) on SCALAR

            """
            A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.
            """
            type __Schema {
              description: String

              """A list of all types supported by this server."""
              types: [__Type!]!

              """The type that query operations will be rooted at."""
              queryType: __Type!

              """
              If this server supports mutation, the type that mutation operations will be rooted at.
              """
              mutationType: __Type

              """
              If this server support subscription, the type that subscription operations will be rooted at.
              """
              subscriptionType: __Type

              """A list of all directives supported by this server."""
              directives: [__Directive!]!
            }

            """
            The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.

            Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByUrl`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.
            """
            type __Type {
              kind: __TypeKind!
              name: String
              description: String
              specifiedByUrl: String
              fields(includeDeprecated: Boolean = false): [__Field!]
              interfaces: [__Type!]
              possibleTypes: [__Type!]
              enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
              inputFields(includeDeprecated: Boolean = false): [__InputValue!]
              ofType: __Type
            }

            """An enum describing what kind of type a given `__Type` is."""
            enum __TypeKind {
              """Indicates this type is a scalar."""
              SCALAR

              """
              Indicates this type is an object. `fields` and `interfaces` are valid fields.
              """
              OBJECT

              """
              Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields.
              """
              INTERFACE

              """Indicates this type is a union. `possibleTypes` is a valid field."""
              UNION

              """Indicates this type is an enum. `enumValues` is a valid field."""
              ENUM

              """
              Indicates this type is an input object. `inputFields` is a valid field.
              """
              INPUT_OBJECT

              """Indicates this type is a list. `ofType` is a valid field."""
              LIST

              """Indicates this type is a non-null. `ofType` is a valid field."""
              NON_NULL
            }

            """
            Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.
            """
            type __Field {
              name: String!
              description: String
              args(includeDeprecated: Boolean = false): [__InputValue!]!
              type: __Type!
              isDeprecated: Boolean!
              deprecationReason: String
            }

            """
            Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.
            """
            type __InputValue {
              name: String!
              description: String
              type: __Type!

              """
              A GraphQL-formatted string representing the default value for this input value.
              """
              defaultValue: String
              isDeprecated: Boolean!
              deprecationReason: String
            }

            """
            One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.
            """
            type __EnumValue {
              name: String!
              description: String
              isDeprecated: Boolean!
              deprecationReason: String
            }

            """
            A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.

            In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.
            """
            type __Directive {
              name: String!
              description: String
              isRepeatable: Boolean!
              locations: [__DirectiveLocation!]!
              args: [__InputValue!]!
            }

            """
            A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.
            """
            enum __DirectiveLocation {
              """Location adjacent to a query operation."""
              QUERY

              """Location adjacent to a mutation operation."""
              MUTATION

              """Location adjacent to a subscription operation."""
              SUBSCRIPTION

              """Location adjacent to a field."""
              FIELD

              """Location adjacent to a fragment definition."""
              FRAGMENT_DEFINITION

              """Location adjacent to a fragment spread."""
              FRAGMENT_SPREAD

              """Location adjacent to an inline fragment."""
              INLINE_FRAGMENT

              """Location adjacent to a variable definition."""
              VARIABLE_DEFINITION

              """Location adjacent to a schema definition."""
              SCHEMA

              """Location adjacent to a scalar definition."""
              SCALAR

              """Location adjacent to an object type definition."""
              OBJECT

              """Location adjacent to a field definition."""
              FIELD_DEFINITION

              """Location adjacent to an argument definition."""
              ARGUMENT_DEFINITION

              """Location adjacent to an interface definition."""
              INTERFACE

              """Location adjacent to a union definition."""
              UNION

              """Location adjacent to an enum definition."""
              ENUM

              """Location adjacent to an enum value definition."""
              ENUM_VALUE

              """Location adjacent to an input object type definition."""
              INPUT_OBJECT

              """Location adjacent to an input object field definition."""
              INPUT_FIELD_DEFINITION
            }
            '''  # noqa: E501
        )
def describe_print_value():
def print_value_convenience_function():
assert print_value(1.5, GraphQLFloat) == "1.5"
assert print_value("foo", GraphQLString) == '"foo"'
| from typing import cast, Any, Dict
from graphql.language import DirectiveLocation
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLField,
GraphQLFloat,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
GraphQLInputField,
GraphQLDirective,
)
from graphql.utilities import (
build_schema,
print_schema,
print_introspection_schema,
print_value,
)
from ..utils import dedent
def expect_printed_schema(schema: GraphQLSchema) -> str:
schema_text = print_schema(schema)
# keep print_schema and build_schema in sync
assert print_schema(build_schema(schema_text)) == schema_text
return schema_text
def build_single_field_schema(field: GraphQLField):
query = GraphQLObjectType(name="Query", fields={"singleField": field})
return GraphQLSchema(query=query)
def describe_type_system_printer():
def prints_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLString))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: String
}
"""
)
def prints_list_of_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLList(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]
}
"""
)
def prints_non_null_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLNonNull(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: String!
}
"""
)
def prints_non_null_list_of_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLString)))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]!
}
"""
)
def prints_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField((GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]
}
"""
)
def prints_non_null_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]!
}
"""
)
def prints_object_field():
foo_type = GraphQLObjectType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
type Foo {
str: String
}
"""
)
def prints_string_field_with_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString, args={"argOne": GraphQLArgument(GraphQLInt)}
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int): String
}
"""
)
def prints_string_field_with_int_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=2)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 2): String
}
"""
)
def prints_string_field_with_string_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(
GraphQLString, default_value="tes\t de\fault"
)
},
)
)
assert expect_printed_schema(schema) == dedent(
r"""
type Query {
singleField(argOne: String = "tes\t de\fault"): String
}
"""
)
def prints_string_field_with_int_arg_with_default_null():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=None)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = null): String
}
"""
)
def prints_string_field_with_non_null_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int!): String
}
"""
)
def prints_string_field_with_multiple_args():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String): String
}
"""
)
def prints_string_field_with_multiple_args_first_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt, default_value=1),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 1, argTwo: String, argThree: Boolean): String
}
"""
)
def prints_string_field_with_multiple_args_second_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString, default_value="foo"),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String = "foo", argThree: Boolean): String
}
""" # noqa: E501
)
def prints_string_field_with_multiple_args_last_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean, default_value=False),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String, argThree: Boolean = false): String
}
""" # noqa: E501
)
def prints_schema_with_description():
schema = GraphQLSchema(
description="Schema description.", query=GraphQLObjectType("Query", {})
)
assert expect_printed_schema(schema) == dedent(
'''
"""Schema description."""
schema {
query: Query
}
type Query
'''
)
def prints_custom_query_root_types():
schema = GraphQLSchema(query=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
query: CustomType
}
type CustomType
"""
)
def prints_custom_mutation_root_types():
schema = GraphQLSchema(mutation=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
mutation: CustomType
}
type CustomType
"""
)
def prints_custom_subscription_root_types():
schema = GraphQLSchema(subscription=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
subscription: CustomType
}
type CustomType
"""
)
def prints_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={"str": GraphQLField(GraphQLString)},
interfaces=[foo_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo {
str: String
}
interface Foo {
str: String
}
"""
)
def prints_multiple_interfaces():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz", fields={"int": GraphQLField(GraphQLInt)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz {
int: Int
}
"""
)
def prints_hierarchical_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz",
interfaces=[foo_type],
fields={
"int": GraphQLField(GraphQLInt),
"str": GraphQLField(GraphQLString),
},
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
query = GraphQLObjectType(name="Query", fields={"bar": GraphQLField(bar_type)})
schema = GraphQLSchema(query, types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz implements Foo {
int: Int
str: String
}
type Query {
bar: Bar
}
"""
)
def prints_unions():
foo_type = GraphQLObjectType(
name="Foo", fields={"bool": GraphQLField(GraphQLBoolean)}
)
bar_type = GraphQLObjectType(
name="Bar", fields={"str": GraphQLField(GraphQLString)}
)
single_union = GraphQLUnionType(name="SingleUnion", types=[foo_type])
multiple_union = GraphQLUnionType(
name="MultipleUnion", types=[foo_type, bar_type]
)
schema = GraphQLSchema(types=[single_union, multiple_union])
assert expect_printed_schema(schema) == dedent(
"""
union SingleUnion = Foo
type Foo {
bool: Boolean
}
union MultipleUnion = Foo | Bar
type Bar {
str: String
}
"""
)
def prints_input_type():
input_type = GraphQLInputObjectType(
name="InputType", fields={"int": GraphQLInputField(GraphQLInt)}
)
schema = GraphQLSchema(types=[input_type])
assert expect_printed_schema(schema) == dedent(
"""
input InputType {
int: Int
}
"""
)
def prints_custom_scalar():
odd_type = GraphQLScalarType(name="Odd")
schema = GraphQLSchema(types=[odd_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Odd
"""
)
def prints_custom_scalar_with_speicified_by_url():
foo_type = GraphQLScalarType(
name="Foo", specified_by_url="https://example.com/foo_spec"
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
"""
)
def prints_enum():
rgb_type = GraphQLEnumType(
name="RGB", values=dict.fromkeys(("RED", "GREEN", "BLUE"))
)
schema = GraphQLSchema(types=[rgb_type])
assert expect_printed_schema(schema) == dedent(
"""
enum RGB {
RED
GREEN
BLUE
}
"""
)
def prints_empty_types():
schema = GraphQLSchema(
types=[
GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})),
GraphQLInputObjectType("SomeInputObject", {}),
GraphQLInterfaceType("SomeInterface", {}),
GraphQLObjectType("SomeObject", {}),
GraphQLUnionType("SomeUnion", []),
]
)
assert expect_printed_schema(schema) == dedent(
"""
enum SomeEnum
input SomeInputObject
interface SomeInterface
type SomeObject
union SomeUnion
"""
)
def prints_custom_directives():
simple_directive = GraphQLDirective(
"simpleDirective", [DirectiveLocation.FIELD]
)
complex_directive = GraphQLDirective(
"complexDirective",
[DirectiveLocation.FIELD, DirectiveLocation.QUERY],
description="Complex Directive",
args={
"stringArg": GraphQLArgument(GraphQLString),
"intArg": GraphQLArgument(GraphQLInt, default_value=-1),
},
is_repeatable=True,
)
schema = GraphQLSchema(directives=[simple_directive, complex_directive])
assert expect_printed_schema(schema) == dedent(
'''
directive @simpleDirective on FIELD
"""Complex Directive"""
directive @complexDirective(stringArg: String, intArg: Int = -1) repeatable on FIELD | QUERY
''' # noqa: E501
)
def prints_an_empty_description():
schema = build_single_field_schema(GraphQLField(GraphQLString, description=""))
assert expect_printed_schema(schema) == dedent(
'''
type Query {
""""""
singleField: String
}
'''
)
def one_line_prints_a_short_description():
schema = build_single_field_schema(
GraphQLField(GraphQLString, description="This field is awesome")
)
assert expect_printed_schema(schema) == dedent(
'''
type Query {
"""This field is awesome"""
singleField: String
}
'''
)
def prints_introspection_schema():
schema = GraphQLSchema()
output = print_introspection_schema(schema)
assert output == dedent(
'''
"""
Directs the executor to include this field or fragment only when the `if` argument is true.
"""
directive @include(
"""Included when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""
Directs the executor to skip this field or fragment when the `if` argument is true.
"""
directive @skip(
"""Skipped when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""Marks an element of a GraphQL schema as no longer supported."""
directive @deprecated(
"""
Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/).
"""
reason: String = "No longer supported"
) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
"""Exposes a URL that specifies the behaviour of this scalar."""
directive @specifiedBy(
"""The URL that specifies the behaviour of this scalar."""
url: String!
) on SCALAR
"""
A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.
"""
type __Schema {
description: String
"""A list of all types supported by this server."""
types: [__Type!]!
"""The type that query operations will be rooted at."""
queryType: __Type!
"""
If this server supports mutation, the type that mutation operations will be rooted at.
"""
mutationType: __Type
"""
If this server support subscription, the type that subscription operations will be rooted at.
"""
subscriptionType: __Type
"""A list of all directives supported by this server."""
directives: [__Directive!]!
}
"""
The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.
Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByUrl`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.
"""
type __Type {
kind: __TypeKind!
name: String
description: String
specifiedByUrl: String
fields(includeDeprecated: Boolean = false): [__Field!]
interfaces: [__Type!]
possibleTypes: [__Type!]
enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
inputFields(includeDeprecated: Boolean = false): [__InputValue!]
ofType: __Type
}
"""An enum describing what kind of type a given `__Type` is."""
enum __TypeKind {
"""Indicates this type is a scalar."""
SCALAR
"""
Indicates this type is an object. `fields` and `interfaces` are valid fields.
"""
OBJECT
"""
Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields.
"""
INTERFACE
"""Indicates this type is a union. `possibleTypes` is a valid field."""
UNION
"""Indicates this type is an enum. `enumValues` is a valid field."""
ENUM
"""
Indicates this type is an input object. `inputFields` is a valid field.
"""
INPUT_OBJECT
"""Indicates this type is a list. `ofType` is a valid field."""
LIST
"""Indicates this type is a non-null. `ofType` is a valid field."""
NON_NULL
}
"""
Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.
"""
type __Field {
name: String!
description: String
args(includeDeprecated: Boolean = false): [__InputValue!]!
type: __Type!
isDeprecated: Boolean!
deprecationReason: String
}
"""
Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.
"""
type __InputValue {
name: String!
description: String
type: __Type!
"""
A GraphQL-formatted string representing the default value for this input value.
"""
defaultValue: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.
"""
type __EnumValue {
name: String!
description: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.
"""
type __Directive {
name: String!
description: String
isRepeatable: Boolean!
locations: [__DirectiveLocation!]!
args: [__InputValue!]!
}
"""
A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.
"""
enum __DirectiveLocation {
"""Location adjacent to a query operation."""
QUERY
"""Location adjacent to a mutation operation."""
MUTATION
"""Location adjacent to a subscription operation."""
SUBSCRIPTION
"""Location adjacent to a field."""
FIELD
"""Location adjacent to a fragment definition."""
FRAGMENT_DEFINITION
"""Location adjacent to a fragment spread."""
FRAGMENT_SPREAD
"""Location adjacent to an inline fragment."""
INLINE_FRAGMENT
"""Location adjacent to a variable definition."""
VARIABLE_DEFINITION
"""Location adjacent to a schema definition."""
SCHEMA
"""Location adjacent to a scalar definition."""
SCALAR
"""Location adjacent to an object type definition."""
OBJECT
"""Location adjacent to a field definition."""
FIELD_DEFINITION
"""Location adjacent to an argument definition."""
ARGUMENT_DEFINITION
"""Location adjacent to an interface definition."""
INTERFACE
"""Location adjacent to a union definition."""
UNION
"""Location adjacent to an enum definition."""
ENUM
"""Location adjacent to an enum value definition."""
ENUM_VALUE
"""Location adjacent to an input object type definition."""
INPUT_OBJECT
"""Location adjacent to an input object field definition."""
INPUT_FIELD_DEFINITION
}
''' # noqa: E501
)
def describe_print_value():
def print_value_convenience_function():
assert print_value(1.5, GraphQLFloat) == "1.5"
assert print_value("foo", GraphQLString) == '"foo"' | en | 0.671351 | # keep print_schema and build_schema in sync type Query { singleField: String } type Query { singleField: [String] } type Query { singleField: String! } type Query { singleField: [String]! } type Query { singleField: [String!] } type Query { singleField: [String!]! } type Foo { str: String } type Query { singleField(argOne: Int): String } type Query { singleField(argOne: Int = 2): String } type Query { singleField(argOne: String = "tes\t de\fault"): String } type Query { singleField(argOne: Int = null): String } type Query { singleField(argOne: Int!): String } type Query { singleField(argOne: Int, argTwo: String): String } type Query { singleField(argOne: Int = 1, argTwo: String, argThree: Boolean): String } type Query { singleField(argOne: Int, argTwo: String = "foo", argThree: Boolean): String } # noqa: E501 type Query { singleField(argOne: Int, argTwo: String, argThree: Boolean = false): String } # noqa: E501 """Schema description.""" schema { query: Query } type Query schema { query: CustomType } type CustomType schema { mutation: CustomType } type CustomType schema { subscription: CustomType } type CustomType type Bar implements Foo { str: String } interface Foo { str: String } type Bar implements Foo & Baz { str: String int: Int } interface Foo { str: String } interface Baz { int: Int } type Bar implements Foo & Baz { str: String int: Int } interface Foo { str: String } interface Baz implements Foo { int: Int str: String } type Query { bar: Bar } union SingleUnion = Foo type Foo { bool: Boolean } union MultipleUnion = Foo | Bar type Bar { str: String } input InputType { int: Int } scalar Odd scalar Foo @specifiedBy(url: "https://example.com/foo_spec") enum RGB { RED GREEN BLUE } enum SomeEnum input SomeInputObject interface SomeInterface type SomeObject union SomeUnion directive @simpleDirective on FIELD """Complex Directive""" directive @complexDirective(stringArg: String, intArg: Int = 
-1) repeatable on FIELD | QUERY # noqa: E501 type Query { """""" singleField: String } type Query { """This field is awesome""" singleField: String } """ Directs the executor to include this field or fragment only when the `if` argument is true. """ directive @include( """Included when true.""" if: Boolean! ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT """ Directs the executor to skip this field or fragment when the `if` argument is true. """ directive @skip( """Skipped when true.""" if: Boolean! ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT """Marks an element of a GraphQL schema as no longer supported.""" directive @deprecated( """ Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/). """ reason: String = "No longer supported" ) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE """Exposes a URL that specifies the behaviour of this scalar.""" directive @specifiedBy( """The URL that specifies the behaviour of this scalar.""" url: String! ) on SCALAR """ A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations. """ type __Schema { description: String """A list of all types supported by this server.""" types: [__Type!]! """The type that query operations will be rooted at.""" queryType: __Type! """ If this server supports mutation, the type that mutation operations will be rooted at. """ mutationType: __Type """ If this server support subscription, the type that subscription operations will be rooted at. """ subscriptionType: __Type """A list of all directives supported by this server.""" directives: [__Directive!]! } """ The fundamental unit of any GraphQL Schema is the type. 
There are many kinds of types in GraphQL as represented by the `__TypeKind` enum. Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByUrl`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types. """ type __Type { kind: __TypeKind! name: String description: String specifiedByUrl: String fields(includeDeprecated: Boolean = false): [__Field!] interfaces: [__Type!] possibleTypes: [__Type!] enumValues(includeDeprecated: Boolean = false): [__EnumValue!] inputFields(includeDeprecated: Boolean = false): [__InputValue!] ofType: __Type } """An enum describing what kind of type a given `__Type` is.""" enum __TypeKind { """Indicates this type is a scalar.""" SCALAR """ Indicates this type is an object. `fields` and `interfaces` are valid fields. """ OBJECT """ Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields. """ INTERFACE """Indicates this type is a union. `possibleTypes` is a valid field.""" UNION """Indicates this type is an enum. `enumValues` is a valid field.""" ENUM """ Indicates this type is an input object. `inputFields` is a valid field. """ INPUT_OBJECT """Indicates this type is a list. `ofType` is a valid field.""" LIST """Indicates this type is a non-null. `ofType` is a valid field.""" NON_NULL } """ Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type. """ type __Field { name: String! description: String args(includeDeprecated: Boolean = false): [__InputValue!]! type: __Type! isDeprecated: Boolean! 
deprecationReason: String } """ Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value. """ type __InputValue { name: String! description: String type: __Type! """ A GraphQL-formatted string representing the default value for this input value. """ defaultValue: String isDeprecated: Boolean! deprecationReason: String } """ One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string. """ type __EnumValue { name: String! description: String isDeprecated: Boolean! deprecationReason: String } """ A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document. In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor. """ type __Directive { name: String! description: String isRepeatable: Boolean! locations: [__DirectiveLocation!]! args: [__InputValue!]! } """ A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies. 
""" enum __DirectiveLocation { """Location adjacent to a query operation.""" QUERY """Location adjacent to a mutation operation.""" MUTATION """Location adjacent to a subscription operation.""" SUBSCRIPTION """Location adjacent to a field.""" FIELD """Location adjacent to a fragment definition.""" FRAGMENT_DEFINITION """Location adjacent to a fragment spread.""" FRAGMENT_SPREAD """Location adjacent to an inline fragment.""" INLINE_FRAGMENT """Location adjacent to a variable definition.""" VARIABLE_DEFINITION """Location adjacent to a schema definition.""" SCHEMA """Location adjacent to a scalar definition.""" SCALAR """Location adjacent to an object type definition.""" OBJECT """Location adjacent to a field definition.""" FIELD_DEFINITION """Location adjacent to an argument definition.""" ARGUMENT_DEFINITION """Location adjacent to an interface definition.""" INTERFACE """Location adjacent to a union definition.""" UNION """Location adjacent to an enum definition.""" ENUM """Location adjacent to an enum value definition.""" ENUM_VALUE """Location adjacent to an input object type definition.""" INPUT_OBJECT """Location adjacent to an input object field definition.""" INPUT_FIELD_DEFINITION } # noqa: E501 | 2.128886 | 2 |
nzme_skynet/core/controls/text.py | MilindThakur/nzme-skynet | 2 | 6624948 | # coding=utf-8
from nzme_skynet.core.controls.clickabletext import ClickableText
class Text(ClickableText): pass
| # coding=utf-8
from nzme_skynet.core.controls.clickabletext import ClickableText
class Text(ClickableText): pass
| en | 0.644078 | # coding=utf-8 | 1.167954 | 1 |
PythonDocs/src/023.py | Bean-jun/LearnGuide | 1 | 6624949 | from contextlib import contextmanager
@contextmanager
def func():
print("我又开始了~")
try:
yield 1
finally:
print("完事了~, 看")
if __name__ == '__main__':
with func() as f:
print(f)
| from contextlib import contextmanager
@contextmanager
def func():
print("我又开始了~")
try:
yield 1
finally:
print("完事了~, 看")
if __name__ == '__main__':
with func() as f:
print(f)
| none | 1 | 2.575056 | 3 | |
data_preprocess/TREC2014.py | CHIANGEL/GraphCM | 6 | 6624950 | <gh_stars>1-10
# !/usr/bin/python
# coding: utf8
from xml.dom.minidom import parse
import xml.dom.minidom
import time
import pprint
import string
import sys
sys.path.append("..")
import argparse
import re
import os
import numpy as np
import torch
import torch.nn as nn
from utils import *
from math import log
import random
import json
import matplotlib.pyplot as plot
from tqdm import tqdm
def generate_dict_list(args):
    """Parse the TREC2014 session-track XML into entity dicts and info lists.

    Builds four entity-to-id dictionaries (session, query, url, vtype) and
    two info lists (one record per session / per query), then saves them
    under ``args.output`` and round-trip checks every file.

    Args:
        args: namespace with ``input`` (directory holding the XML file),
            ``dataset`` (XML file name) and ``output`` (destination dir).

    Side effects: writes ``infos_per_session.list``, ``infos_per_query.list``
    and four ``*.dict`` files; prints progress and statistics to stdout.
    Interactions whose result list is not exactly 10 docs are discarded.
    """
    session_sid = {}
    # id 0 in every entity dictionary is reserved for the empty/padding entity
    query_qid, url_uid, vtype_vid = {'': 0}, {'': 0}, {'': 0}
    total_click_num = 0
    print(' - {}'.format('start parsing xml file...'))
    DOMTree = xml.dom.minidom.parse(os.path.join(args.input, args.dataset))
    TREC2014 = DOMTree.documentElement
    sessions = TREC2014.getElementsByTagName('session')
    # generate infos_per_session
    print(' - {}'.format('generating infos_per_session...'))
    infos_per_session = []
    junk_interaction_num = 0
    for session in sessions:
        info_per_session = {}
        # map the raw session number onto a dense 0-based sid
        session_number = int(session.getAttribute('num'))
        if session_number not in session_sid:
            session_sid[session_number] = len(session_sid)
        info_per_session['session_number'] = session_number
        info_per_session['sid'] = session_sid[session_number]
        # topic id of the session
        topic = int(session.getElementsByTagName('topic')[0].getAttribute('num'))
        info_per_session['topic'] = topic
        # collect information for every query (interaction) in the session
        interactions = session.getElementsByTagName('interaction')
        interaction_infos = []
        for interaction in interactions:
            interaction_info = {}
            query = interaction.getElementsByTagName('query')[0].childNodes[0].data
            docs = interaction.getElementsByTagName('results')[0].getElementsByTagName('result')
            doc_infos = []
            # Sanity check: downstream embeddings assume exactly 10 results
            # per query, so any other count is abandoned as junk.
            if len(docs) == 0:
                print(' - {}'.format('WARNING: find a query with no docs: {}'.format(query)))
                junk_interaction_num += 1
                continue
            elif len(docs) > 10:
                # more than 10 docs would cause index out-of-range in embeddings
                print(' - {}'.format('WARNING: find a query with more than 10 docs: {}'.format(query)))
                junk_interaction_num += 1
                continue
            elif len(docs) < 10:
                # fewer than 10 docs is also discarded to keep a fixed-size SERP
                print(' - {}'.format('WARNING: find a query with less than 10 docs: {}'.format(query)))
                junk_interaction_num += 1
                continue
            # Passed the sanity check: register the query and record its ids
            if query not in query_qid:
                query_qid[query] = len(query_qid)
            interaction_info['query'] = query
            interaction_info['qid'] = query_qid[query]
            interaction_info['session'] = info_per_session['session_number']
            interaction_info['sid'] = info_per_session['sid']
            for doc_idx, doc in enumerate(docs):
                # There can be junk rank values (> 10) in TREC2014, so the
                # rank is folded back into 1..10 and cross-checked against
                # the document's position.
                # Vertical type is not provided in TREC datasets (only in
                # TianGong-ST), so vtype is fixed to '0' (vid 1).
                doc_rank = int(doc.getAttribute('rank'))
                doc_rank = 10 if doc_rank % 10 == 0 else doc_rank % 10
                assert 1 <= doc_rank and doc_rank <= 10
                assert doc_idx + 1 == doc_rank
                url = doc.getElementsByTagName('clueweb12id')[0].childNodes[0].data
                vtype = '0'
                if url not in url_uid:
                    url_uid[url] = len(url_uid)
                if vtype not in vtype_vid:
                    vtype_vid[vtype] = len(vtype_vid)
                doc_infos.append({
                    'rank': doc_rank,
                    'url': url,
                    'uid': url_uid[url],
                    'vtype': vtype,
                    'vid': vtype_vid[vtype],
                    'click': 0,
                })
            # Mark clicked documents; a query may have no clicks at all
            clicks = interaction.getElementsByTagName('clicked')
            if len(clicks) > 0:
                clicks = clicks[0].getElementsByTagName('click')
                total_click_num += len(clicks)
                for click in clicks:
                    clicked_doc_rank = int(click.getElementsByTagName('rank')[0].childNodes[0].data)
                    for item in doc_infos:
                        if item['rank'] == clicked_doc_rank:
                            item['click'] = 1
                            break
            interaction_info['docs'] = doc_infos
            interaction_info['uids'] = [doc['uid'] for doc in doc_infos]
            interaction_info['vids'] = [doc['vid'] for doc in doc_infos]
            interaction_info['clicks'] = [doc['click'] for doc in doc_infos]
            interaction_infos.append(interaction_info)
        info_per_session['interactions'] = interaction_infos
        infos_per_session.append(info_per_session)
    print(' - {}'.format('abandon {} junk interactions'.format(junk_interaction_num)))
    # flatten sessions into a per-query list
    print(' - {}'.format('generating infos_per_query...'))
    infos_per_query = []
    for info_per_session in infos_per_session:
        for interaction_info in info_per_session['interactions']:
            infos_per_query.append(interaction_info)
    # save and round-trip check infos_per_session
    print(' - {}'.format('save and check infos_per_session...'))
    print(' - {}'.format('length of infos_per_session: {}'.format(len(infos_per_session))))
    save_list(args.output, 'infos_per_session.list', infos_per_session)
    list1 = load_list(args.output, 'infos_per_session.list')
    assert len(infos_per_session) == len(list1)
    for idx, item in enumerate(infos_per_session):
        assert item == list1[idx]
    # save and round-trip check infos_per_query
    print(' - {}'.format('save and check infos_per_query...'))
    print(' - {}'.format('length of infos_per_query: {}'.format(len(infos_per_query))))
    save_list(args.output, 'infos_per_query.list', infos_per_query)
    list2 = load_list(args.output, 'infos_per_query.list')
    assert len(infos_per_query) == len(list2)
    for idx, item in enumerate(infos_per_query):
        assert item == list2[idx]
    # save and round-trip check the entity dictionaries
    print(' - {}'.format('save and check dictionaries...'))
    print(' - {}'.format('unique session number: {}'.format(len(session_sid))))
    print(' - {}'.format('unique query number: {}'.format(len(query_qid))))
    print(' - {}'.format('unique doc number: {}'.format(len(url_uid))))
    print(' - {}'.format('unique vtype number: {}'.format(len(vtype_vid))))
    print(' - {}'.format('total click number: {}'.format(total_click_num)))
    save_dict(args.output, 'session_sid.dict', session_sid)
    save_dict(args.output, 'query_qid.dict', query_qid)
    save_dict(args.output, 'url_uid.dict', url_uid)
    save_dict(args.output, 'vtype_vid.dict', vtype_vid)
    # one loop replaces the four copy-pasted reload-and-compare sections
    for name, original in (('session_sid.dict', session_sid),
                           ('query_qid.dict', query_qid),
                           ('url_uid.dict', url_uid),
                           ('vtype_vid.dict', vtype_vid)):
        reloaded = load_dict(args.output, name)
        assert len(original) == len(reloaded)
        for key in reloaded:
            assert reloaded[key] == original[key]
    print(' - {}'.format('Done'))
def generate_train_valid_test(args):
    """Shuffle the saved sessions and split them into train/valid/test files.

    Splits by ``args.trainset_ratio`` / ``args.validset_ratio`` (the
    remainder is the test set), flattens each split into per-query records,
    writes ``{train,valid,test}_per_query`` txt files through
    ``generate_data_per_query`` (presumably supplied by ``utils`` via the
    star import — TODO confirm), and prints cold-start statistics
    (entities present in valid/test but absent from train).
    """
    # load entity dictionaries
    # NOTE(review): the four dictionaries are loaded but not read anywhere
    # below in this function.
    print(' - {}'.format('loading entity dictionaries...'))
    session_sid = load_dict(args.output, 'session_sid.dict')
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')
    vtype_vid = load_dict(args.output, 'vtype_vid.dict')
    # load infos_per_session.list
    print(' - {}'.format('loading the infos_per_session...'))
    infos_per_session = load_list(args.output, 'infos_per_session.list')
    # Separate all sessions into train : valid : test by config ratio
    session_num = len(infos_per_session)
    train_session_num = int(session_num * args.trainset_ratio)
    valid_session_num = int(session_num * args.validset_ratio)
    test_session_num = session_num - train_session_num - valid_session_num
    train_valid_split = train_session_num
    valid_test_split = train_session_num + valid_session_num
    print(' - {}'.format('train/valid split at: {}'.format(train_valid_split)))
    print(' - {}'.format('valid/test split at: {}'.format(valid_test_split)))
    print(' - {}'.format('train sessions: {}'.format(train_session_num)))
    print(' - {}'.format('valid sessions: {}'.format(valid_session_num)))
    print(' - {}'.format('test sessions: {}'.format(test_session_num)))
    print(' - {}'.format('total sessions: {}'.format(session_num)))
    # split train & valid & test sessions
    print(' - {}'.format('generating train & valid & test data per session...'))
    # fixed seed so the shuffle (and hence the split) is reproducible
    random.seed(2333)
    random.shuffle(infos_per_session)
    train_sessions = infos_per_session[:train_valid_split]
    valid_sessions = infos_per_session[train_valid_split:valid_test_split]
    test_sessions = infos_per_session[valid_test_split:]
    assert train_session_num == len(train_sessions), 'train_session_num: {}, len(train_sessions): {}'.format(train_session_num, len(train_sessions))
    assert valid_session_num == len(valid_sessions), 'valid_session_num: {}, len(valid_sessions): {}'.format(valid_session_num, len(valid_sessions))
    assert test_session_num == len(test_sessions), 'test_session_num: {}, len(test_sessions): {}'.format(test_session_num, len(test_sessions))
    assert session_num == len(train_sessions) + len(valid_sessions) + len(test_sessions), 'session_num: {}, len(train_sessions) + len(valid_sessions) + len(test_sessions): {}'.format(session_num, len(train_sessions) + len(valid_sessions) + len(test_sessions))
    # generate train & valid & test queries (flatten sessions to queries)
    print(' - {}'.format('generating train & valid & test data per queries...'))
    train_queries = []
    valid_queries = []
    test_queries = []
    for info_per_session in train_sessions:
        interaction_infos = info_per_session['interactions']
        for interaction_info in interaction_infos:
            train_queries.append(interaction_info)
    for info_per_session in valid_sessions:
        interaction_infos = info_per_session['interactions']
        for interaction_info in interaction_infos:
            valid_queries.append(interaction_info)
    for info_per_session in test_sessions:
        interaction_infos = info_per_session['interactions']
        for interaction_info in interaction_infos:
            test_queries.append(interaction_info)
    print(' - {}'.format('train queries: {}'.format(len(train_queries))))
    print(' - {}'.format('valid queries: {}'.format(len(valid_queries))))
    print(' - {}'.format('test queries: {}'.format(len(test_queries))))
    print(' - {}'.format('total queries: {}'.format(len(train_queries) + len(valid_queries) + len(test_queries))))
    # Write train/valid/test query information back to txt files; each call
    # also returns the entity-id sets seen in that split.
    print(' - {}'.format('writing back to txt files...'))
    print(' - {}'.format('writing into {}/train_per_query.txt'.format(args.output)))
    train_query_set, train_doc_set, train_vtype_set = generate_data_per_query(train_queries, np.arange(0, len(train_queries)), args.output, 'train_per_query')
    print(' - {}'.format('writing into {}/valid_per_query.txt'.format(args.output)))
    valid_query_set, valid_doc_set, valid_vtype_set = generate_data_per_query(valid_queries, np.arange(0, len(valid_queries)), args.output, 'valid_per_query')
    print(' - {}'.format('writing into {}/test_per_query.txt'.format(args.output)))
    test_query_set, test_doc_set, test_vtype_set = generate_data_per_query(test_queries, np.arange(0, len(test_queries)), args.output, 'test_per_query')
    # statistics for cold start: entities unseen during training
    print(' - {}'.format('Statistics for Cold Start:'))
    print(' - {}'.format('Entity in valid not in train...'))
    print(' - {}'.format('query: {}'.format(len(valid_query_set - train_query_set))))
    print(' - {}'.format('doc: {}'.format(len(valid_doc_set - train_doc_set))))
    print(' - {}'.format('vtype: {}'.format(len(valid_vtype_set - train_vtype_set))))
    print(' - {}'.format('Entity in test not in train....'))
    print(' - {}'.format('query: {}'.format(len(test_query_set - train_query_set))))
    print(' - {}'.format('doc: {}'.format(len(test_doc_set - train_doc_set))))
    print(' - {}'.format('vtype: {}'.format(len(test_vtype_set - train_vtype_set))))
def construct_dgat_graph(args):
    """Construct query- and document-graph edge indices for the double GAT.

    Edge sources (scanned over the train/valid/test split files):
      - qid-qid: queries appearing in the same session (both directions);
      - qid<->uid neighbor sets (train clicks only), expanded into 2-hop
        meta-path edges: u-u via a shared clicked query, q-q via a shared
        clicked doc;
      - uid-uid: successive documents in the same result list;
      - a self-loop on every node.

    Saves ``dgat_qid_edge_index.pth`` / ``dgat_uid_edge_index.pth`` (shape
    (2, num_edges)) and ``dgat_uid_neighbors.pth`` (a fixed-degree uid
    neighbor sampler) under ``args.output``, and prints degree statistics.
    """
    # load entity dictionaries
    print(' - {}'.format('loading entity dictionaries...'))
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')
    set_names = ['train', 'valid', 'test']
    # Edges are kept as (src, dst) tuples in sets for dedup — no fragile
    # str([a, b]) / eval() round-trip as before.
    qid_edges, uid_edges = set(), set()
    qid_neighbors = {qid: set() for qid in range(len(query_qid))}
    uid_neighbors = {uid: set() for uid in range(len(url_uid))}
    for set_name in set_names:
        print(' - {}'.format('Constructing relations in {} set'.format(set_name)))
        lines = open(os.path.join(args.output, '{}_per_query_quid.txt'.format(set_name))).readlines()
        # Relation 0: Query-Query within the same session
        cur_sid = -1
        qid_set = set()
        for line in lines:
            attr = line.strip().split('\t')
            sid = int(attr[0].strip())
            qid = int(attr[1].strip())
            if cur_sid == sid:
                # query belongs to the current session
                qid_set.add(qid)
            else:
                # session ended: link consecutive queries of the finished session
                # NOTE(review): qid_set is unordered, so "consecutive" follows
                # set iteration order rather than issue order — confirm intent.
                qid_list = list(qid_set)
                for i in range(1, len(qid_list)):
                    qid_edges.add((qid_list[i], qid_list[i - 1]))
                    qid_edges.add((qid_list[i - 1], qid_list[i]))
                # a new session starts
                cur_sid = sid
                qid_set.clear()
                qid_set.add(qid)
        # flush the last session
        qid_list = list(qid_set)
        for i in range(1, len(qid_list)):
            qid_edges.add((qid_list[i], qid_list[i - 1]))
            qid_edges.add((qid_list[i - 1], qid_list[i]))
        # Relation 1 & 2: document is clicked under a query
        # (neighbor sets are only grown from training clicks)
        for line in lines:
            attr = line.strip().split('\t')
            qid = int(attr[1].strip())
            uids = json.loads(attr[2].strip())
            clicks = json.loads(attr[4].strip())
            for uid, click in zip(uids, clicks):
                if click and set_name in ('train', 'demo'):
                    qid_neighbors[qid].add(uid)
                    uid_neighbors[uid].add(qid)
        # Relation 3: successive documents in the same query
        for line in lines:
            attr = line.strip().split('\t')
            uids = json.loads(attr[2].strip())
            for i in range(1, len(uids)):
                uid_edges.add((uids[i], uids[i - 1]))
                uid_edges.add((uids[i - 1], uids[i]))
    # Meta-path expansion: docs sharing a clicked query -> u-u edge,
    # queries sharing a clicked doc -> q-q edge
    for qid in qid_neighbors:
        qid_neigh = list(qid_neighbors[qid])
        for i in range(len(qid_neigh)):
            for j in range(i + 1, len(qid_neigh)):
                uid_edges.add((qid_neigh[i], qid_neigh[j]))
                uid_edges.add((qid_neigh[j], qid_neigh[i]))
    for uid in uid_neighbors:
        uid_neigh = list(uid_neighbors[uid])
        for i in range(len(uid_neigh)):
            for j in range(i + 1, len(uid_neigh)):
                qid_edges.add((uid_neigh[i], uid_neigh[j]))
                qid_edges.add((uid_neigh[j], uid_neigh[i]))
    # Add self-loops
    for qid in range(len(query_qid)):
        qid_edges.add((qid, qid))
    for uid in range(len(url_uid)):
        uid_edges.add((uid, uid))
    # Convert edge sets to (2, num_edges) tensors and save
    qid_edges = list(qid_edges)
    uid_edges = list(uid_edges)
    qid_edge_index = torch.transpose(torch.from_numpy(np.array(qid_edges, dtype=np.int64)), 0, 1)
    uid_edge_index = torch.transpose(torch.from_numpy(np.array(uid_edges, dtype=np.int64)), 0, 1)
    torch.save(qid_edge_index, os.path.join(args.output, 'dgat_qid_edge_index.pth'))
    torch.save(uid_edge_index, os.path.join(args.output, 'dgat_uid_edge_index.pth'))
    # Count degrees of qid/uid nodes (the seed {i} counts the self-loop once)
    qid_degrees = [set([i]) for i in range(len(query_qid))]
    uid_degrees = [set([i]) for i in range(len(url_uid))]
    for src, dst in qid_edges:
        qid_degrees[src].add(dst)
        qid_degrees[dst].add(src)
    for src, dst in uid_edges:
        uid_degrees[src].add(dst)
        uid_degrees[dst].add(src)
    qid_degrees = [len(d_set) for d_set in qid_degrees]
    uid_degrees = [len(d_set) for d_set in uid_degrees]
    non_isolated_qid_cnt = sum(1 for qid_degree in qid_degrees if qid_degree > 1)
    non_isolated_uid_cnt = sum(1 for uid_degree in uid_degrees if uid_degree > 1)
    print(' - {}'.format('Mean/Max/Min qid degree: {}, {}, {}'.format(sum(qid_degrees) / len(qid_degrees), max(qid_degrees), min(qid_degrees))))
    print(' - {}'.format('Mean/Max/Min uid degree: {}, {}, {}'.format(sum(uid_degrees) / len(uid_degrees), max(uid_degrees), min(uid_degrees))))
    print(' - {}'.format('Non-isolated qid node num: {}'.format(non_isolated_qid_cnt)))
    print(' - {}'.format('Non-isolated uid node num: {}'.format(non_isolated_uid_cnt)))
    # Save direct uid-uid neighbors, sampled/padded to a fixed degree, for
    # neighbor feature interactions
    uid_num = len(url_uid)
    max_node_degree = 64
    uid_neigh = [set([i]) for i in range(uid_num)]
    uid_neigh_sampler = nn.Embedding(uid_num, max_node_degree)
    for src, dst in uid_edges:
        uid_neigh[src].add(dst)
        uid_neigh[dst].add(src)
    for idx, adj in enumerate(uid_neigh):
        adj_list = list(adj)
        if len(adj_list) >= max_node_degree:
            # enough neighbors: sample without replacement
            adj_sample = torch.from_numpy(np.array(random.sample(adj_list, max_node_degree), dtype=np.int64))
        else:
            # too few neighbors: pad by sampling with replacement
            adj_sample = torch.from_numpy(np.array(random.choices(adj_list, k=max_node_degree), dtype=np.int64))
        uid_neigh_sampler.weight.data[idx] = adj_sample.clone()
    torch.save(uid_neigh_sampler, os.path.join(args.output, 'dgat_uid_neighbors.pth'))
def generate_dataset_for_cold_start(args):
    """Split the test set into cold-start subsets and write them to disk.

    A test session is classified by whether it contains any query id (Q) or
    any doc id (D) never seen in the training split:
      cold_q  - unseen query, all docs seen
      cold_d  - all queries seen, unseen doc
      cold_qd - unseen query AND unseen doc
      warm_qd - everything seen in training
    The four mutually exclusive subsets are written as
    ``<name>_test_per_query_quid.txt`` under ``args.output``.
    """
    def load_dataset(data_path):
        """Load a *_per_query_quid.txt file, grouping consecutive rows by sid."""
        data_set = []
        lines = open(data_path).readlines()
        previous_sid = -1
        qids, uids, vids, clicks = [], [], [], []
        for line in lines:
            attr = line.strip().split('\t')
            sid = int(attr[0].strip())
            if previous_sid != sid:
                # a new session starts: flush the accumulated one (if any)
                if previous_sid != -1:
                    assert len(uids) == len(qids)
                    assert len(vids) == len(qids)
                    assert len(clicks) == len(qids)
                    # each query carries a fixed 10-doc result list
                    assert len(vids[0]) == 10
                    assert len(uids[0]) == 10
                    assert len(clicks[0]) == 10
                    data_set.append({'sid': previous_sid,
                                     'qids': qids,
                                     'uids': uids,
                                     'vids': vids,
                                     'clicks': clicks})
                previous_sid = sid
                qids = [int(attr[1].strip())]
                uids = [json.loads(attr[2].strip())]
                vids = [json.loads(attr[3].strip())]
                clicks = [json.loads(attr[4].strip())]
            else:
                # the previous session continues
                qids.append(int(attr[1].strip()))
                uids.append(json.loads(attr[2].strip()))
                vids.append(json.loads(attr[3].strip()))
                clicks.append(json.loads(attr[4].strip()))
        # flush the final session
        data_set.append({'sid': previous_sid,
                         'qids': qids,
                         'uids': uids,
                         'vids': vids,
                         'clicks': clicks,})
        return data_set

    def write_back(filename, sessions):
        """Write one session subset back in the per-query txt format."""
        with open(os.path.join(args.output, filename), 'w') as file:
            for session_info in sessions:
                sid = session_info['sid']
                for qid, uids, vids, clicks in zip(session_info['qids'],
                                                   session_info['uids'],
                                                   session_info['vids'],
                                                   session_info['clicks']):
                    file.write("{}\t{}\t{}\t{}\t{}\n".format(sid, qid, str(uids), str(vids), str(clicks)))

    # Load original train/test dataset
    print(' - {}'.format('start loading train/test set...'))
    train_set = load_dataset(os.path.join(args.output, 'train_per_query_quid.txt'))
    test_set = load_dataset(os.path.join(args.output, 'test_per_query_quid.txt'))
    print(' - {}'.format('train session num: {}'.format(len(train_set))))
    print(' - {}'.format('test session num: {}'.format(len(test_set))))
    # Collect the query/doc ids that appear in training, for filtering
    print(' - {}'.format('Constructing train query set for filtering'))
    step_pbar = tqdm(total=len(train_set))
    train_query_set = set()
    train_doc_set = set()
    for session_info in train_set:
        step_pbar.update(1)
        train_query_set = train_query_set | set(session_info['qids'])
        for uids in session_info['uids']:
            train_doc_set = train_doc_set | set(uids)
    print(' - {}'.format('unique train query num: {}'.format(len(train_query_set))))
    print(' - {}'.format('unique train doc num: {}'.format(len(train_doc_set))))
    # Divide the full test set into four mutually exclusive parts
    print(' - {}'.format('Start the full test set division'))
    step_pbar = tqdm(total=len(test_set))
    cold_q, cold_d, cold_qd, warm_qd = [], [], [], []
    for session_info in test_set:
        step_pbar.update(1)
        is_q_cold = any(qid not in train_query_set for qid in session_info['qids'])
        is_d_cold = any(uid not in train_doc_set
                        for uids in session_info['uids'] for uid in uids)
        if is_q_cold and is_d_cold:
            cold_qd.append(session_info)
        elif is_q_cold:
            cold_q.append(session_info)
        elif is_d_cold:
            cold_d.append(session_info)
        else:
            warm_qd.append(session_info)
    print(' - {}'.format('Total session num: {}'.format(len(cold_q) + len(cold_d) + len(cold_qd) + len(warm_qd))))
    print(' - {}'.format('Cold Q session num: {}'.format(len(cold_q))))
    print(' - {}'.format('Cold D session num: {}'.format(len(cold_d))))
    print(' - {}'.format('Cold QD session num: {}'.format(len(cold_qd))))
    print(' - {}'.format('Warm QD session num: {}'.format(len(warm_qd))))
    # Save the four session sets back to files (one shared writer helper
    # replaces the four copy-pasted write-back sections)
    print(' - {}'.format('Write back cold_q set'))
    write_back('cold_q_test_per_query_quid.txt', cold_q)
    print(' - {}'.format('Write back cold_d set'))
    write_back('cold_d_test_per_query_quid.txt', cold_d)
    print(' - {}'.format('Write back cold_qd set'))
    write_back('cold_qd_test_per_query_quid.txt', cold_qd)
    print(' - {}'.format('Write back warm_qd set'))
    write_back('warm_qd_test_per_query_quid.txt', warm_qd)
def compute_sparsity(args):
    """Report the query-doc interaction sparsity of the dataset."""
    # load entity dictionaries
    print(' - {}'.format('Loading entity dictionaries...'))
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')
    # Count observed query-doc pairs across all three splits; only the
    # training split contributes to the pair set used for the sparsity ratio.
    print(' - {}'.format('Count the query-doc pairs in the dataset...'))
    train_qu_set, q_set, u_set = set(), set(), set()
    for split in ('train', 'valid', 'test'):
        print(' - {}'.format('Counting the query-doc pairs in the {} set'.format(split)))
        with open(os.path.join(args.output, '{}_per_query_quid.txt'.format(split))) as fin:
            rows = fin.readlines()
        for row in rows:
            fields = row.strip().split('\t')
            qid = int(fields[1].strip())
            for uid in json.loads(fields[2].strip()):
                if split == 'train':
                    train_qu_set.add((qid, uid))
                q_set.add(qid)
                u_set.add(uid)
    # Every non-padding entity must appear at least once ('' occupies id 0)
    assert len(q_set) + 1 == len(query_qid)
    assert len(u_set) + 1 == len(url_uid)
    print(' - {}'.format('There are {} unique query-doc pairs in the training dataset...'.format(len(train_qu_set))))
    print(' - {}'.format('There are {} unique queries in the dataset...'.format(len(q_set))))
    print(' - {}'.format('There are {} unique docs in the dataset...'.format(len(u_set))))
    print(' - {}'.format('There are {} possible query-doc pairs in the whole dataset...'.format(len(q_set) * len(u_set))))
    print(' - {}'.format('The sparsity is: 1 - {} / {} = {}%'.format(len(train_qu_set), len(q_set) * len(u_set), 100 - 100 * len(train_qu_set) / (len(q_set) * len(u_set)))))
def main():
    """Command-line entry point: run the selected preprocessing stages.

    Each ``store_true`` flag enables one independent stage; stages run in
    pipeline order when several flags are set.
    """
    parser = argparse.ArgumentParser('TREC2014')
    parser.add_argument('--dataset', default='TREC2014.xml',
                        help='dataset name')
    parser.add_argument('--input', default='../dataset/TREC2014/',
                        help='input path')
    parser.add_argument('--output', default='./data/TREC2014',
                        help='output path')
    parser.add_argument('--dict_list', action='store_true',
                        help='generate dicts and lists for info_per_session/info_per_query')
    parser.add_argument('--train_valid_test_data', action='store_true',
                        help='generate train/valid/test data txt')
    parser.add_argument('--dgat', action='store_true',
                        help='construct graph for double GAT')
    parser.add_argument('--cold_start', action='store_true',
                        help='construct dataset for studying cold start problems')
    parser.add_argument('--sparsity', action='store_true',
                        help='compute sparisity for the dataset')
    # BUGFIX: type=float is required — without it a ratio supplied on the
    # command line arrives as a str and `session_num * args.trainset_ratio`
    # breaks in generate_train_valid_test (only the float defaults worked).
    parser.add_argument('--trainset_ratio', default=0.8, type=float,
                        help='ratio of the train session/query according to the total number of sessions/queries')
    parser.add_argument('--validset_ratio', default=0.1, type=float,
                        help='ratio of the valid session/query according to the total number of sessions/queries')
    args = parser.parse_args()
    if args.dict_list:
        # generate info_per_session & info_per_query
        print('===> {}'.format('generating dicts and lists...'))
        generate_dict_list(args)
    if args.train_valid_test_data:
        # load lists saved by generate_dict_list() and generate the split txts
        print('===> {}'.format('generating train & valid & test data txt...'))
        generate_train_valid_test(args)
    if args.dgat:
        # construct graph for double GAT
        print('===> {}'.format('generating graph for double GAT...'))
        construct_dgat_graph(args)
    if args.cold_start:
        # construct dataset for studying cold start problems
        print('===> {}'.format('generating dataset for studying cold start problems...'))
        generate_dataset_for_cold_start(args)
    if args.sparsity:
        # compute sparisity for the dataset
        print('===> {}'.format('compute sparisity for the dataset...'))
        compute_sparsity(args)
    print('===> {}'.format('Done.'))
if __name__ == '__main__':
    main()
# coding: utf8
from xml.dom.minidom import parse
import xml.dom.minidom
import time
import pprint
import string
import sys
sys.path.append("..")
import argparse
import re
import os
import numpy as np
import torch
import torch.nn as nn
from utils import *
from math import log
import random
import json
import matplotlib.pyplot as plot
from tqdm import tqdm
def generate_dict_list(args):
punc = '\\~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
session_sid = {}
query_qid, url_uid, vtype_vid = {'': 0}, {'': 0}, {'': 0}
uid_description = {}
total_click_num = 0
print(' - {}'.format('start parsing xml file...'))
DOMTree = xml.dom.minidom.parse(os.path.join(args.input, args.dataset))
TREC2014 = DOMTree.documentElement
sessions = TREC2014.getElementsByTagName('session')
# generate infos_per_session
print(' - {}'.format('generating infos_per_session...'))
infos_per_session = []
junk_interation_num = 0
for session in sessions:
info_per_session = {}
# get the session id
session_number = int(session.getAttribute('num'))
if not (session_number in session_sid):
session_sid[session_number] = len(session_sid)
info_per_session['session_number'] = session_number
info_per_session['sid'] = session_sid[session_number]
# print('session: {}'.format(session_number))
# Get topic id
topic = int(session.getElementsByTagName('topic')[0].getAttribute('num'))
info_per_session['topic'] = topic
# Get information within a query
interactions = session.getElementsByTagName('interaction')
interaction_infos = []
for interaction in interactions:
interaction_info = {}
# Get query/document infomation
query = interaction.getElementsByTagName('query')[0].childNodes[0].data
docs = interaction.getElementsByTagName('results')[0].getElementsByTagName('result')
doc_infos = []
# Sanity check
if len(docs) == 0:
print(' - {}'.format('WARNING: find a query with no docs: {}'.format(query)))
junk_interation_num += 1
continue
elif len(docs) > 10:
# more than 10 docs is not ok. May cause index out-of-range in embeddings
print(' - {}'.format('WARNING: find a query with more than 10 docs: {}'.format(query)))
junk_interation_num += 1
continue
elif len(docs) < 10:
# less than 10 docs is ok. Never cause index out-of-range in embeddings
print(' - {}'.format('WARNING: find a query with less than 10 docs: {}'.format(query)))
junk_interation_num += 1
continue
# Pass the sanity check, save useful information
if not (query in query_qid):
query_qid[query] = len(query_qid)
interaction_info['query'] = query
interaction_info['qid'] = query_qid[query]
interaction_info['session'] = info_per_session['session_number']
interaction_info['sid'] = info_per_session['sid']
for doc_idx, doc in enumerate(docs):
# WARNING: In case there might be junk data in TREC2014 (e.g., rank > 10), so we use manual doc_rank here
# NOTE: Vertical type is not provided in TREC datasets. It is now only provided in TianGong-ST.
# So we manually set vtype equal to 0, whose corresponding qid is 1.
doc_rank = int(doc.getAttribute('rank'))
doc_rank = 10 if doc_rank % 10 == 0 else doc_rank % 10
assert 1 <= doc_rank and doc_rank <= 10
assert doc_idx + 1 == doc_rank
url = doc.getElementsByTagName('clueweb12id')[0].childNodes[0].data
vtype = '0'
if not (url in url_uid):
url_uid[url] = len(url_uid)
if not (vtype in vtype_vid):
vtype_vid[vtype] = len(vtype_vid)
doc_info = {}
doc_info['rank'] = doc_rank
doc_info['url'] = url
doc_info['uid'] = url_uid[url]
doc_info['vtype'] = vtype
doc_info['vid'] = vtype_vid[vtype]
doc_info['click'] = 0
doc_infos.append(doc_info)
# print(' doc ranks at {}: {}'.format(doc_rank, url))
# Get click information if there are clicked docs
# Maybe there are no clicks in this query
clicks = interaction.getElementsByTagName('clicked')
if len(clicks) > 0:
clicks = clicks[0].getElementsByTagName('click')
total_click_num += len(clicks)
for click in clicks:
clicked_doc_rank = int(click.getElementsByTagName('rank')[0].childNodes[0].data)
for item in doc_infos:
if item['rank'] == clicked_doc_rank:
item['click'] = 1
break
# print(' click doc ranked at {}'.format(clicked_doc_rank))
else:
pass
# print(' click nothing')
interaction_info['docs'] = doc_infos
interaction_info['uids'] = [doc['uid'] for doc in doc_infos]
interaction_info['vids'] = [doc['vid'] for doc in doc_infos]
interaction_info['clicks'] = [doc['click'] for doc in doc_infos]
interaction_infos.append(interaction_info)
info_per_session['interactions'] = interaction_infos
infos_per_session.append(info_per_session)
print(' - {}'.format('abandon {} junk interactions'.format(junk_interation_num)))
# generate infos_per_query
print(' - {}'.format('generating infos_per_query...'))
infos_per_query = []
for info_per_session in infos_per_session:
interaction_infos = info_per_session['interactions']
for interaction_info in interaction_infos:
infos_per_query.append(interaction_info)
# save and check infos_per_session
print(' - {}'.format('save and check infos_per_session...'))
print(' - {}'.format('length of infos_per_session: {}'.format(len(infos_per_session))))
# pprint.pprint(infos_per_session)
# print('length of infos_per_session: {}'.format(len(infos_per_session)))
save_list(args.output, 'infos_per_session.list', infos_per_session)
list1 = load_list(args.output, 'infos_per_session.list')
assert len(infos_per_session) == len(list1)
for idx, item in enumerate(infos_per_session):
assert item == list1[idx]
# save and check infos_per_query
print(' - {}'.format('save and check infos_per_query...'))
print(' - {}'.format('length of infos_per_query: {}'.format(len(infos_per_query))))
# pprint.pprint(infos_per_query)
# print('length of infos_per_query: {}'.format(len(infos_per_query)))
save_list(args.output, 'infos_per_query.list', infos_per_query)
list2 = load_list(args.output, 'infos_per_query.list')
assert len(infos_per_query) == len(list2)
for idx, item in enumerate(infos_per_query):
assert item == list2[idx]
# save and check dictionaries
print(' - {}'.format('save and check dictionaries...'))
print(' - {}'.format('unique session number: {}'.format(len(session_sid))))
print(' - {}'.format('unique query number: {}'.format(len(query_qid))))
print(' - {}'.format('unique doc number: {}'.format(len(url_uid))))
print(' - {}'.format('unique vtype number: {}'.format(len(vtype_vid))))
print(' - {}'.format('total click number: {}'.format(total_click_num)))
save_dict(args.output, 'session_sid.dict', session_sid)
save_dict(args.output, 'query_qid.dict', query_qid)
save_dict(args.output, 'url_uid.dict', url_uid)
save_dict(args.output, 'vtype_vid.dict', vtype_vid)
dict1 = load_dict(args.output, 'session_sid.dict')
dict2 = load_dict(args.output, 'query_qid.dict')
dict3 = load_dict(args.output, 'url_uid.dict')
dict4 = load_dict(args.output, 'vtype_vid.dict')
assert len(session_sid) == len(dict1)
assert len(query_qid) == len(dict2)
assert len(url_uid) == len(dict3)
assert len(vtype_vid) == len(dict4)
for key in dict1:
assert dict1[key] == session_sid[key]
for key in dict2:
assert dict2[key] == query_qid[key]
for key in dict3:
assert dict3[key] == url_uid[key]
for key in dict4:
assert dict4[key] == vtype_vid[key]
print(' - {}'.format('Done'))
def generate_train_valid_test(args):
    """Shuffle all sessions deterministically, split them into train/valid/test
    sets by the configured ratios, and write one per-query txt file per split.

    Also prints how many valid/test entities (queries, docs, vtypes) never
    appear in the train split, as cold-start statistics.
    """
    # Entity dictionaries built by the earlier dict/list generation stage.
    print(' - {}'.format('loading entity dictionaries...'))
    session_sid = load_dict(args.output, 'session_sid.dict')
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')
    vtype_vid = load_dict(args.output, 'vtype_vid.dict')
    # Per-session interaction records saved earlier.
    print(' - {}'.format('loading the infos_per_session...'))
    infos_per_session = load_list(args.output, 'infos_per_session.list')
    # Compute split sizes; the test split receives whatever remains after
    # train and valid, so the three counts always sum to session_num.
    session_num = len(infos_per_session)
    train_session_num = int(session_num * args.trainset_ratio)
    valid_session_num = int(session_num * args.validset_ratio)
    test_session_num = session_num - train_session_num - valid_session_num
    train_valid_split = train_session_num
    valid_test_split = train_session_num + valid_session_num
    print(' - {}'.format('train/valid split at: {}'.format(train_valid_split)))
    print(' - {}'.format('valid/test split at: {}'.format(valid_test_split)))
    print(' - {}'.format('train sessions: {}'.format(train_session_num)))
    print(' - {}'.format('valid sessions: {}'.format(valid_session_num)))
    print(' - {}'.format('test sessions: {}'.format(test_session_num)))
    print(' - {}'.format('total sessions: {}'.format(session_num)))
    # Shuffle with a fixed seed so the split is reproducible across runs.
    print(' - {}'.format('generating train & valid & test data per session...'))
    random.seed(2333)
    random.shuffle(infos_per_session)
    train_sessions = infos_per_session[:train_valid_split]
    valid_sessions = infos_per_session[train_valid_split:valid_test_split]
    test_sessions = infos_per_session[valid_test_split:]
    assert train_session_num == len(train_sessions), 'train_session_num: {}, len(train_sessions): {}'.format(train_session_num, len(train_sessions))
    assert valid_session_num == len(valid_sessions), 'valid_session_num: {}, len(valid_sessions): {}'.format(valid_session_num, len(valid_sessions))
    assert test_session_num == len(test_sessions), 'test_session_num: {}, len(test_sessions): {}'.format(test_session_num, len(test_sessions))
    assert session_num == len(train_sessions) + len(valid_sessions) + len(test_sessions), 'session_num: {}, len(train_sessions) + len(valid_sessions) + len(test_sessions): {}'.format(session_num, len(train_sessions) + len(valid_sessions) + len(test_sessions))
    # Flatten each split's sessions into a flat list of per-query records.
    print(' - {}'.format('generating train & valid & test data per queries...'))
    train_queries = [interaction for session in train_sessions for interaction in session['interactions']]
    valid_queries = [interaction for session in valid_sessions for interaction in session['interactions']]
    test_queries = [interaction for session in test_sessions for interaction in session['interactions']]
    print(' - {}'.format('train queries: {}'.format(len(train_queries))))
    print(' - {}'.format('valid queries: {}'.format(len(valid_queries))))
    print(' - {}'.format('test queries: {}'.format(len(test_queries))))
    print(' - {}'.format('total queries: {}'.format(len(train_queries) + len(valid_queries) + len(test_queries))))
    # Serialize each split; the helper also returns the entity sets it wrote.
    print(' - {}'.format('writing back to txt files...'))
    print(' - {}'.format('writing into {}/train_per_query.txt'.format(args.output)))
    train_query_set, train_doc_set, train_vtype_set = generate_data_per_query(train_queries, np.arange(0, len(train_queries)), args.output, 'train_per_query')
    print(' - {}'.format('writing into {}/valid_per_query.txt'.format(args.output)))
    valid_query_set, valid_doc_set, valid_vtype_set = generate_data_per_query(valid_queries, np.arange(0, len(valid_queries)), args.output, 'valid_per_query')
    print(' - {}'.format('writing into {}/test_per_query.txt'.format(args.output)))
    test_query_set, test_doc_set, test_vtype_set = generate_data_per_query(test_queries, np.arange(0, len(test_queries)), args.output, 'test_per_query')
    # Cold-start statistics: entities that occur outside the train split only.
    print(' - {}'.format('Statistics for Cold Start:'))
    print(' - {}'.format('Entity in valid not in train...'))
    print(' - {}'.format('query: {}'.format(len(valid_query_set - train_query_set))))
    print(' - {}'.format('doc: {}'.format(len(valid_doc_set - train_doc_set))))
    print(' - {}'.format('vtype: {}'.format(len(valid_vtype_set - train_vtype_set))))
    print(' - {}'.format('Entity in test not in train....'))
    print(' - {}'.format('query: {}'.format(len(test_query_set - train_query_set))))
    print(' - {}'.format('doc: {}'.format(len(test_doc_set - train_doc_set))))
    print(' - {}'.format('vtype: {}'.format(len(test_vtype_set - train_vtype_set))))
def construct_dgat_graph(args):
    """Build the query-query and doc-doc graphs used by the double-GAT model.

    Edge sources:
      - Relation 0: queries co-occurring in the same session (q-q)
      - Relations 1 & 2: click relations between a query and its clicked docs
        (collected for the train set only, used below as meta-paths)
      - Relation 3: successive documents in the same result list (u-u)
      - Meta-path induced edges: docs sharing a clicked query (u-u) and
        queries sharing a clicked doc (q-q)
      - Self-loops on every node
    The edge lists are saved as 2 x E int64 ``edge_index`` tensors, and a
    fixed-size per-doc neighbor table is saved as an nn.Embedding.
    """
    # load entity dictionaries
    print(' - {}'.format('loading entity dictionaries...'))
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')

    def _link_chain(id_list, edge_set):
        # Add bidirectional edges between consecutive ids in id_list.
        for i in range(1, len(id_list)):
            edge_set.add((id_list[i], id_list[i - 1]))
            edge_set.add((id_list[i - 1], id_list[i]))

    # Calc edge information for train/valid/test set.
    # Edges are stored as (src, dst) tuples; the previous str([a, b])/eval()
    # round-trip was fragile and slow -- tuples are hashable and need no parsing.
    # set_names = ['demo']
    set_names = ['train', 'valid', 'test']
    qid_edges, uid_edges = set(), set()
    qid_neighbors = {qid: set() for qid in range(len(query_qid))}
    uid_neighbors = {uid: set() for uid in range(len(url_uid))}
    for set_name in set_names:
        print(' - {}'.format('Constructing relations in {} set'.format(set_name)))
        lines = open(os.path.join(args.output, '{}_per_query_quid.txt'.format(set_name))).readlines()
        # Relation 0: Query-Query within the same session
        cur_sid = -1
        qid_set = set()
        for line in lines:
            attr = line.strip().split('\t')
            sid = int(attr[0].strip())
            qid = int(attr[1].strip())
            if cur_sid == sid:
                # query in the same session
                qid_set.add(qid)
            else:
                # session boundary: link the finished session's queries.
                # NOTE(review): qid_set is a set, so the "chain" order here is
                # arbitrary rather than chronological -- confirm this is intended.
                _link_chain(list(qid_set), qid_edges)
                # new session starts
                cur_sid = sid
                qid_set.clear()
                qid_set.add(qid)
        # Flush the last session of the file.
        _link_chain(list(qid_set), qid_edges)
        # Relations 1 & 2: query <-> clicked document (train set only)
        for line in lines:
            attr = line.strip().split('\t')
            qid = int(attr[1].strip())
            uids = json.loads(attr[2].strip())
            clicks = json.loads(attr[4].strip())
            for uid, click in zip(uids, clicks):
                if click and set_name in ('train', 'demo'):
                    qid_neighbors[qid].add(uid)
                    uid_neighbors[uid].add(qid)
        # Relation 3: successive documents in the same result list
        for line in lines:
            attr = line.strip().split('\t')
            uids = json.loads(attr[2].strip())
            _link_chain(uids, uid_edges)
    # Meta-paths: docs clicked for the same query become u-u edges,
    # queries that clicked the same doc become q-q edges.
    for qid in qid_neighbors:
        qid_neigh = list(qid_neighbors[qid])
        for i in range(len(qid_neigh)):
            for j in range(i + 1, len(qid_neigh)):
                uid_edges.add((qid_neigh[i], qid_neigh[j]))
                uid_edges.add((qid_neigh[j], qid_neigh[i]))
    for uid in uid_neighbors:
        uid_neigh = list(uid_neighbors[uid])
        for i in range(len(uid_neigh)):
            for j in range(i + 1, len(uid_neigh)):
                qid_edges.add((uid_neigh[i], uid_neigh[j]))
                qid_edges.add((uid_neigh[j], uid_neigh[i]))
    # Add a self-loop to every node so no node is edgeless.
    for qid in range(len(query_qid)):
        qid_edges.add((qid, qid))
    for uid in range(len(url_uid)):
        uid_edges.add((uid, uid))
    # Convert & save edges as 2 x E int64 tensors (edge_index layout).
    qid_edges = list(qid_edges)
    uid_edges = list(uid_edges)
    qid_edge_index = torch.transpose(torch.from_numpy(np.array(qid_edges, dtype=np.int64)), 0, 1)
    uid_edge_index = torch.transpose(torch.from_numpy(np.array(uid_edges, dtype=np.int64)), 0, 1)
    torch.save(qid_edge_index, os.path.join(args.output, 'dgat_qid_edge_index.pth'))
    torch.save(uid_edge_index, os.path.join(args.output, 'dgat_uid_edge_index.pth'))
    # Degree statistics: each node's neighbor set is seeded with itself, so
    # the self-loop counts once and "isolated" means degree == 1.
    qid_degrees = [set([i]) for i in range(len(query_qid))]
    uid_degrees = [set([i]) for i in range(len(url_uid))]
    for qid_edge in qid_edges:
        qid_degrees[qid_edge[0]].add(qid_edge[1])
        qid_degrees[qid_edge[1]].add(qid_edge[0])
    for uid_edge in uid_edges:
        uid_degrees[uid_edge[0]].add(uid_edge[1])
        uid_degrees[uid_edge[1]].add(uid_edge[0])
    qid_degrees = [len(d_set) for d_set in qid_degrees]
    uid_degrees = [len(d_set) for d_set in uid_degrees]
    non_isolated_qid_cnt = sum(1 if qid_degree > 1 else 0 for qid_degree in qid_degrees)
    non_isolated_uid_cnt = sum(1 if uid_degree > 1 else 0 for uid_degree in uid_degrees)
    print(' - {}'.format('Mean/Max/Min qid degree: {}, {}, {}'.format(sum(qid_degrees) / len(qid_degrees), max(qid_degrees), min(qid_degrees))))
    print(' - {}'.format('Mean/Max/Min uid degree: {}, {}, {}'.format(sum(uid_degrees) / len(uid_degrees), max(uid_degrees), min(uid_degrees))))
    print(' - {}'.format('Non-isolated qid node num: {}'.format(non_isolated_qid_cnt)))
    print(' - {}'.format('Non-isolated uid node num: {}'.format(non_isolated_uid_cnt)))
    # Fixed-size neighbor table for doc nodes, stored in an nn.Embedding so it
    # can be indexed like any lookup table; high-degree nodes are subsampled
    # without replacement, low-degree nodes are padded by sampling with
    # replacement (random.choices).
    uid_num = len(url_uid)
    max_node_degree = 64
    uid_neigh = [set([i]) for i in range(uid_num)]
    uid_neigh_sampler = nn.Embedding(uid_num, max_node_degree)
    for edge in uid_edges:
        src, dst = edge
        uid_neigh[src].add(dst)
        uid_neigh[dst].add(src)
    for idx, adj in enumerate(uid_neigh):
        adj_list = list(adj)
        if len(adj_list) >= max_node_degree:
            adj_sample = torch.from_numpy(np.array(random.sample(adj_list, max_node_degree), dtype=np.int64))
        else:
            adj_sample = torch.from_numpy(np.array(random.choices(adj_list, k=max_node_degree), dtype=np.int64))
        uid_neigh_sampler.weight.data[idx] = adj_sample.clone()
    torch.save(uid_neigh_sampler, os.path.join(args.output, 'dgat_uid_neighbors.pth'))
def generate_dataset_for_cold_start(args):
    """Partition the test sessions into four mutually exclusive subsets by
    whether their queries/docs were observed during training:

      cold_q  : at least one unseen query, all docs seen
      cold_d  : all queries seen, at least one unseen doc
      cold_qd : at least one unseen query AND at least one unseen doc
      warm_qd : every query and doc was seen in training

    Each subset is written back in the per-query tab-separated txt format.
    """
    def load_dataset(data_path):
        """
        Load a per-query txt file, grouping consecutive lines that share the
        same sid into one session record.
        """
        data_set = []
        lines = open(data_path).readlines()
        previous_sid = -1
        qids, uids, vids, clicks = [], [], [], []
        for line in lines:
            attr = line.strip().split('\t')
            sid = int(attr[0].strip())
            if previous_sid != sid:
                # a new session starts; flush the finished one (if any)
                if previous_sid != -1:
                    assert len(uids) == len(qids)
                    assert len(vids) == len(qids)
                    assert len(clicks) == len(qids)
                    # every result list is expected to hold exactly 10 docs
                    assert len(vids[0]) == 10
                    assert len(uids[0]) == 10
                    assert len(clicks[0]) == 10
                    data_set.append({'sid': previous_sid,
                                     'qids': qids,
                                     'uids': uids,
                                     'vids': vids,
                                     'clicks': clicks})
                previous_sid = sid
                qids = [int(attr[1].strip())]
                uids = [json.loads(attr[2].strip())]
                vids = [json.loads(attr[3].strip())]
                clicks = [json.loads(attr[4].strip())]
            else:
                # the previous session continues
                qids.append(int(attr[1].strip()))
                uids.append(json.loads(attr[2].strip()))
                vids.append(json.loads(attr[3].strip()))
                clicks.append(json.loads(attr[4].strip()))
        # Flush the final session; guard against an empty input file, which
        # previously produced a bogus record with sid == -1.
        if previous_sid != -1:
            data_set.append({'sid': previous_sid,
                             'qids': qids,
                             'uids': uids,
                             'vids': vids,
                             'clicks': clicks})
        return data_set

    def _write_sessions(file_name, sessions):
        # Serialize sessions back into the tab-separated per-query format
        # (one line per query: sid, qid, uids, vids, clicks).
        file = open(os.path.join(args.output, file_name), 'w')
        for session_info in sessions:
            sid = session_info['sid']
            rows = zip(session_info['qids'], session_info['uids'],
                       session_info['vids'], session_info['clicks'])
            for qid, uids, vids, clicks in rows:
                file.write("{}\t{}\t{}\t{}\t{}\n".format(sid, qid, str(uids), str(vids), str(clicks)))
        file.close()

    # Load original train/test dataset
    print(' - {}'.format('start loading train/test set...'))
    train_set = load_dataset(os.path.join(args.output, 'train_per_query_quid.txt'))
    test_set = load_dataset(os.path.join(args.output, 'test_per_query_quid.txt'))
    print(' - {}'.format('train session num: {}'.format(len(train_set))))
    print(' - {}'.format('test session num: {}'.format(len(test_set))))
    # Collect every query/doc id seen in training, for membership filtering.
    print(' - {}'.format('Constructing train query set for filtering'))
    step_pbar = tqdm(total=len(train_set))
    train_query_set = set()
    train_doc_set = set()
    for session_info in train_set:
        step_pbar.update(1)
        train_query_set |= set(session_info['qids'])
        for uids in session_info['uids']:
            train_doc_set |= set(uids)
    print(' - {}'.format('unique train query num: {}'.format(len(train_query_set))))
    print(' - {}'.format('unique train doc num: {}'.format(len(train_doc_set))))
    # Divide the full test set into four mutually exclusive parts
    print(' - {}'.format('Start the full test set division'))
    step_pbar = tqdm(total=len(test_set))
    cold_q, cold_d, cold_qd, warm_qd = [], [], [], []
    for session_info in test_set:
        step_pbar.update(1)
        # A session is "cold" on a dimension if it contains ANY unseen entity.
        is_q_cold = any(qid not in train_query_set for qid in session_info['qids'])
        is_d_cold = any(uid not in train_doc_set
                        for uids in session_info['uids'] for uid in uids)
        if is_q_cold and is_d_cold:
            cold_qd.append(session_info)
        elif is_q_cold:
            cold_q.append(session_info)
        elif is_d_cold:
            cold_d.append(session_info)
        else:
            warm_qd.append(session_info)
    print(' - {}'.format('Total session num: {}'.format(len(cold_q) + len(cold_d) + len(cold_qd) + len(warm_qd))))
    print(' - {}'.format('Cold Q session num: {}'.format(len(cold_q))))
    print(' - {}'.format('Cold D session num: {}'.format(len(cold_d))))
    print(' - {}'.format('Cold QD session num: {}'.format(len(cold_qd))))
    print(' - {}'.format('Warm QD session num: {}'.format(len(warm_qd))))
    # Save the four session sets back to files
    print(' - {}'.format('Write back cold_q set'))
    _write_sessions('cold_q_test_per_query_quid.txt', cold_q)
    print(' - {}'.format('Write back cold_d set'))
    _write_sessions('cold_d_test_per_query_quid.txt', cold_d)
    print(' - {}'.format('Write back cold_qd set'))
    _write_sessions('cold_qd_test_per_query_quid.txt', cold_qd)
    print(' - {}'.format('Write back warm_qd set'))
    _write_sessions('warm_qd_test_per_query_quid.txt', warm_qd)
def compute_sparsity(args):
    """Report how sparse the observed query-doc matrix is: the percentage of
    all possible (query, doc) pairs that never occur in the training split.
    """
    # Entity dictionaries built by the earlier preprocessing stage.
    print(' - {}'.format('Loading entity dictionaries...'))
    query_qid = load_dict(args.output, 'query_qid.dict')
    url_uid = load_dict(args.output, 'url_uid.dict')
    # Gather every (query, doc) pair seen in training, plus the global
    # query/doc vocabularies over all three splits.
    print(' - {}'.format('Count the query-doc pairs in the dataset...'))
    # set_names = ['demo']
    set_names = ['train', 'valid', 'test']
    train_qu_set, q_set, u_set = set(), set(), set()
    for set_name in set_names:
        print(' - {}'.format('Counting the query-doc pairs in the {} set'.format(set_name)))
        data_path = os.path.join(args.output, '{}_per_query_quid.txt'.format(set_name))
        for line in open(data_path).readlines():
            fields = line.strip().split('\t')
            qid = int(fields[1].strip())
            doc_ids = json.loads(fields[2].strip())
            if doc_ids:
                q_set.add(qid)
                u_set.update(doc_ids)
                if set_name == 'train':
                    train_qu_set.update(str([qid, uid]) for uid in doc_ids)
    # NOTE(review): each dictionary appears to hold exactly one extra
    # (padding?) entry beyond the observed ids -- the asserts encode that.
    assert len(q_set) + 1 == len(query_qid)
    assert len(u_set) + 1 == len(url_uid)
    print(' - {}'.format('There are {} unique query-doc pairs in the training dataset...'.format(len(train_qu_set))))
    print(' - {}'.format('There are {} unique queries in the dataset...'.format(len(q_set))))
    print(' - {}'.format('There are {} unique docs in the dataset...'.format(len(u_set))))
    print(' - {}'.format('There are {} possible query-doc pairs in the whole dataset...'.format(len(q_set) * len(u_set))))
    print(' - {}'.format('The sparsity is: 1 - {} / {} = {}%'.format(len(train_qu_set), len(q_set) * len(u_set), 100 - 100 * len(train_qu_set) / (len(q_set) * len(u_set)))))
def main():
    """Command-line entry point: run the selected preprocessing stages.

    Each boolean flag enables one independent stage; stages run in pipeline
    order (dicts/lists -> split txt -> graph -> cold-start -> sparsity).
    """
    parser = argparse.ArgumentParser('TREC2014')
    parser.add_argument('--dataset', default='TREC2014.xml',
                        help='dataset name')
    parser.add_argument('--input', default='../dataset/TREC2014/',
                        help='input path')
    parser.add_argument('--output', default='./data/TREC2014',
                        help='output path')
    parser.add_argument('--dict_list', action='store_true',
                        help='generate dicts and lists for info_per_session/info_per_query')
    parser.add_argument('--train_valid_test_data', action='store_true',
                        help='generate train/valid/test data txt')
    parser.add_argument('--dgat', action='store_true',
                        help='construct graph for double GAT')
    parser.add_argument('--cold_start', action='store_true',
                        help='construct dataset for studying cold start problems')
    parser.add_argument('--sparsity', action='store_true',
                        help='compute sparisity for the dataset')
    # BUG FIX: without type=float, ratios supplied on the command line arrive
    # as strings and break int(session_num * ratio) in generate_train_valid_test().
    parser.add_argument('--trainset_ratio', default=0.8, type=float,
                        help='ratio of the train session/query according to the total number of sessions/queries')
    parser.add_argument('--validset_ratio', default=0.1, type=float,
                        help='ratio of the valid session/query according to the total number of sessions/queries')
    args = parser.parse_args()
    if args.dict_list:
        # generate info_per_session & info_per_query
        print('===> {}'.format('generating dicts and lists...'))
        generate_dict_list(args)
    if args.train_valid_test_data:
        # load lists saved by generate_dict_list() and generates train.txt & valid.txt & test.txt
        print('===> {}'.format('generating train & valid & test data txt...'))
        generate_train_valid_test(args)
    if args.dgat:
        # construct graph for double GAT
        print('===> {}'.format('generating graph for double GAT...'))
        construct_dgat_graph(args)
    if args.cold_start:
        # construct dataset for studying cold start problems
        print('===> {}'.format('generating dataset for studying cold start problems...'))
        generate_dataset_for_cold_start(args)
    if args.sparsity:
        # compute sparisity for the dataset
        print('===> {}'.format('compute sparisity for the dataset...'))
        compute_sparsity(args)
    print('===> {}'.format('Done.'))
if __name__ == '__main__':
    # Entry-point guard: run the preprocessing pipeline only when this file
    # is executed as a script, not when it is imported.
    main()
Meta-path to q-q & u-u # Add self-loop # Convert & save edges information from set/list into tensor # print(qid_edges) # print(uid_edges) # Count degrees of qid/uid nodes # Save direct uid-uid neighbors for neighbor feature interactions Loads the dataset # a new session starts # the previous session continues # Load original train/test dataset # Construct train query set for filtering # Divide the full test set into four mutually exclusive parts # Save the four session sets back to files # load entity dictionaries # Calc sparisity for the dataset # Count the query-doc pairs in the dataset # set_names = ['demo'] # Compute the sparsity # generate info_per_session & info_per_query # load lists saved by generate_dict_list() and generates train.txt & valid.txt & test.txt # construct graph for double GAT # construct dataset for studying cold start problems # compute sparisity for the dataset | 2.202119 | 2 |