repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
rosshamish/hexgrid
|
hexgrid.py
|
nodes_touching_edge
|
python
|
def nodes_touching_edge(edge_coord):
    """
    Returns the two node coordinates which are on the given edge coordinate.

    :param edge_coord: edge coordinate, int (two hex digits)
    :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
    """
    # Split the edge coordinate into its two hexadecimal digits.
    a, b = hex_digit(edge_coord, 1), hex_digit(edge_coord, 2)
    # The parity of the digits determines which pair of adjacent nodes the
    # edge connects; presumably both-even marks one edge orientation on the
    # grid — TODO confirm against the JSettlers2 coordinate-system appendix.
    if a % 2 == 0 and b % 2 == 0:
        return [coord_from_hex_digits(a, b + 1),
                coord_from_hex_digits(a + 1, b)]
    else:
        return [coord_from_hex_digits(a, b),
                coord_from_hex_digits(a + 1, b + 1)]
|
Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int)
|
train
|
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L386-L398
|
[
"def hex_digit(coord, digit=1):\n \"\"\"\n Returns either the first or second digit of the hexadecimal representation of the given coordinate.\n :param coord: hexadecimal coordinate, int\n :param digit: 1 or 2, meaning either the first or second digit of the hexadecimal\n :return: int, either the first or second digit\n \"\"\"\n if digit not in [1,2]:\n raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(\n digit\n ))\n return int(hex(coord)[1+digit], 16)\n",
"def coord_from_hex_digits(digit_1, digit_2):\n \"\"\"\n Returns an integer representing the hexadecimal coordinate with the two given hexadecimal digits.\n\n >> hex(coord_from_hex_digits(1, 3))\n '0x13'\n >> hex(coord_from_hex_digits(1, 10))\n '0x1A'\n\n :param digit_1: first digit, int\n :param digit_2: second digit, int\n :return: hexadecimal coordinate, int\n \"\"\"\n return digit_1*16 + digit_2\n"
] |
"""
module hexgrid provides functions for working with a hexagonal settlers of catan grid.
This module implements the coordinate system described in Robert S. Thomas's PhD dissertation on
JSettlers2, Appendix A. See the project at https://github.com/jdmonin/JSettlers2 for details.
Grids have tiles, nodes, and edges. Tiles, nodes, and edges all have coordinates
on the grid. Tiles also have identifiers numbered counter-clockwise starting from
the north-west edge. There are 19 tiles.
Adjacent locations can be computed by adding an offset to the given location. These
offsets are defined as dictionaries named _<type1>_<type2>_offsets, mapping offset->direction.
This direction is a cardinal direction represented as a string.
The edge and node coordinate spaces share values. That is, the coordinate value is
not enough to uniquely identify a location on the grid. For that reason, it is recommended
to represent locations as a (CoordType, 0xCoord) pair, each of which is guaranteed
to be unique.
See individual methods for usage.
"""
import logging
__author__ = "Ross Anderson <ross.anderson@ualberta.ca>"
__version__ = "0.2.1"
# Coordinate-type tags used by location()/from_location() to select how a
# coordinate value is interpreted (edge, node and tile spaces share values).
EDGE = 0
NODE = 1
TILE = 2
# Maps tile identifier (1-19) -> tile coordinate in the hexadecimal grid.
_tile_id_to_coord = {
    # 1-19 clockwise starting from Top-Left
    # NOTE(review): the module docstring says tiles are numbered
    # counter-clockwise — confirm which description is correct.
    1: 0x37, 12: 0x59, 11: 0x7B,
    2: 0x35, 13: 0x57, 18: 0x79, 10: 0x9B,
    3: 0x33, 14: 0x55, 19: 0x77, 17: 0x99, 9: 0xBB,
    4: 0x53, 15: 0x75, 16: 0x97, 8: 0xB9,
    5: 0x73, 6: 0x95, 7: 0xB7
}
# Offset from a tile coordinate to each adjacent tile coordinate.
_tile_tile_offsets = {
    # tile_coord - tile_coord
    -0x20: 'NW',
    -0x22: 'W',
    -0x02: 'SW',
    +0x20: 'SE',
    +0x22: 'E',
    +0x02: 'NE',
}
# Offset from a tile coordinate to each of its six corner node coordinates.
_tile_node_offsets = {
    # node_coord - tile_coord
    +0x01: 'N',
    -0x10: 'NW',
    -0x01: 'SW',
    +0x10: 'S',
    +0x21: 'SE',
    +0x12: 'NE',
}
# Offset from a tile coordinate to each of its six edge coordinates.
_tile_edge_offsets = {
    # edge_coord - tile_coord
    -0x10: 'NW',
    -0x11: 'W',
    -0x01: 'SW',
    +0x10: 'SE',
    +0x11: 'E',
    +0x01: 'NE',
}
def location(hexgrid_type, coord):
    """
    Format a coordinate for display according to its coordinate type.

    Tiles render as their identifier, e.g. "1", "12". Nodes and edges render
    as "(tile_id direction)", e.g. "(1 NW)", "(12 SE)".

    :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
    :param coord: integer coordinate in this module's hexadecimal coordinate system
    :return: formatted string, or None for an unknown type
    """
    if hexgrid_type == TILE:
        return str(coord)
    if hexgrid_type == NODE:
        anchor = nearest_tile_to_node(coord)
        offset = coord - tile_id_to_coord(anchor)
        return '({} {})'.format(anchor, tile_node_offset_to_direction(offset))
    if hexgrid_type == EDGE:
        anchor = nearest_tile_to_edge(coord)
        offset = coord - tile_id_to_coord(anchor)
        return '({} {})'.format(anchor, tile_edge_offset_to_direction(offset))
    logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
    return None
def from_location(hexgrid_type, tile_id, direction=None):
    """
    Convert a (type, tile id, direction) location into a grid coordinate.

    :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
    :param tile_id: tile identifier, int
    :param direction: str, required for NODE/EDGE, forbidden for TILE
    :return: integer coordinate, or None for an unknown type
    :raise ValueError: if a direction is supplied for a TILE
    """
    if hexgrid_type == TILE:
        if direction is not None:
            raise ValueError('tiles do not have a direction')
        return tile_id_to_coord(tile_id)
    if hexgrid_type == NODE:
        return node_coord_in_direction(tile_id, direction)
    if hexgrid_type == EDGE:
        return edge_coord_in_direction(tile_id, direction)
    logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
    return None
def coastal_tile_ids():
    """
    Return a list of tile identifiers which lie on the border of the grid.

    A tile is on the border when at least one of its edges is coastal.
    """
    return [tile_id for tile_id in legal_tile_ids() if coastal_edges(tile_id)]
def coastal_coords():
    """
    Return every coastal location as a 2-tuple: (tile id, direction).

    An edge is coastal if it is on the grid's border, i.e. it has no tile on
    its far side.

    :return: list( (tile_id, direction) )
    """
    coast = []
    for tile_id in coastal_tile_ids():
        origin = tile_id_to_coord(tile_id)
        for edge in coastal_edges(tile_id):
            direction = tile_edge_offset_to_direction(edge - origin)
            if tile_id_in_direction(tile_id, direction) is None:
                coast.append((tile_id, direction))
    return coast
def coastal_edges(tile_id):
    """
    Return the coastal edge coordinates of the given tile.

    An edge is coastal if it is on the grid's border, i.e. there is no tile
    on the other side of it.

    :param tile_id: tile identifier, int
    :return: list(int)
    """
    origin = tile_id_to_coord(tile_id)
    coastal = []
    for edge in edges_touching_tile(tile_id):
        direction = tile_edge_offset_to_direction(edge - origin)
        if tile_id_in_direction(tile_id, direction) is None:
            coastal.append(edge)
    return coastal
def tile_id_in_direction(from_tile_id, direction):
    """
    Variant on direction_to_tile: returns None instead of failing when there
    is no tile in the given direction.

    :param from_tile_id: tile identifier, int
    :param direction: str
    :return: tile identifier, int or None
    """
    origin = tile_id_to_coord(from_tile_id)
    for offset, dirn in _tile_tile_offsets.items():
        if dirn != direction:
            continue
        neighbor = origin + offset
        if neighbor in legal_tile_coords():
            return tile_id_from_coord(neighbor)
    return None
def direction_to_tile(from_tile_id, to_tile_id):
    """
    Return the direction from one tile to an adjacent tile.

    Convenience wrapper around tile_tile_offset_to_direction; the two tiles
    must be adjacent.

    :param from_tile_id: tile identifier, int
    :param to_tile_id: tile identifier, int
    :return: direction from from_tile to to_tile, str
    """
    offset = tile_id_to_coord(to_tile_id) - tile_id_to_coord(from_tile_id)
    return tile_tile_offset_to_direction(offset)
def tile_tile_offset_to_direction(offset):
    """
    Return the cardinal direction of a tile-tile offset (tiles must be adjacent).

    :param offset: tile_coord - tile_coord, int
    :return: direction of the offset, str; 'ZZ' if the offset is unknown
    """
    direction = _tile_tile_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-tile offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_node_offset_to_direction(offset):
    """
    Return the cardinal direction of a tile-node offset (must be adjacent).

    :param offset: node_coord - tile_coord, int
    :return: direction of the offset, str; 'ZZ' if the offset is unknown
    """
    direction = _tile_node_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-node offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_edge_offset_to_direction(offset):
    """
    Return the cardinal direction of a tile-edge offset (must be adjacent).

    :param offset: edge_coord - tile_coord, int
    :return: direction of the offset, str; 'ZZ' if the offset is unknown
    """
    direction = _tile_edge_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-edge offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def edge_coord_in_direction(tile_id, direction):
    """
    Return the edge coordinate in the given direction at the given tile.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :return: edge coord, int
    :raise ValueError: if no edge of the tile lies in that direction
    """
    origin = tile_id_to_coord(tile_id)
    for candidate in edges_touching_tile(tile_id):
        if tile_edge_offset_to_direction(candidate - origin) == direction:
            return candidate
    raise ValueError('No edge found in direction={} at tile_id={}'.format(direction, tile_id))
def node_coord_in_direction(tile_id, direction):
    """
    Return the node coordinate in the given direction at the given tile.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :return: node coord, int
    :raise ValueError: if no node of the tile lies in that direction
    """
    origin = tile_id_to_coord(tile_id)
    for candidate in nodes_touching_tile(tile_id):
        if tile_node_offset_to_direction(candidate - origin) == direction:
            return candidate
    raise ValueError('No node found in direction={} at tile_id={}'.format(direction, tile_id))
def tile_id_to_coord(tile_id):
    """
    Convert a tile identifier to its corresponding grid coordinate.

    :param tile_id: tile identifier, Tile.tile_id
    :return: coordinate of the tile, int; -1 if the identifier is unknown
    """
    coord = _tile_id_to_coord.get(tile_id)
    if coord is None:
        logging.critical('Attempted conversion of non-existent tile_id={}'.format(tile_id))
        return -1
    return coord
def tile_id_from_coord(coord):
    """
    Convert a tile coordinate to its corresponding tile identifier.

    :param coord: coordinate of the tile, int
    :return: tile identifier, Tile.tile_id
    :raise ValueError: if no tile has the given coordinate
    """
    for tile_id, tile_coord in _tile_id_to_coord.items():
        if tile_coord == coord:
            return tile_id
    # ValueError is a subclass of Exception, so existing callers that catch
    # Exception keep working, while new callers can catch a precise type.
    raise ValueError('Tile id lookup failed, coord={} not found in map'.format(hex(coord)))
def nearest_tile_to_edge(edge_coord):
    """
    Return the identifier of a tile adjacent to the given edge.

    Convenience wrapper around nearest_tile_to_edge_using_tiles that
    considers every tile in legal_tile_ids().

    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    candidates = legal_tile_ids()
    return nearest_tile_to_edge_using_tiles(candidates, edge_coord)
def nearest_tile_to_edge_using_tiles(tile_ids, edge_coord):
    """
    Return the first tile found adjacent to the given edge.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id; None if none found
    """
    for candidate in tile_ids:
        offset = edge_coord - tile_id_to_coord(candidate)
        if offset in _tile_edge_offsets:
            return candidate
    logging.critical('Did not find a tile touching edge={}'.format(edge_coord))
def nearest_tile_to_node(node_coord):
    """
    Return the identifier of a tile adjacent to the given node.

    Convenience wrapper around nearest_tile_to_node_using_tiles that
    considers every tile in legal_tile_ids().

    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    candidates = legal_tile_ids()
    return nearest_tile_to_node_using_tiles(candidates, node_coord)
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
    """
    Return the first tile found adjacent to the given node.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id; None if none found
    """
    for candidate in tile_ids:
        offset = node_coord - tile_id_to_coord(candidate)
        if offset in _tile_node_offsets:
            return candidate
    logging.critical('Did not find a tile touching node={}'.format(node_coord))
def edges_touching_tile(tile_id):
    """
    Return the list of edge coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of edge coordinates touching the given tile, list(int)
    """
    origin = tile_id_to_coord(tile_id)
    return [origin + offset for offset in _tile_edge_offsets]
def nodes_touching_tile(tile_id):
    """
    Return the list of node coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of node coordinates touching the given tile, list(int)
    """
    origin = tile_id_to_coord(tile_id)
    return [origin + offset for offset in _tile_node_offsets]
def legal_edge_coords():
    """
    Return the set of all legal edge coordinates on the grid.
    """
    edges = {edge
             for tile_id in legal_tile_ids()
             for edge in edges_touching_tile(tile_id)}
    logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
    return edges
def legal_node_coords():
    """
    Return the set of all legal node coordinates on the grid.
    """
    nodes = {node
             for tile_id in legal_tile_ids()
             for node in nodes_touching_tile(tile_id)}
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
def legal_tile_ids():
    """
    Return the set of all legal tile identifiers, the integers 1-19 inclusive.
    """
    return set(_tile_id_to_coord)
def legal_tile_coords():
    """
    Return the set of all legal tile coordinates on the grid.
    """
    return {coord for coord in _tile_id_to_coord.values()}
def hex_digit(coord, digit=1):
    """
    Return the first or second hexadecimal digit of the given coordinate.

    :param coord: hexadecimal coordinate, int
    :param digit: 1 or 2, selecting the first or second hex digit
    :return: int value of the selected digit
    :raise ValueError: if digit is not 1 or 2
    """
    if digit in (1, 2):
        # hex(coord) looks like '0x37'; character digit+1 is the wanted digit.
        return int(hex(coord)[digit + 1], 16)
    raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(digit))
def coord_from_hex_digits(digit_1, digit_2):
    """
    Combine two hexadecimal digits into a single integer coordinate.

    >> hex(coord_from_hex_digits(1, 3))
    '0x13'
    >> hex(coord_from_hex_digits(1, 10))
    '0x1A'

    :param digit_1: first (high) digit, int
    :param digit_2: second (low) digit, int
    :return: hexadecimal coordinate, int
    """
    # Plain arithmetic (not bit-ops) so digit_2 values >= 16 carry correctly.
    return 16 * digit_1 + digit_2
def rotate_direction(hexgrid_type, direction, ccw=True):
    """
    Rotate a direction string one tick around its element's direction ring.

    :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
    :param direction: string, eg 'NW', 'N', 'SE'
    :param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
    :return: the rotated direction string, eg 'SW', 'NW', 'S'
    :raise ValueError: if the hexgrid type is not TILE/NODE/EDGE
    """
    if hexgrid_type in (TILE, EDGE):
        ring = ['NW', 'W', 'SW', 'SE', 'E', 'NE'] if ccw \
            else ['NW', 'NE', 'E', 'SE', 'SW', 'W']
    elif hexgrid_type == NODE:
        ring = ['N', 'NW', 'SW', 'S', 'SE', 'NE'] if ccw \
            else ['N', 'NE', 'SE', 'S', 'SW', 'NW']
    else:
        raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type))
    # Modulo wraps the last ring entry back to the first.
    return ring[(ring.index(direction) + 1) % len(ring)]
|
rosshamish/hexgrid
|
hexgrid.py
|
legal_edge_coords
|
python
|
def legal_edge_coords():
    """
    Return all legal edge coordinates on the grid, as a set.
    """
    edges = set()
    for tile_id in legal_tile_ids():
        # Every edge belongs to up to two tiles; the set removes duplicates.
        for edge in edges_touching_tile(tile_id):
            edges.add(edge)
    logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
    return edges
|
Return all legal edge coordinates on the grid.
|
train
|
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L401-L410
|
[
"def legal_tile_ids():\n \"\"\"\n Return all legal tile identifiers on the grid. In the range [1,19] inclusive.\n \"\"\"\n return set(_tile_id_to_coord.keys())\n",
"def edges_touching_tile(tile_id):\n \"\"\"\n Get a list of edge coordinates touching the given tile.\n\n :param tile_id: tile identifier, Tile.tile_id\n :return: list of edge coordinates touching the given tile, list(int)\n \"\"\"\n coord = tile_id_to_coord(tile_id)\n edges = []\n for offset in _tile_edge_offsets.keys():\n edges.append(coord + offset)\n # logging.debug('tile_id={}, edges touching={}'.format(tile_id, edges))\n return edges\n"
] |
"""
module hexgrid provides functions for working with a hexagonal settlers of catan grid.
This module implements the coordinate system described in Robert S. Thomas's PhD dissertation on
JSettlers2, Appendix A. See the project at https://github.com/jdmonin/JSettlers2 for details.
Grids have tiles, nodes, and edges. Tiles, nodes, and edges all have coordinates
on the grid. Tiles also have identifiers numbered counter-clockwise starting from
the north-west edge. There are 19 tiles.
Adjacent locations can be computed by adding an offset to the given location. These
offsets are defined as dictionaries named _<type1>_<type2>_offsets, mapping offset->direction.
This direction is a cardinal direction represented as a string.
The edge and node coordinate spaces share values. That is, the coordinate value is
not enough to uniquely identify a location on the grid. For that reason, it is recommended
to represent locations as a (CoordType, 0xCoord) pair, each of which is guaranteed
to be unique.
See individual methods for usage.
"""
import logging
__author__ = "Ross Anderson <ross.anderson@ualberta.ca>"
__version__ = "0.2.1"
EDGE = 0
NODE = 1
TILE = 2
_tile_id_to_coord = {
# 1-19 clockwise starting from Top-Left
1: 0x37, 12: 0x59, 11: 0x7B,
2: 0x35, 13: 0x57, 18: 0x79, 10: 0x9B,
3: 0x33, 14: 0x55, 19: 0x77, 17: 0x99, 9: 0xBB,
4: 0x53, 15: 0x75, 16: 0x97, 8: 0xB9,
5: 0x73, 6: 0x95, 7: 0xB7
}
_tile_tile_offsets = {
# tile_coord - tile_coord
-0x20: 'NW',
-0x22: 'W',
-0x02: 'SW',
+0x20: 'SE',
+0x22: 'E',
+0x02: 'NE',
}
_tile_node_offsets = {
# node_coord - tile_coord
+0x01: 'N',
-0x10: 'NW',
-0x01: 'SW',
+0x10: 'S',
+0x21: 'SE',
+0x12: 'NE',
}
_tile_edge_offsets = {
# edge_coord - tile_coord
-0x10: 'NW',
-0x11: 'W',
-0x01: 'SW',
+0x10: 'SE',
+0x11: 'E',
+0x01: 'NE',
}
def location(hexgrid_type, coord):
"""
Returns a formatted string representing the coordinate. The format depends on the
coordinate type.
Tiles look like: 1, 12
Nodes look like: (1 NW), (12 S)
Edges look like: (1 NW), (12 SE)
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param coord: integer coordinate in this module's hexadecimal coordinate system
:return: formatted string for display
"""
if hexgrid_type == TILE:
return str(coord)
elif hexgrid_type == NODE:
tile_id = nearest_tile_to_node(coord)
dirn = tile_node_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
elif hexgrid_type == EDGE:
tile_id = nearest_tile_to_edge(coord)
dirn = tile_edge_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def from_location(hexgrid_type, tile_id, direction=None):
"""
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param tile_id: tile identifier, int
:param direction: str
:return: integer coordinate in this module's hexadecimal coordinate system
"""
if hexgrid_type == TILE:
if direction is not None:
raise ValueError('tiles do not have a direction')
return tile_id_to_coord(tile_id)
elif hexgrid_type == NODE:
return node_coord_in_direction(tile_id, direction)
elif hexgrid_type == EDGE:
return edge_coord_in_direction(tile_id, direction)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def coastal_tile_ids():
"""
Returns a list of tile identifiers which lie on the border of the grid.
"""
return list(filter(lambda tid: len(coastal_edges(tid)) > 0, legal_tile_ids()))
def coastal_coords():
"""
A coastal coord is a 2-tuple: (tile id, direction).
An edge is coastal if it is on the grid's border.
:return: list( (tile_id, direction) )
"""
coast = list()
for tile_id in coastal_tile_ids():
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in coastal_edges(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
coast.append((tile_id, dirn))
# logging.debug('coast={}'.format(coast))
return coast
def coastal_edges(tile_id):
"""
Returns a list of coastal edge coordinate.
An edge is coastal if it is on the grid's border.
:return: list(int)
"""
edges = list()
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in edges_touching_tile(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
edges.append(edge_coord)
return edges
def tile_id_in_direction(from_tile_id, direction):
"""
Variant on direction_to_tile. Returns None if there's no tile there.
:param from_tile_id: tile identifier, int
:param direction: str
:return: tile identifier, int or None
"""
coord_from = tile_id_to_coord(from_tile_id)
for offset, dirn in _tile_tile_offsets.items():
if dirn == direction:
coord_to = coord_from + offset
if coord_to in legal_tile_coords():
return tile_id_from_coord(coord_to)
return None
def direction_to_tile(from_tile_id, to_tile_id):
"""
Convenience method wrapping tile_tile_offset_to_direction. Used to get the direction
of the offset between two tiles. The tiles must be adjacent.
:param from_tile_id: tile identifier, int
:param to_tile_id: tile identifier, int
:return: direction from from_tile to to_tile, str
"""
coord_from = tile_id_to_coord(from_tile_id)
coord_to = tile_id_to_coord(to_tile_id)
direction = tile_tile_offset_to_direction(coord_to - coord_from)
# logging.debug('Tile direction: {}->{} is {}'.format(
# from_tile.tile_id,
# to_tile.tile_id,
# direction
# ))
return direction
def tile_tile_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-tile offset. The tiles must be adjacent.
:param offset: tile_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_tile_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-tile offset={:x}'.format(offset))
return 'ZZ'
def tile_node_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-node offset. The tile and node must be adjacent.
:param offset: node_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_node_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-node offset={:x}'.format(offset))
return 'ZZ'
def tile_edge_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-edge offset. The tile and edge must be adjacent.
:param offset: edge_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_edge_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-edge offset={:x}'.format(offset))
return 'ZZ'
def edge_coord_in_direction(tile_id, direction):
"""
Returns the edge coordinate in the given direction at the given tile identifier.
:param tile_id: tile identifier, int
:param direction: direction, str
:return: edge coord, int
"""
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in edges_touching_tile(tile_id):
if tile_edge_offset_to_direction(edge_coord - tile_coord) == direction:
return edge_coord
raise ValueError('No edge found in direction={} at tile_id={}'.format(
direction,
tile_id
))
def node_coord_in_direction(tile_id, direction):
"""
Returns the node coordinate in the given direction at the given tile identifier.
:param tile_id: tile identifier, int
:param direction: direction, str
:return: node coord, int
"""
tile_coord = tile_id_to_coord(tile_id)
for node_coord in nodes_touching_tile(tile_id):
if tile_node_offset_to_direction(node_coord - tile_coord) == direction:
return node_coord
raise ValueError('No node found in direction={} at tile_id={}'.format(
direction,
tile_id
))
def tile_id_to_coord(tile_id):
"""
Convert a tile identifier to its corresponding grid coordinate.
:param tile_id: tile identifier, Tile.tile_id
:return: coordinate of the tile, int
"""
try:
return _tile_id_to_coord[tile_id]
except KeyError:
logging.critical('Attempted conversion of non-existent tile_id={}'.format(tile_id))
return -1
def tile_id_from_coord(coord):
    """
    Convert a tile coordinate to its corresponding tile identifier.

    :param coord: coordinate of the tile, int
    :return: tile identifier, Tile.tile_id
    :raise ValueError: if no tile has the given coordinate
    """
    for tile_id, tile_coord in _tile_id_to_coord.items():
        if tile_coord == coord:
            return tile_id
    # ValueError is a subclass of Exception, so existing callers that catch
    # Exception keep working, while new callers can catch a precise type.
    raise ValueError('Tile id lookup failed, coord={} not found in map'.format(hex(coord)))
def nearest_tile_to_edge(edge_coord):
"""
Convenience method wrapping nearest_tile_to_edge_using_tiles. Looks at all tiles in legal_tile_ids().
Returns a tile identifier.
:param edge_coord: edge coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
return nearest_tile_to_edge_using_tiles(legal_tile_ids(), edge_coord)
def nearest_tile_to_edge_using_tiles(tile_ids, edge_coord):
"""
Get the first tile found adjacent to the given edge. Returns a tile identifier.
:param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
:param edge_coord: edge coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
for tile_id in tile_ids:
if edge_coord - tile_id_to_coord(tile_id) in _tile_edge_offsets.keys():
return tile_id
logging.critical('Did not find a tile touching edge={}'.format(edge_coord))
def nearest_tile_to_node(node_coord):
"""
Convenience method wrapping nearest_tile_to_node_using_tiles. Looks at all tiles in legal_tile_ids().
Returns a tile identifier.
:param node_coord: node coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
return nearest_tile_to_node_using_tiles(legal_tile_ids(), node_coord)
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
"""
Get the first tile found adjacent to the given node. Returns a tile identifier.
:param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
:param node_coord: node coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
for tile_id in tile_ids:
if node_coord - tile_id_to_coord(tile_id) in _tile_node_offsets.keys():
return tile_id
logging.critical('Did not find a tile touching node={}'.format(node_coord))
def edges_touching_tile(tile_id):
"""
Get a list of edge coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of edge coordinates touching the given tile, list(int)
"""
coord = tile_id_to_coord(tile_id)
edges = []
for offset in _tile_edge_offsets.keys():
edges.append(coord + offset)
# logging.debug('tile_id={}, edges touching={}'.format(tile_id, edges))
return edges
def nodes_touching_tile(tile_id):
"""
Get a list of node coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of node coordinates touching the given tile, list(int)
"""
coord = tile_id_to_coord(tile_id)
nodes = []
for offset in _tile_node_offsets.keys():
nodes.append(coord + offset)
# logging.debug('tile_id={}, nodes touching={}'.format(tile_id, nodes))
return nodes
def nodes_touching_edge(edge_coord):
"""
Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int)
"""
a, b = hex_digit(edge_coord, 1), hex_digit(edge_coord, 2)
if a % 2 == 0 and b % 2 == 0:
return [coord_from_hex_digits(a, b + 1),
coord_from_hex_digits(a + 1, b)]
else:
return [coord_from_hex_digits(a, b),
coord_from_hex_digits(a + 1, b + 1)]
def legal_node_coords():
"""
Return all legal node coordinates on the grid
"""
nodes = set()
for tile_id in legal_tile_ids():
for node in nodes_touching_tile(tile_id):
nodes.add(node)
logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
return nodes
def legal_tile_ids():
"""
Return all legal tile identifiers on the grid. In the range [1,19] inclusive.
"""
return set(_tile_id_to_coord.keys())
def legal_tile_coords():
"""
Return all legal tile coordinates on the grid
"""
return set(_tile_id_to_coord.values())
def hex_digit(coord, digit=1):
"""
Returns either the first or second digit of the hexadecimal representation of the given coordinate.
:param coord: hexadecimal coordinate, int
:param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
:return: int, either the first or second digit
"""
if digit not in [1,2]:
raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(
digit
))
return int(hex(coord)[1+digit], 16)
def coord_from_hex_digits(digit_1, digit_2):
"""
Returns an integer representing the hexadecimal coordinate with the two given hexadecimal digits.
>> hex(coord_from_hex_digits(1, 3))
'0x13'
>> hex(coord_from_hex_digits(1, 10))
'0x1A'
:param digit_1: first digit, int
:param digit_2: second digit, int
:return: hexadecimal coordinate, int
"""
return digit_1*16 + digit_2
def rotate_direction(hexgrid_type, direction, ccw=True):
"""
Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
:param direction: string, eg 'NW', 'N', 'SE'
:param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
:return: the rotated direction string, eg 'SW', 'NW', 'S'
"""
if hexgrid_type in [TILE, EDGE]:
directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw \
else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
return directions[directions.index(direction) + 1]
elif hexgrid_type in [NODE]:
directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw \
else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
return directions[directions.index(direction) + 1]
else:
raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type))
|
rosshamish/hexgrid
|
hexgrid.py
|
legal_node_coords
|
python
|
def legal_node_coords():
    """
    Return all legal node coordinates on the grid, as a set.
    """
    nodes = set()
    for tile_id in legal_tile_ids():
        # Every node belongs to up to three tiles; the set removes duplicates.
        for node in nodes_touching_tile(tile_id):
            nodes.add(node)
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
|
Return all legal node coordinates on the grid
|
train
|
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L413-L422
|
[
"def legal_tile_ids():\n \"\"\"\n Return all legal tile identifiers on the grid. In the range [1,19] inclusive.\n \"\"\"\n return set(_tile_id_to_coord.keys())\n",
"def nodes_touching_tile(tile_id):\n \"\"\"\n Get a list of node coordinates touching the given tile.\n\n :param tile_id: tile identifier, Tile.tile_id\n :return: list of node coordinates touching the given tile, list(int)\n \"\"\"\n coord = tile_id_to_coord(tile_id)\n nodes = []\n for offset in _tile_node_offsets.keys():\n nodes.append(coord + offset)\n # logging.debug('tile_id={}, nodes touching={}'.format(tile_id, nodes))\n return nodes\n"
] |
"""
module hexgrid provides functions for working with a hexagonal settlers of catan grid.
This module implements the coordinate system described in Robert S. Thomas's PhD dissertation on
JSettlers2, Appendix A. See the project at https://github.com/jdmonin/JSettlers2 for details.
Grids have tiles, nodes, and edges. Tiles, nodes, and edges all have coordinates
on the grid. Tiles also have identifiers numbered counter-clockwise starting from
the north-west edge. There are 19 tiles.
Adjacent locations can be computed by adding an offset to the given location. These
offsets are defined as dictionaries named _<type1>_<type2>_offsets, mapping offset->direction.
This direction is a cardinal direction represented as a string.
The edge and node coordinate spaces share values. That is, the coordinate value is
not enough to uniquely identify a location on the grid. For that reason, it is recommended
to represent locations as a (CoordType, 0xCoord) pair, each of which is guaranteed
to be unique.
See individual methods for usage.
"""
import logging
__author__ = "Ross Anderson <ross.anderson@ualberta.ca>"
__version__ = "0.2.1"
# Grid element type tags; used by location()/from_location() to select behavior.
EDGE = 0
NODE = 1
TILE = 2
# Map from tile identifier (1-19) to its hexadecimal grid coordinate.
_tile_id_to_coord = {
    # 1-19 clockwise starting from Top-Left
    1: 0x37, 12: 0x59, 11: 0x7B,
    2: 0x35, 13: 0x57, 18: 0x79, 10: 0x9B,
    3: 0x33, 14: 0x55, 19: 0x77, 17: 0x99, 9: 0xBB,
    4: 0x53, 15: 0x75, 16: 0x97, 8: 0xB9,
    5: 0x73, 6: 0x95, 7: 0xB7
}
# Offset between two adjacent tile coordinates -> cardinal direction.
_tile_tile_offsets = {
    # tile_coord - tile_coord
    -0x20: 'NW',
    -0x22: 'W',
    -0x02: 'SW',
    +0x20: 'SE',
    +0x22: 'E',
    +0x02: 'NE',
}
# Offset from a tile coordinate to an adjacent node coordinate -> direction.
_tile_node_offsets = {
    # node_coord - tile_coord
    +0x01: 'N',
    -0x10: 'NW',
    -0x01: 'SW',
    +0x10: 'S',
    +0x21: 'SE',
    +0x12: 'NE',
}
# Offset from a tile coordinate to an adjacent edge coordinate -> direction.
_tile_edge_offsets = {
    # edge_coord - tile_coord
    -0x10: 'NW',
    -0x11: 'W',
    -0x01: 'SW',
    +0x10: 'SE',
    +0x11: 'E',
    +0x01: 'NE',
}
def location(hexgrid_type, coord):
    """
    Return a human-readable string for the given coordinate.

    The format depends on the coordinate type:
    Tiles look like: 1, 12
    Nodes look like: (1 NW), (12 S)
    Edges look like: (1 NW), (12 SE)

    :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
    :param coord: integer coordinate in this module's hexadecimal coordinate system
    :return: formatted string for display, or None for an unknown type
    """
    if hexgrid_type == TILE:
        return str(coord)
    # Nodes and edges differ only in which helpers locate the anchor tile
    # and translate the offset into a cardinal direction.
    if hexgrid_type == NODE:
        anchor = nearest_tile_to_node(coord)
        offset_to_dirn = tile_node_offset_to_direction
    elif hexgrid_type == EDGE:
        anchor = nearest_tile_to_edge(coord)
        offset_to_dirn = tile_edge_offset_to_direction
    else:
        logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
        return None
    dirn = offset_to_dirn(coord - tile_id_to_coord(anchor))
    return '({} {})'.format(anchor, dirn)
def from_location(hexgrid_type, tile_id, direction=None):
    """
    Convert a (type, tile id, direction) description into a grid coordinate.

    :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
    :param tile_id: tile identifier, int
    :param direction: str
    :return: integer coordinate in this module's hexadecimal coordinate system
    """
    if hexgrid_type == TILE:
        # A tile is identified by its id alone; a direction makes no sense here.
        if direction is not None:
            raise ValueError('tiles do not have a direction')
        return tile_id_to_coord(tile_id)
    if hexgrid_type == NODE:
        return node_coord_in_direction(tile_id, direction)
    if hexgrid_type == EDGE:
        return edge_coord_in_direction(tile_id, direction)
    logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
    return None
def coastal_tile_ids():
    """
    Return a list of tile identifiers which lie on the border of the grid.

    A tile is on the border exactly when it has at least one coastal edge.
    """
    return [tid for tid in legal_tile_ids() if coastal_edges(tid)]
def coastal_coords():
    """
    Return every coastal (tile id, direction) pair on the grid.

    A coastal coord is a 2-tuple: (tile id, direction).
    An edge is coastal if it is on the grid's border.

    :return: list( (tile_id, direction) )
    """
    pairs = []
    for tid in coastal_tile_ids():
        anchor = tile_id_to_coord(tid)
        for edge in coastal_edges(tid):
            dirn = tile_edge_offset_to_direction(edge - anchor)
            # Keep only the directions that point off the grid.
            if tile_id_in_direction(tid, dirn) is None:
                pairs.append((tid, dirn))
    return pairs
def coastal_edges(tile_id):
    """
    Return the coastal edge coordinates of the given tile.

    An edge is coastal if it is on the grid's border, i.e. there is no tile
    on its far side.

    :param tile_id: tile identifier, int
    :return: list(int)
    """
    anchor = tile_id_to_coord(tile_id)
    return [edge
            for edge in edges_touching_tile(tile_id)
            if tile_id_in_direction(tile_id, tile_edge_offset_to_direction(edge - anchor)) is None]
def tile_id_in_direction(from_tile_id, direction):
    """
    Variant on direction_to_tile. Returns None if there's no tile there.

    :param from_tile_id: tile identifier, int
    :param direction: str
    :return: tile identifier, int or None
    """
    origin = tile_id_to_coord(from_tile_id)
    # At most one offset maps to any given direction string.
    matching_offsets = (offset for offset, dirn in _tile_tile_offsets.items()
                        if dirn == direction)
    for offset in matching_offsets:
        target = origin + offset
        if target in legal_tile_coords():
            return tile_id_from_coord(target)
    return None
def direction_to_tile(from_tile_id, to_tile_id):
    """
    Get the direction of the offset between two tiles. The tiles must be
    adjacent. Convenience wrapper around tile_tile_offset_to_direction.

    :param from_tile_id: tile identifier, int
    :param to_tile_id: tile identifier, int
    :return: direction from from_tile to to_tile, str
    """
    offset = tile_id_to_coord(to_tile_id) - tile_id_to_coord(from_tile_id)
    return tile_tile_offset_to_direction(offset)
def tile_tile_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-tile offset. The tiles must be adjacent.

    :param offset: tile_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' for a non-adjacent offset)
    """
    direction = _tile_tile_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-tile offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_node_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-node offset. The tile and node must be adjacent.

    :param offset: node_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' for a non-adjacent offset)
    """
    direction = _tile_node_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-node offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_edge_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-edge offset. The tile and edge must be adjacent.

    :param offset: edge_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' for a non-adjacent offset)
    """
    direction = _tile_edge_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-edge offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def edge_coord_in_direction(tile_id, direction):
    """
    Return the edge coordinate in the given direction at the given tile.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :raise ValueError: if no touching edge lies in that direction
    :return: edge coord, int
    """
    anchor = tile_id_to_coord(tile_id)
    for candidate in edges_touching_tile(tile_id):
        if tile_edge_offset_to_direction(candidate - anchor) == direction:
            return candidate
    raise ValueError('No edge found in direction={} at tile_id={}'.format(direction, tile_id))
def node_coord_in_direction(tile_id, direction):
    """
    Return the node coordinate in the given direction at the given tile.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :raise ValueError: if no touching node lies in that direction
    :return: node coord, int
    """
    anchor = tile_id_to_coord(tile_id)
    for candidate in nodes_touching_tile(tile_id):
        if tile_node_offset_to_direction(candidate - anchor) == direction:
            return candidate
    raise ValueError('No node found in direction={} at tile_id={}'.format(direction, tile_id))
def tile_id_to_coord(tile_id):
    """
    Convert a tile identifier to its corresponding grid coordinate.

    :param tile_id: tile identifier, Tile.tile_id
    :return: coordinate of the tile, int (-1 if the id does not exist)
    """
    coord = _tile_id_to_coord.get(tile_id)
    if coord is None:
        logging.critical('Attempted conversion of non-existent tile_id={}'.format(tile_id))
        return -1
    return coord
def tile_id_from_coord(coord):
    """
    Convert a tile coordinate to its corresponding tile identifier.

    :param coord: coordinate of the tile, int
    :return: tile identifier, Tile.tile_id
    :raise ValueError: if no tile has the given coordinate
    """
    for tile_id, tile_coord in _tile_id_to_coord.items():
        if tile_coord == coord:
            return tile_id
    # ValueError is more precise than a bare Exception and stays
    # backward-compatible: callers catching Exception still work.
    raise ValueError('Tile id lookup failed, coord={} not found in map'.format(hex(coord)))
def nearest_tile_to_edge(edge_coord):
    """
    Find a tile adjacent to the given edge, searching every tile on the grid.
    Convenience wrapper around nearest_tile_to_edge_using_tiles.

    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    candidates = legal_tile_ids()
    return nearest_tile_to_edge_using_tiles(candidates, edge_coord)
def nearest_tile_to_edge_using_tiles(tile_ids, edge_coord):
    """
    Get the first tile found adjacent to the given edge. Returns a tile
    identifier, or None (after logging) when nothing in tile_ids touches it.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    adjacent = (tid for tid in tile_ids
                if edge_coord - tile_id_to_coord(tid) in _tile_edge_offsets)
    found = next(adjacent, None)
    if found is None:
        logging.critical('Did not find a tile touching edge={}'.format(edge_coord))
    return found
def nearest_tile_to_node(node_coord):
    """
    Find a tile adjacent to the given node, searching every tile on the grid.
    Convenience wrapper around nearest_tile_to_node_using_tiles.

    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    candidates = legal_tile_ids()
    return nearest_tile_to_node_using_tiles(candidates, node_coord)
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
    """
    Get the first tile found adjacent to the given node. Returns a tile
    identifier, or None (after logging) when nothing in tile_ids touches it.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    adjacent = (tid for tid in tile_ids
                if node_coord - tile_id_to_coord(tid) in _tile_node_offsets)
    found = next(adjacent, None)
    if found is None:
        logging.critical('Did not find a tile touching node={}'.format(node_coord))
    return found
def edges_touching_tile(tile_id):
    """
    Get a list of edge coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of edge coordinates touching the given tile, list(int)
    """
    base = tile_id_to_coord(tile_id)
    # One edge per entry in the tile->edge offset table.
    return [base + delta for delta in _tile_edge_offsets]
def nodes_touching_tile(tile_id):
    """
    Get a list of node coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of node coordinates touching the given tile, list(int)
    """
    base = tile_id_to_coord(tile_id)
    # One node per entry in the tile->node offset table.
    return [base + delta for delta in _tile_node_offsets]
def nodes_touching_edge(edge_coord):
    """
    Return the two node coordinates which are on the given edge coordinate.

    The parity of the edge coordinate's two hex digits selects which pair of
    neighbouring node coordinates belongs to the edge.

    :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
    """
    high = hex_digit(edge_coord, 1)
    low = hex_digit(edge_coord, 2)
    both_even = (high % 2 == 0) and (low % 2 == 0)
    if both_even:
        return [coord_from_hex_digits(high, low + 1),
                coord_from_hex_digits(high + 1, low)]
    return [coord_from_hex_digits(high, low),
            coord_from_hex_digits(high + 1, low + 1)]
def legal_edge_coords():
    """
    Return all legal edge coordinates on the grid.
    """
    edges = {edge
             for tile_id in legal_tile_ids()
             for edge in edges_touching_tile(tile_id)}
    logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
    return edges
def legal_tile_ids():
    """
    Return all legal tile identifiers on the grid. In the range [1,19] inclusive.
    """
    # Iterating a dict yields its keys, i.e. the tile identifiers.
    return set(_tile_id_to_coord)
def legal_tile_coords():
    """
    Return the set of all legal tile coordinates on the grid.
    """
    return {coord for coord in _tile_id_to_coord.values()}
def hex_digit(coord, digit=1):
    """
    Return the first or second hexadecimal digit of the given coordinate.

    All grid coordinates used by this module are two hex digits, so digit 1
    is the high nibble and digit 2 is the low nibble.

    :param coord: hexadecimal coordinate, int
    :param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
    :return: int, either the first or second digit
    :raise ValueError: if digit is not 1 or 2
    """
    if digit not in (1, 2):
        raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(
            digit
        ))
    # Arithmetic instead of slicing hex(coord): identical for the module's
    # two-digit coordinates, but avoids an IndexError when asking for the
    # second digit of a single-digit value.
    high, low = divmod(coord, 16)
    return high if digit == 1 else low
def coord_from_hex_digits(digit_1, digit_2):
    """
    Return the integer whose two hexadecimal digits are the given values.

    >>> hex(coord_from_hex_digits(1, 3))
    '0x13'
    >>> hex(coord_from_hex_digits(1, 10))
    '0x1a'

    :param digit_1: first (high) digit, int
    :param digit_2: second (low) digit, int
    :return: hexadecimal coordinate, int
    """
    # Shift the high digit into the upper nibble, then add the low digit.
    return (digit_1 << 4) + digit_2
def rotate_direction(hexgrid_type, direction, ccw=True):
    """
    Rotate a direction string one tick around the hexgrid element it belongs to.

    :param hexgrid_type: hexgrid.TILE, hexgrid.EDGE or hexgrid.NODE
    :param direction: string, eg 'NW', 'N', 'SE'
    :param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
    :return: the rotated direction string, eg 'SW', 'NW', 'S'
    :raise ValueError: if hexgrid_type is not TILE, EDGE or NODE
    """
    # Counter-clockwise ordering for each element family; clockwise is the
    # same cycle walked backwards.
    if hexgrid_type in (TILE, EDGE):
        cycle = ['NW', 'W', 'SW', 'SE', 'E', 'NE']
    elif hexgrid_type == NODE:
        cycle = ['N', 'NW', 'SW', 'S', 'SE', 'NE']
    else:
        raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type))
    if not ccw:
        cycle.reverse()
    return cycle[(cycle.index(direction) + 1) % len(cycle)]
|
rosshamish/hexgrid
|
hexgrid.py
|
hex_digit
|
python
|
def hex_digit(coord, digit=1):
if digit not in [1,2]:
raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(
digit
))
return int(hex(coord)[1+digit], 16)
|
Returns either the first or second digit of the hexadecimal representation of the given coordinate.
:param coord: hexadecimal coordinate, int
:param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
:return: int, either the first or second digit
|
train
|
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L439-L450
| null |
"""
module hexgrid provides functions for working with a hexagonal settlers of catan grid.
This module implements the coordinate system described in Robert S. Thomas's PhD dissertation on
JSettlers2, Appendix A. See the project at https://github.com/jdmonin/JSettlers2 for details.
Grids have tiles, nodes, and edges. Tiles, nodes, and edges all have coordinates
on the grid. Tiles also have identifiers numbered counter-clockwise starting from
the north-west edge. There are 19 tiles.
Adjacent locations can be computed by adding an offset to the given location. These
offsets are defined as dictionaries named _<type1>_<type2>_offsets, mapping offset->direction.
This direction is a cardinal direction represented as a string.
The edge and node coordinate spaces share values. That is, the coordinate value is
not enough to uniquely identify a location on the grid. For that reason, it is recommended
to represent locations as a (CoordType, 0xCoord) pair, each of which is guaranteed
to be unique.
See individual methods for usage.
"""
import logging
__author__ = "Ross Anderson <ross.anderson@ualberta.ca>"
__version__ = "0.2.1"
EDGE = 0
NODE = 1
TILE = 2
_tile_id_to_coord = {
# 1-19 clockwise starting from Top-Left
1: 0x37, 12: 0x59, 11: 0x7B,
2: 0x35, 13: 0x57, 18: 0x79, 10: 0x9B,
3: 0x33, 14: 0x55, 19: 0x77, 17: 0x99, 9: 0xBB,
4: 0x53, 15: 0x75, 16: 0x97, 8: 0xB9,
5: 0x73, 6: 0x95, 7: 0xB7
}
_tile_tile_offsets = {
# tile_coord - tile_coord
-0x20: 'NW',
-0x22: 'W',
-0x02: 'SW',
+0x20: 'SE',
+0x22: 'E',
+0x02: 'NE',
}
_tile_node_offsets = {
# node_coord - tile_coord
+0x01: 'N',
-0x10: 'NW',
-0x01: 'SW',
+0x10: 'S',
+0x21: 'SE',
+0x12: 'NE',
}
_tile_edge_offsets = {
# edge_coord - tile_coord
-0x10: 'NW',
-0x11: 'W',
-0x01: 'SW',
+0x10: 'SE',
+0x11: 'E',
+0x01: 'NE',
}
def location(hexgrid_type, coord):
"""
Returns a formatted string representing the coordinate. The format depends on the
coordinate type.
Tiles look like: 1, 12
Nodes look like: (1 NW), (12 S)
Edges look like: (1 NW), (12 SE)
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param coord: integer coordinate in this module's hexadecimal coordinate system
:return: formatted string for display
"""
if hexgrid_type == TILE:
return str(coord)
elif hexgrid_type == NODE:
tile_id = nearest_tile_to_node(coord)
dirn = tile_node_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
elif hexgrid_type == EDGE:
tile_id = nearest_tile_to_edge(coord)
dirn = tile_edge_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def from_location(hexgrid_type, tile_id, direction=None):
"""
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param tile_id: tile identifier, int
:param direction: str
:return: integer coordinate in this module's hexadecimal coordinate system
"""
if hexgrid_type == TILE:
if direction is not None:
raise ValueError('tiles do not have a direction')
return tile_id_to_coord(tile_id)
elif hexgrid_type == NODE:
return node_coord_in_direction(tile_id, direction)
elif hexgrid_type == EDGE:
return edge_coord_in_direction(tile_id, direction)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def coastal_tile_ids():
"""
Returns a list of tile identifiers which lie on the border of the grid.
"""
return list(filter(lambda tid: len(coastal_edges(tid)) > 0, legal_tile_ids()))
def coastal_coords():
"""
A coastal coord is a 2-tuple: (tile id, direction).
An edge is coastal if it is on the grid's border.
:return: list( (tile_id, direction) )
"""
coast = list()
for tile_id in coastal_tile_ids():
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in coastal_edges(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
coast.append((tile_id, dirn))
# logging.debug('coast={}'.format(coast))
return coast
def coastal_edges(tile_id):
"""
Returns a list of coastal edge coordinate.
An edge is coastal if it is on the grid's border.
:return: list(int)
"""
edges = list()
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in edges_touching_tile(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
edges.append(edge_coord)
return edges
def tile_id_in_direction(from_tile_id, direction):
"""
Variant on direction_to_tile. Returns None if there's no tile there.
:param from_tile_id: tile identifier, int
:param direction: str
:return: tile identifier, int or None
"""
coord_from = tile_id_to_coord(from_tile_id)
for offset, dirn in _tile_tile_offsets.items():
if dirn == direction:
coord_to = coord_from + offset
if coord_to in legal_tile_coords():
return tile_id_from_coord(coord_to)
return None
def direction_to_tile(from_tile_id, to_tile_id):
"""
Convenience method wrapping tile_tile_offset_to_direction. Used to get the direction
of the offset between two tiles. The tiles must be adjacent.
:param from_tile_id: tile identifier, int
:param to_tile_id: tile identifier, int
:return: direction from from_tile to to_tile, str
"""
coord_from = tile_id_to_coord(from_tile_id)
coord_to = tile_id_to_coord(to_tile_id)
direction = tile_tile_offset_to_direction(coord_to - coord_from)
# logging.debug('Tile direction: {}->{} is {}'.format(
# from_tile.tile_id,
# to_tile.tile_id,
# direction
# ))
return direction
def tile_tile_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-tile offset. The tiles must be adjacent.
:param offset: tile_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_tile_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-tile offset={:x}'.format(offset))
return 'ZZ'
def tile_node_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-node offset. The tile and node must be adjacent.
:param offset: node_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_node_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-node offset={:x}'.format(offset))
return 'ZZ'
def tile_edge_offset_to_direction(offset):
"""
Get the cardinal direction of a tile-edge offset. The tile and edge must be adjacent.
:param offset: edge_coord - tile_coord, int
:return: direction of the offset, str
"""
try:
return _tile_edge_offsets[offset]
except KeyError:
logging.critical('Attempted getting direction of non-existent tile-edge offset={:x}'.format(offset))
return 'ZZ'
def edge_coord_in_direction(tile_id, direction):
"""
Returns the edge coordinate in the given direction at the given tile identifier.
:param tile_id: tile identifier, int
:param direction: direction, str
:return: edge coord, int
"""
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in edges_touching_tile(tile_id):
if tile_edge_offset_to_direction(edge_coord - tile_coord) == direction:
return edge_coord
raise ValueError('No edge found in direction={} at tile_id={}'.format(
direction,
tile_id
))
def node_coord_in_direction(tile_id, direction):
"""
Returns the node coordinate in the given direction at the given tile identifier.
:param tile_id: tile identifier, int
:param direction: direction, str
:return: node coord, int
"""
tile_coord = tile_id_to_coord(tile_id)
for node_coord in nodes_touching_tile(tile_id):
if tile_node_offset_to_direction(node_coord - tile_coord) == direction:
return node_coord
raise ValueError('No node found in direction={} at tile_id={}'.format(
direction,
tile_id
))
def tile_id_to_coord(tile_id):
"""
Convert a tile identifier to its corresponding grid coordinate.
:param tile_id: tile identifier, Tile.tile_id
:return: coordinate of the tile, int
"""
try:
return _tile_id_to_coord[tile_id]
except KeyError:
logging.critical('Attempted conversion of non-existent tile_id={}'.format(tile_id))
return -1
def tile_id_from_coord(coord):
"""
Convert a tile coordinate to its corresponding tile identifier.
:param coord: coordinate of the tile, int
:return: tile identifier, Tile.tile_id
"""
for i, c in _tile_id_to_coord.items():
if c == coord:
return i
raise Exception('Tile id lookup failed, coord={} not found in map'.format(hex(coord)))
def nearest_tile_to_edge(edge_coord):
"""
Convenience method wrapping nearest_tile_to_edge_using_tiles. Looks at all tiles in legal_tile_ids().
Returns a tile identifier.
:param edge_coord: edge coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
return nearest_tile_to_edge_using_tiles(legal_tile_ids(), edge_coord)
def nearest_tile_to_edge_using_tiles(tile_ids, edge_coord):
"""
Get the first tile found adjacent to the given edge. Returns a tile identifier.
:param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
:param edge_coord: edge coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
for tile_id in tile_ids:
if edge_coord - tile_id_to_coord(tile_id) in _tile_edge_offsets.keys():
return tile_id
logging.critical('Did not find a tile touching edge={}'.format(edge_coord))
def nearest_tile_to_node(node_coord):
"""
Convenience method wrapping nearest_tile_to_node_using_tiles. Looks at all tiles in legal_tile_ids().
Returns a tile identifier.
:param node_coord: node coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
return nearest_tile_to_node_using_tiles(legal_tile_ids(), node_coord)
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
"""
Get the first tile found adjacent to the given node. Returns a tile identifier.
:param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
:param node_coord: node coordinate to find an adjacent tile to, int
:return: tile identifier of an adjacent tile, Tile.tile_id
"""
for tile_id in tile_ids:
if node_coord - tile_id_to_coord(tile_id) in _tile_node_offsets.keys():
return tile_id
logging.critical('Did not find a tile touching node={}'.format(node_coord))
def edges_touching_tile(tile_id):
"""
Get a list of edge coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of edge coordinates touching the given tile, list(int)
"""
coord = tile_id_to_coord(tile_id)
edges = []
for offset in _tile_edge_offsets.keys():
edges.append(coord + offset)
# logging.debug('tile_id={}, edges touching={}'.format(tile_id, edges))
return edges
def nodes_touching_tile(tile_id):
"""
Get a list of node coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of node coordinates touching the given tile, list(int)
"""
coord = tile_id_to_coord(tile_id)
nodes = []
for offset in _tile_node_offsets.keys():
nodes.append(coord + offset)
# logging.debug('tile_id={}, nodes touching={}'.format(tile_id, nodes))
return nodes
def nodes_touching_edge(edge_coord):
    """
    Returns the two node coordinates which are on the given edge coordinate.

    The parity of the edge coordinate's two hex digits selects which pair of
    neighbouring node coordinates belongs to the edge.

    :param edge_coord: edge coordinate, int
    :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
    """
    # a = high hex digit, b = low hex digit of the edge coordinate.
    a, b = hex_digit(edge_coord, 1), hex_digit(edge_coord, 2)
    if a % 2 == 0 and b % 2 == 0:
        return [coord_from_hex_digits(a, b + 1),
                coord_from_hex_digits(a + 1, b)]
    else:
        return [coord_from_hex_digits(a, b),
                coord_from_hex_digits(a + 1, b + 1)]
def legal_edge_coords():
"""
Return all legal edge coordinates on the grid.
"""
edges = set()
for tile_id in legal_tile_ids():
for edge in edges_touching_tile(tile_id):
edges.add(edge)
logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
return edges
def legal_node_coords():
"""
Return all legal node coordinates on the grid
"""
nodes = set()
for tile_id in legal_tile_ids():
for node in nodes_touching_tile(tile_id):
nodes.add(node)
logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
return nodes
def legal_tile_ids():
"""
Return all legal tile identifiers on the grid. In the range [1,19] inclusive.
"""
return set(_tile_id_to_coord.keys())
def legal_tile_coords():
"""
Return all legal tile coordinates on the grid
"""
return set(_tile_id_to_coord.values())
def coord_from_hex_digits(digit_1, digit_2):
    """
    Returns an integer representing the hexadecimal coordinate with the two given hexadecimal digits.

    >>> hex(coord_from_hex_digits(1, 3))
    '0x13'
    >>> hex(coord_from_hex_digits(1, 10))
    '0x1a'

    :param digit_1: first (high) digit, int
    :param digit_2: second (low) digit, int
    :return: hexadecimal coordinate, int
    """
    return digit_1*16 + digit_2
def rotate_direction(hexgrid_type, direction, ccw=True):
    """
    Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.

    :param hexgrid_type: hexgrid.TILE, hexgrid.EDGE or hexgrid.NODE
    :param direction: string, eg 'NW', 'N', 'SE'
    :param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
    :return: the rotated direction string, eg 'SW', 'NW', 'S'
    :raise ValueError: if hexgrid_type is not TILE, EDGE or NODE
    """
    # Each list repeats its first element at the end so that
    # index(direction) + 1 is always in range (the rotation wraps
    # around without needing a modulo).
    if hexgrid_type in [TILE, EDGE]:
        directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw \
            else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
        return directions[directions.index(direction) + 1]
    elif hexgrid_type in [NODE]:
        directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw \
            else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
        return directions[directions.index(direction) + 1]
    else:
        raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type))
|
rosshamish/hexgrid
|
hexgrid.py
|
rotate_direction
|
python
|
def rotate_direction(hexgrid_type, direction, ccw=True):
if hexgrid_type in [TILE, EDGE]:
directions = ['NW', 'W', 'SW', 'SE', 'E', 'NE', 'NW'] if ccw \
else ['NW', 'NE', 'E', 'SE', 'SW', 'W', 'NW']
return directions[directions.index(direction) + 1]
elif hexgrid_type in [NODE]:
directions = ['N', 'NW', 'SW', 'S', 'SE', 'NE', 'N'] if ccw \
else ['N', 'NE', 'SE', 'S', 'SW', 'NW', 'N']
return directions[directions.index(direction) + 1]
else:
raise ValueError('Invalid hexgrid type={} passed to rotate direction'.format(hexgrid_type))
|
Takes a direction string associated with a type of hexgrid element, and rotates it one tick in the given direction.
:param direction: string, eg 'NW', 'N', 'SE'
:param ccw: if True, rotates counter clockwise. Otherwise, rotates clockwise.
:return: the rotated direction string, eg 'SW', 'NW', 'S'
|
train
|
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L469-L485
| null |
"""
module hexgrid provides functions for working with a hexagonal settlers of catan grid.
This module implements the coordinate system described in Robert S. Thomas's PhD dissertation on
JSettlers2, Appendix A. See the project at https://github.com/jdmonin/JSettlers2 for details.
Grids have tiles, nodes, and edges. Tiles, nodes, and edges all have coordinates
on the grid. Tiles also have identifiers numbered counter-clockwise starting from
the north-west edge. There are 19 tiles.
Adjacent locations can be computed by adding an offset to the given location. These
offsets are defined as dictionaries named _<type1>_<type2>_offsets, mapping offset->direction.
This direction is a cardinal direction represented as a string.
The edge and node coordinate spaces share values. That is, the coordinate value is
not enough to uniquely identify a location on the grid. For that reason, it is recommended
to represent locations as a (CoordType, 0xCoord) pair, each of which is guaranteed
to be unique.
See individual methods for usage.
"""
import logging
__author__ = "Ross Anderson <ross.anderson@ualberta.ca>"
__version__ = "0.2.1"
EDGE = 0
NODE = 1
TILE = 2
_tile_id_to_coord = {
# 1-19 clockwise starting from Top-Left
1: 0x37, 12: 0x59, 11: 0x7B,
2: 0x35, 13: 0x57, 18: 0x79, 10: 0x9B,
3: 0x33, 14: 0x55, 19: 0x77, 17: 0x99, 9: 0xBB,
4: 0x53, 15: 0x75, 16: 0x97, 8: 0xB9,
5: 0x73, 6: 0x95, 7: 0xB7
}
_tile_tile_offsets = {
# tile_coord - tile_coord
-0x20: 'NW',
-0x22: 'W',
-0x02: 'SW',
+0x20: 'SE',
+0x22: 'E',
+0x02: 'NE',
}
_tile_node_offsets = {
# node_coord - tile_coord
+0x01: 'N',
-0x10: 'NW',
-0x01: 'SW',
+0x10: 'S',
+0x21: 'SE',
+0x12: 'NE',
}
_tile_edge_offsets = {
# edge_coord - tile_coord
-0x10: 'NW',
-0x11: 'W',
-0x01: 'SW',
+0x10: 'SE',
+0x11: 'E',
+0x01: 'NE',
}
def location(hexgrid_type, coord):
"""
Returns a formatted string representing the coordinate. The format depends on the
coordinate type.
Tiles look like: 1, 12
Nodes look like: (1 NW), (12 S)
Edges look like: (1 NW), (12 SE)
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param coord: integer coordinate in this module's hexadecimal coordinate system
:return: formatted string for display
"""
if hexgrid_type == TILE:
return str(coord)
elif hexgrid_type == NODE:
tile_id = nearest_tile_to_node(coord)
dirn = tile_node_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
elif hexgrid_type == EDGE:
tile_id = nearest_tile_to_edge(coord)
dirn = tile_edge_offset_to_direction(coord - tile_id_to_coord(tile_id))
return '({} {})'.format(tile_id, dirn)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def from_location(hexgrid_type, tile_id, direction=None):
"""
:param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE
:param tile_id: tile identifier, int
:param direction: str
:return: integer coordinate in this module's hexadecimal coordinate system
"""
if hexgrid_type == TILE:
if direction is not None:
raise ValueError('tiles do not have a direction')
return tile_id_to_coord(tile_id)
elif hexgrid_type == NODE:
return node_coord_in_direction(tile_id, direction)
elif hexgrid_type == EDGE:
return edge_coord_in_direction(tile_id, direction)
else:
logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
return None
def coastal_tile_ids():
"""
Returns a list of tile identifiers which lie on the border of the grid.
"""
return list(filter(lambda tid: len(coastal_edges(tid)) > 0, legal_tile_ids()))
def coastal_coords():
"""
A coastal coord is a 2-tuple: (tile id, direction).
An edge is coastal if it is on the grid's border.
:return: list( (tile_id, direction) )
"""
coast = list()
for tile_id in coastal_tile_ids():
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in coastal_edges(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
coast.append((tile_id, dirn))
# logging.debug('coast={}'.format(coast))
return coast
def coastal_edges(tile_id):
"""
Returns a list of coastal edge coordinate.
An edge is coastal if it is on the grid's border.
:return: list(int)
"""
edges = list()
tile_coord = tile_id_to_coord(tile_id)
for edge_coord in edges_touching_tile(tile_id):
dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
if tile_id_in_direction(tile_id, dirn) is None:
edges.append(edge_coord)
return edges
def tile_id_in_direction(from_tile_id, direction):
    """
    Variant on direction_to_tile. Returns None if there's no tile there.

    :param from_tile_id: tile identifier, int
    :param direction: str
    :return: tile identifier, int or None
    """
    coord_from = tile_id_to_coord(from_tile_id)
    legal_coords = legal_tile_coords()
    for offset, offset_direction in _tile_tile_offsets.items():
        if offset_direction != direction:
            continue
        candidate = coord_from + offset
        if candidate in legal_coords:
            return candidate and tile_id_from_coord(candidate) or tile_id_from_coord(candidate)
    return None
def direction_to_tile(from_tile_id, to_tile_id):
    """
    Convenience method wrapping tile_tile_offset_to_direction. Used to get the direction
    of the offset between two tiles. The tiles must be adjacent.

    :param from_tile_id: tile identifier, int
    :param to_tile_id: tile identifier, int
    :return: direction from from_tile to to_tile, str
    """
    offset = tile_id_to_coord(to_tile_id) - tile_id_to_coord(from_tile_id)
    return tile_tile_offset_to_direction(offset)
def tile_tile_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-tile offset. The tiles must be adjacent.

    :param offset: tile_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' if the offset is unknown)
    """
    direction = _tile_tile_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-tile offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_node_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-node offset. The tile and node must be adjacent.

    :param offset: node_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' if the offset is unknown)
    """
    direction = _tile_node_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-node offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def tile_edge_offset_to_direction(offset):
    """
    Get the cardinal direction of a tile-edge offset. The tile and edge must be adjacent.

    :param offset: edge_coord - tile_coord, int
    :return: direction of the offset, str ('ZZ' if the offset is unknown)
    """
    direction = _tile_edge_offsets.get(offset)
    if direction is None:
        logging.critical('Attempted getting direction of non-existent tile-edge offset={:x}'.format(offset))
        return 'ZZ'
    return direction
def edge_coord_in_direction(tile_id, direction):
    """
    Returns the edge coordinate in the given direction at the given tile identifier.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :return: edge coord, int
    :raise ValueError: if no edge of the tile lies in the given direction
    """
    tile_coord = tile_id_to_coord(tile_id)
    found = next((edge_coord for edge_coord in edges_touching_tile(tile_id)
                  if tile_edge_offset_to_direction(edge_coord - tile_coord) == direction),
                 None)
    if found is not None:
        return found
    raise ValueError('No edge found in direction={} at tile_id={}'.format(
        direction,
        tile_id
    ))
def node_coord_in_direction(tile_id, direction):
    """
    Returns the node coordinate in the given direction at the given tile identifier.

    :param tile_id: tile identifier, int
    :param direction: direction, str
    :return: node coord, int
    :raise ValueError: if no node of the tile lies in the given direction
    """
    tile_coord = tile_id_to_coord(tile_id)
    found = next((node_coord for node_coord in nodes_touching_tile(tile_id)
                  if tile_node_offset_to_direction(node_coord - tile_coord) == direction),
                 None)
    if found is not None:
        return found
    raise ValueError('No node found in direction={} at tile_id={}'.format(
        direction,
        tile_id
    ))
def tile_id_to_coord(tile_id):
    """
    Convert a tile identifier to its corresponding grid coordinate.

    :param tile_id: tile identifier, Tile.tile_id
    :return: coordinate of the tile, int (-1 when the identifier is unknown)
    """
    if tile_id in _tile_id_to_coord:
        return _tile_id_to_coord[tile_id]
    logging.critical('Attempted conversion of non-existent tile_id={}'.format(tile_id))
    return -1
def tile_id_from_coord(coord):
    """
    Convert a tile coordinate to its corresponding tile identifier.

    :param coord: coordinate of the tile, int
    :return: tile identifier, Tile.tile_id
    :raise ValueError: if no tile has the given coordinate
    """
    for tile_id, tile_coord in _tile_id_to_coord.items():
        if tile_coord == coord:
            return tile_id
    # ValueError is more precise than the bare Exception previously raised here,
    # and is backward-compatible: callers catching Exception still catch it.
    raise ValueError('Tile id lookup failed, coord={} not found in map'.format(hex(coord)))
def nearest_tile_to_edge(edge_coord):
    """
    Convenience method wrapping nearest_tile_to_edge_using_tiles. Looks at all tiles in legal_tile_ids().
    Returns a tile identifier.

    Note: like the wrapped function, this logs critically and implicitly
    returns None when no tile touches the given edge.

    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    return nearest_tile_to_edge_using_tiles(legal_tile_ids(), edge_coord)
def nearest_tile_to_edge_using_tiles(tile_ids, edge_coord):
    """
    Get the first tile found adjacent to the given edge. Returns a tile identifier.

    Logs critically and implicitly returns None when no tile in tile_ids
    touches the edge.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param edge_coord: edge coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    known_offsets = _tile_edge_offsets.keys()
    for tile_id in tile_ids:
        offset = edge_coord - tile_id_to_coord(tile_id)
        if offset in known_offsets:
            return tile_id
    logging.critical('Did not find a tile touching edge={}'.format(edge_coord))
def nearest_tile_to_node(node_coord):
    """
    Convenience method wrapping nearest_tile_to_node_using_tiles. Looks at all tiles in legal_tile_ids().
    Returns a tile identifier.

    Note: like the wrapped function, this logs critically and implicitly
    returns None when no tile touches the given node.

    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    return nearest_tile_to_node_using_tiles(legal_tile_ids(), node_coord)
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
    """
    Get the first tile found adjacent to the given node. Returns a tile identifier.

    Logs critically and implicitly returns None when no tile in tile_ids
    touches the node.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id
    """
    known_offsets = _tile_node_offsets.keys()
    for tile_id in tile_ids:
        offset = node_coord - tile_id_to_coord(tile_id)
        if offset in known_offsets:
            return tile_id
    logging.critical('Did not find a tile touching node={}'.format(node_coord))
def edges_touching_tile(tile_id):
    """
    Get a list of edge coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of edge coordinates touching the given tile, list(int)
    """
    coord = tile_id_to_coord(tile_id)
    # Each known tile->edge offset yields one adjacent edge coordinate.
    return [coord + offset for offset in _tile_edge_offsets]
def nodes_touching_tile(tile_id):
    """
    Get a list of node coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of node coordinates touching the given tile, list(int)
    """
    coord = tile_id_to_coord(tile_id)
    # Each known tile->node offset yields one adjacent node coordinate.
    return [coord + offset for offset in _tile_node_offsets]
def nodes_touching_edge(edge_coord):
    """
    Returns the two node coordinates which are on the given edge coordinate.

    :return: list of 2 node coordinates which are on the given edge coordinate, list(int)
    """
    first = hex_digit(edge_coord, 1)
    second = hex_digit(edge_coord, 2)
    if first % 2 == 0 and second % 2 == 0:
        # Both digits even: nodes are at (first, second+1) and (first+1, second).
        digit_deltas = ((0, 1), (1, 0))
    else:
        # Otherwise: nodes are at (first, second) and (first+1, second+1).
        digit_deltas = ((0, 0), (1, 1))
    return [coord_from_hex_digits(first + d1, second + d2) for d1, d2 in digit_deltas]
def legal_edge_coords():
    """
    Return all legal edge coordinates on the grid.
    """
    edges = {edge
             for tile_id in legal_tile_ids()
             for edge in edges_touching_tile(tile_id)}
    logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
    return edges
def legal_node_coords():
    """
    Return all legal node coordinates on the grid
    """
    nodes = {node
             for tile_id in legal_tile_ids()
             for node in nodes_touching_tile(tile_id)}
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
def legal_tile_ids():
    """
    Return all legal tile identifiers on the grid. In the range [1,19] inclusive.
    """
    # Iterating the mapping directly yields its keys (the tile identifiers).
    return set(_tile_id_to_coord)
def legal_tile_coords():
    """
    Return all legal tile coordinates on the grid

    :return: set of hexadecimal tile coordinates, set(int)
    """
    return set(_tile_id_to_coord.values())
def hex_digit(coord, digit=1):
    """
    Returns either the first or second digit of the hexadecimal representation of the given coordinate.

    :param coord: hexadecimal coordinate, int
    :param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
    :return: int, either the first or second digit
    :raise ValueError: if digit is anything other than 1 or 2
    """
    if digit not in (1, 2):
        raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(
            digit
        ))
    # hex(coord) looks like '0xAB': index 2 holds the first digit, index 3 the second.
    return int(hex(coord)[digit + 1], 16)
def coord_from_hex_digits(digit_1, digit_2):
    """
    Returns an integer representing the hexadecimal coordinate with the two given hexadecimal digits.

    >> hex(coord_from_hex_digits(1, 3))
    '0x13'
    >> hex(coord_from_hex_digits(1, 10))
    '0x1A'

    :param digit_1: first digit, int
    :param digit_2: second digit, int
    :return: hexadecimal coordinate, int
    """
    # Place digit_1 in the 16s place and digit_2 in the 1s place.
    return digit_1 * 0x10 + digit_2
|
weijia/djangoautoconf
|
djangoautoconf/django_zip_template_loader.py
|
load_template_source
|
python
|
def load_template_source(template_name, template_dirs=None):
template_zipfiles = getattr(settings, "TEMPLATE_ZIP_FILES", [])
# Try each ZIP file in TEMPLATE_ZIP_FILES.
for fname in template_zipfiles:
try:
z = zipfile.ZipFile(fname)
source = z.read(template_name)
except (IOError, KeyError):
continue
z.close()
# We found a template, so return the source.
template_path = "%s:%s" % (fname, template_name)
return (source, template_path)
# If we reach here, the template couldn't be loaded
raise TemplateDoesNotExist(template_name)
|
Template loader that loads templates from a ZIP file.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/django_zip_template_loader.py#L6-L23
| null |
import zipfile
from django.conf import settings
from django.template import TemplateDoesNotExist
# This loader is always usable (since zipfile is included with Python)
load_template_source.is_usable = True
|
weijia/djangoautoconf
|
djangoautoconf/django_autoconf.py
|
DjangoAutoConf.set_settings_env
|
python
|
def set_settings_env(executable_folder=None):
executable_folder = executable_folder or get_executable_folder()
# print "!!!!!!!!!!!!!! executable:", executable_folder
if os.path.exists(os.path.join(executable_folder, "local/total_settings.py")):
print("Using total settings")
os.chdir(executable_folder)
os.environ["DJANGO_SETTINGS_MODULE"] = "local.total_settings"
os.environ["STATIC_ROOT"] = os.path.join(executable_folder, "static")
os.environ["MEDIA_ROOT"] = os.path.join(executable_folder, "media")
else:
os.environ.setdefault('ROOT_DIR', get_folder(get_inspection_frame(2)))
os.environ["DJANGO_SETTINGS_MODULE"] = "djangoautoconf.base_settings"
|
Add all application folders
:param executable_folder: the folder that contains local and external_app_repos
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/django_autoconf.py#L111-L127
| null |
class DjangoAutoConf(DjangoSettingManager):
"""
external_app_repos
repo folder
server app folders/modules may be imported
"""
AUTO_DETECT_CONFIG_FILENAME = "default_settings.py"
def __init__(self, default_settings_import_str=None):
super(DjangoAutoConf, self).__init__(default_settings_import_str)
# Default keys is located at ../keys relative to universal_settings module?
# self.extra_settings_in_base_package_folder = "others/extra_settings" # base extra setting
self.key_dir = None
self.local_key_folder = None
self.extra_setting_module_full_names = []
self.project_path = None
self.server_base_package_folder = "server_base_packages"
self.local_key_folder_relative_to_root = os.path.join(self.local_folder_name, self.local_key_folder_name)
self.external_apps_folder = None
self.installed_app_list = None
self.external_app_repositories = None
self.external_app_repositories_full_path = None
self.external_app_folder_name = None
def get_full_path(self, relative_path):
return os.path.join(self.root_dir, relative_path)
def set_external_app_repositories(self, external_app_repositories):
if os.path.isabs(external_app_repositories):
# print external_app_repositories
self.external_app_repositories_full_path = external_app_repositories
else:
# print self.root_dir, external_app_repositories, "not abs"
self.external_app_repositories_full_path = os.path.join(self.root_dir, external_app_repositories)
self.external_app_repositories = external_app_repositories
# self.add_extra_setting_relative_folder_for_repo(external_app_repositories)
logging.debug("Added: " + external_app_repositories)
full_path_of_repo_root = self.get_full_path(external_app_repositories)
for folder_full_path in enum_folders(full_path_of_repo_root):
if os.path.isdir(folder_full_path):
logging.debug("Scanning: " + folder_full_path)
include_all_direct_subfolders(folder_full_path)
def set_external_app_folder_name(self, external_app_folder_name):
self.external_app_folder_name = external_app_folder_name
def set_root_dir(self, root_dir):
self.root_dir = os.path.abspath(root_dir)
self.project_path = os.path.abspath(os.path.abspath(self.root_dir))
self.local_key_folder = os.path.join(self.root_dir, self.local_key_folder_relative_to_root)
self.local_app_setting_folders.append(os.path.join(self.root_dir, self.local_settings_relative_folder))
self.setting_storage = ObjectSettingStorage(self.root_dir)
def set_key_dir(self, key_dir):
self.key_dir = key_dir
self.local_key_folder = os.path.join(self.key_dir, self.local_key_folder_name)
def set_local_key_folder(self, local_key_folder):
self.local_key_folder = local_key_folder
def configure(self, features=[]):
self.__check_params()
self.set_settings_env()
self.load_settings_in_project_template()
self.set_project_folders_in_settings()
self.load_all_extra_settings(features)
self.setting_storage.add_secret_key(self.get_or_create_secret_key(self.get_local_key_folder()))
self.update_installed_apps_etc()
# self.setting_storage.remove_empty_list()
self.setting_storage.refine_attributes()
dump_attrs(self.setting_storage.get_settings())
@staticmethod
def exe():
from django.core.management import execute_from_command_line
argv = sys.argv
if argv[0] == "manage.exe":
argv = [""].extend(argv)
execute_from_command_line(argv)
@staticmethod
def __check_params(self):
if not os.path.exists(self.root_dir):
raise RootDirNotExist
if not os.path.exists(self.local_key_folder):
# logging.getLogger().error("key dir not exist: "+self.key_dir)
print("key dir not exist: " + self.local_key_folder)
raise LocalKeyFolderNotExist
def get_local_key_folder(self):
if self.local_key_folder is None:
return os.path.join(self.key_dir, "local_keys")
return self.local_key_folder
def get_project_path(self):
if self.project_path is None:
raise "Root path is not set"
return self.project_path
# noinspection PyMethodMayBeStatic
def is_valid_app_module(self, app_module_folder_full_path):
signature_filename_list = [self.AUTO_DETECT_CONFIG_FILENAME, "default_urls.py", "urls.py"]
return os.path.isdir(app_module_folder_full_path) and is_at_least_one_sub_filesystem_item_exists(
app_module_folder_full_path, signature_filename_list)
def get_external_apps_folder(self):
if self.external_apps_folder is None:
self.external_apps_folder = os.path.join(self.get_project_path(), self.external_app_folder_name)
return self.external_apps_folder
def get_external_apps_repositories(self):
if self.external_app_repositories_full_path is None:
return [self.get_external_apps_folder(), ]
else:
return enum_folders(self.external_app_repositories_full_path)
def enum_app_root_folders_in_repo(self):
for repo in self.get_external_apps_repositories():
for apps_root_folder in enum_folders(repo):
yield apps_root_folder
def enum_app_module_folders(self):
for app_root_folder in self.enum_app_root_folders_in_repo():
for app_module_folder in enum_folders(app_root_folder):
yield app_module_folder
def install_auto_detected_apps(self):
self.installed_app_list = self.setting_storage.get_installed_apps()
for app_module_folder in self.enum_app_module_folders():
if self.is_valid_app_module(app_module_folder):
app_module_folder_name = os.path.basename(app_module_folder)
app_root_folder = os.path.dirname(app_module_folder)
include(app_root_folder)
self.installed_app_list.append(app_module_folder_name)
self.setting_storage.set_attr("INSTALLED_APPS", tuple(self.installed_app_list))
def update_installed_apps_etc(self):
self.install_auto_detected_apps()
def set_project_folders_in_settings(self):
self.setting_storage.set_attr("PROJECT_PATH", self.get_project_path())
# setattr(base_settings, "TEMPLATE_CONTEXT_PROCESSORS", tuple())
self.setting_storage.set_attr("DJANGO_AUTO_CONF_LOCAL_DIR", os.path.join(
self.get_project_path(), self.local_folder_name))
self.setting_storage.set_attr("STATIC_ROOT", os.path.abspath(os.path.join(self.get_project_path(), 'static')))
def load_all_extra_settings(self, features):
self.update_base_settings_with_features(features)
self.load_extra_settings_in_folders()
self.__load_default_setting_from_apps()
def __load_default_setting_from_apps(self):
for app_module_folder in self.enum_app_module_folders():
default_settings_full_path = os.path.join(app_module_folder, self.AUTO_DETECT_CONFIG_FILENAME)
if os.path.exists(default_settings_full_path) and not os.path.isdir(default_settings_full_path):
app_module_folder_name = os.path.basename(app_module_folder)
app_root_folder = os.path.dirname(app_module_folder)
include(app_root_folder)
self.setting_storage.import_based_on_base_settings(
"%s.%s" % (app_module_folder_name,
self.AUTO_DETECT_CONFIG_FILENAME.split(
".")[0]))
remove_folder_in_sys_path(app_root_folder)
def load_settings_in_project_template(self):
if self.setting_storage.is_above_or_equal_to_django1_11():
full_path = os.path.join(get_folder(django.__file__), "conf/project_template/project_name/settings.py-tpl")
f = open(full_path)
module_content = f.read()
self.setting_storage.eval_content(module_content)
else:
template_root = os.path.join(get_folder(django.__file__), "conf/project_template/")
include(template_root)
self.setting_storage.import_based_on_base_settings("project_name.settings")
exclude(template_root)
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/detail_with_inline_view.py
|
all_valid
|
python
|
def all_valid(formsets):
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
|
Returns true if every formset in formsets is valid.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/detail_with_inline_view.py#L7-L13
| null |
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.views.generic import DetailView
class DetailWithInlineView(DetailView):
template_name = "detail_with_inline_view.html"
inlines = []
model = None
success_url = ""
def get_context_data(self, **kwargs):
context = super(DetailWithInlineView, self).get_context_data(**kwargs)
inlines = self.construct_inlines()
context.update({"inlines": inlines})
return context
def get_inlines(self):
"""
Returns the inline formset classes
"""
return self.inlines
def forms_valid(self, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url())
def forms_invalid(self, inlines):
"""
If the form or formsets are invalid, re-render the context data with the
data-filled form and formsets and errors.
"""
return self.render_to_response(self.get_context_data(inlines=inlines))
def construct_inlines(self):
"""
Returns the inline formset instances
"""
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form and formset instances with the passed
POST variables and then checked for validity.
"""
self.object = self.get_object()
self.get_context_data()
inlines = self.construct_inlines()
if all_valid(inlines):
return self.forms_valid(inlines)
return self.forms_invalid(inlines)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/detail_with_inline_view.py
|
DetailWithInlineView.forms_valid
|
python
|
def forms_valid(self, inlines):
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url())
|
If the form and formsets are valid, save the associated models.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/detail_with_inline_view.py#L34-L40
| null |
class DetailWithInlineView(DetailView):
template_name = "detail_with_inline_view.html"
inlines = []
model = None
success_url = ""
def get_context_data(self, **kwargs):
context = super(DetailWithInlineView, self).get_context_data(**kwargs)
inlines = self.construct_inlines()
context.update({"inlines": inlines})
return context
def get_inlines(self):
"""
Returns the inline formset classes
"""
return self.inlines
def forms_invalid(self, inlines):
"""
If the form or formsets are invalid, re-render the context data with the
data-filled form and formsets and errors.
"""
return self.render_to_response(self.get_context_data(inlines=inlines))
def construct_inlines(self):
"""
Returns the inline formset instances
"""
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form and formset instances with the passed
POST variables and then checked for validity.
"""
self.object = self.get_object()
self.get_context_data()
inlines = self.construct_inlines()
if all_valid(inlines):
return self.forms_valid(inlines)
return self.forms_invalid(inlines)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/detail_with_inline_view.py
|
DetailWithInlineView.post
|
python
|
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.get_context_data()
inlines = self.construct_inlines()
if all_valid(inlines):
return self.forms_valid(inlines)
return self.forms_invalid(inlines)
|
Handles POST requests, instantiating a form and formset instances with the passed
POST variables and then checked for validity.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/detail_with_inline_view.py#L60-L71
|
[
"def all_valid(formsets):\n \"\"\"Returns true if every formset in formsets is valid.\"\"\"\n valid = True\n for formset in formsets:\n if not formset.is_valid():\n valid = False\n return valid\n"
] |
class DetailWithInlineView(DetailView):
template_name = "detail_with_inline_view.html"
inlines = []
model = None
success_url = ""
def get_context_data(self, **kwargs):
context = super(DetailWithInlineView, self).get_context_data(**kwargs)
inlines = self.construct_inlines()
context.update({"inlines": inlines})
return context
def get_inlines(self):
"""
Returns the inline formset classes
"""
return self.inlines
def forms_valid(self, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url())
def forms_invalid(self, inlines):
"""
If the form or formsets are invalid, re-render the context data with the
data-filled form and formsets and errors.
"""
return self.render_to_response(self.get_context_data(inlines=inlines))
def construct_inlines(self):
"""
Returns the inline formset instances
"""
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/detail_with_inline_view.py
|
DetailWithInlineView.get_success_url
|
python
|
def get_success_url(self):
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
|
Returns the supplied success URL.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/detail_with_inline_view.py#L78-L88
| null |
class DetailWithInlineView(DetailView):
template_name = "detail_with_inline_view.html"
inlines = []
model = None
success_url = ""
def get_context_data(self, **kwargs):
context = super(DetailWithInlineView, self).get_context_data(**kwargs)
inlines = self.construct_inlines()
context.update({"inlines": inlines})
return context
def get_inlines(self):
"""
Returns the inline formset classes
"""
return self.inlines
def forms_valid(self, inlines):
"""
If the form and formsets are valid, save the associated models.
"""
for formset in inlines:
formset.save()
return HttpResponseRedirect(self.get_success_url())
def forms_invalid(self, inlines):
"""
If the form or formsets are invalid, re-render the context data with the
data-filled form and formsets and errors.
"""
return self.render_to_response(self.get_context_data(inlines=inlines))
def construct_inlines(self):
"""
Returns the inline formset instances
"""
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form and formset instances with the passed
POST variables and then checked for validity.
"""
self.object = self.get_object()
self.get_context_data()
inlines = self.construct_inlines()
if all_valid(inlines):
return self.forms_valid(inlines)
return self.forms_invalid(inlines)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/create_view_factory.py
|
create_ajaxable_view_from_model_inherit_parent_class
|
python
|
def create_ajaxable_view_from_model_inherit_parent_class(model_class, parent_class_list, operation="Create"):
generic_module = importlib.import_module("django.views.generic")
view_class_name = "%sView" % operation
view_class = getattr(generic_module, view_class_name)
# parent_class_tuple = (ajax_mixin, AjaxableFormContextUpdateMixin, view_class)
parent_class_list.append(view_class)
create_view_class = type("%s%s%s" % (model_class.__name__, operation, "View"),
tuple(parent_class_list), {
# "Meta": type("Meta", (), {"model": self.model_class, "fields": []}),
"model": model_class,
"template_name": "form_view_base_template.html",
"submit_button_text": operation,
"success_url": "../"
})
return create_view_class
|
:param model_class: the django model class
:param operation: "Create" or "Update"
:param ajax_mixin: user may pass a sub class of AjaxableResponseMixin to put more info in the ajax response
:return: dynamically generated class based view. The instance of the view class has as_view method.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/create_view_factory.py#L33-L53
| null |
import importlib
# from compat import JsonResponse
# from django.views.generic import CreateView, UpdateView
from djangoautoconf.class_based_views.ajax_views import AjaxableResponseMixin
# def get_ajax_create_view_from_model(model_class):
# create_view_class = type(model_class.__name__ + "CreateView",
# (AjaxableResponseMixin, CreateView), {
# # "Meta": type("Meta", (), {"model": self.model_class, "fields": []}),
# "model": model_class,
# "template_name": "form_view_base_template.html",
# })
# return create_view_class
class AjaxableFormContextUpdateMixin(object):
submit_button_text = "Update"
ajax_form_id = "UpdateForm"
def get_context_data(self, **kwargs):
# super_class = super(self.__class__, self)
# super_class.remove(ContextUpdateMixin)
for class_instance in self.__class__.__bases__:
if class_instance is AjaxableFormContextUpdateMixin or not hasattr(class_instance, "get_context_data"):
continue
context = class_instance.get_context_data(self, **kwargs)
context["submit_button_text"] = self.submit_button_text
context["ajax_form_id"] = self.ajax_form_id
return context
def create_ajaxable_view_from_model(model_class, operation="Create", ajax_mixin=AjaxableResponseMixin):
# """
# :param model_class: the django model class
# :param operation: "Create" or "Update"
# :param ajax_mixin: user may pass a sub class of AjaxableResponseMixin to put more info in the ajax response
# :return: dynamically generated class based view. The instance of the view class has as_view method.
# """
# generic_module = importlib.import_module("django.views.generic")
# view_class_name = "%sView" % operation
# view_class = getattr(generic_module, view_class_name)
# create_view_class = type("%s%s%s" % (model_class.__name__, operation, "View"),
# (ajax_mixin, AjaxableFormContextUpdateMixin, view_class), {
# # "Meta": type("Meta", (), {"model": self.model_class, "fields": []}),
# "model": model_class,
# "template_name": "form_view_base_template.html",
# "submit_button_text": operation,
# })
# return create_view_class
return create_ajaxable_view_from_model_inherit_parent_class(model_class, [ajax_mixin, AjaxableFormContextUpdateMixin])
|
weijia/djangoautoconf
|
djangoautoconf/auto_conf_admin_tools/admin_register.py
|
AdminRegister.register_all_models
|
python
|
def register_all_models(self, module_instance, exclude_name_list=[]):
for class_instance in class_enumerator(module_instance, exclude_name_list):
if is_inherit_from_model(class_instance):
self.register(class_instance)
|
:param module_instance: the module instance that containing models.Model inherited classes,
mostly the models module
:param exclude_name_list: class does not need to register or is already registered
:return: N/A
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/auto_conf_admin_tools/admin_register.py#L143-L152
|
[
"def is_inherit_from_model(class_inst):\n if models.Model in class_inst.__bases__:\n return True\n for parent_class in class_inst.__bases__:\n if parent_class is object:\n continue\n return is_inherit_from_model(parent_class)\n return False\n",
"def register(self, class_inst, parent_admin=[]):\n admin_class = self.get_valid_admin_class_with_list(class_inst, parent_admin=[])\n self.register_admin_without_duplicated_register(class_inst, admin_class)\n"
] |
class AdminRegister(object):
default_feature_list = []
default_feature_list_base = [ListAndSearch,
ImportExportFeature,
GuardianFeature # , ReversionFeature
]
for feature in default_feature_list_base:
if feature is not None:
default_feature_list.append(feature)
def __init__(self,
admin_site_list=default_admin_site_list,
parent_admin_list=None,
feature_list=None):
super(AdminRegister, self).__init__()
parent_admin_list = parent_admin_list or []
self.admin_features = []
self.parent_admin_list = parent_admin_list
# self.base_model_admin = ModelAdmin
self.admin_class_attributes = {}
if feature_list is None:
feature_list = self.default_feature_list
for feature in feature_list:
self.add_feature(feature())
self.instant_admin_attr = {}
self.admin_site_list = admin_site_list
def get_valid_admin_class_with_list(self, class_inst, parent_admin=[]):
copied_admin_list = copy.copy(self.parent_admin_list)
copied_admin_list.extend(parent_admin)
# copied_admin_list.append(self.base_model_admin)
for feature in self.admin_features:
feature.process_parent_class_list(copied_admin_list, class_inst)
feature.process_admin_class_attr(self.admin_class_attributes, class_inst)
# print ModelAdmin
# print final_parents
self.admin_class_attributes.update(self.instant_admin_attr)
for attr in self.admin_class_attributes:
new_method = self.admin_class_attributes[attr]
if isinstance(new_method, types.FunctionType):
self.admin_class_attributes[attr] = new_method
if self.is_model_admin_needed(copied_admin_list):
copied_admin_list = [ModelAdmin, ]
admin_class = type(class_inst.__name__ + "Admin", tuple(copied_admin_list), self.admin_class_attributes)
for feature in self.admin_features:
if hasattr(feature, "process_admin_class"):
feature.process_admin_class(admin_class, class_inst)
return admin_class
# noinspection PyMethodMayBeStatic
def is_model_admin_needed(self, copied_admin_list):
if len(copied_admin_list) == 0:
return True
for admin_class in copied_admin_list:
if "Mixin" in admin_class.__name__:
continue
else:
return False
return True
def register_admin_without_duplicated_register(self, class_inst, admin_class):
for admin_site in self.admin_site_list:
register_admin_without_duplicated_register(class_inst, admin_class, admin_site)
def register(self, class_inst, parent_admin=[]):
admin_class = self.get_valid_admin_class_with_list(class_inst, parent_admin=[])
self.register_admin_without_duplicated_register(class_inst, admin_class)
def register_with_instant_fields(self, class_inst, instant_admin_attr):
self.instant_admin_attr = instant_admin_attr
self.register(class_inst)
self.instant_admin_attr = {}
def register_all_model(self, module_instance, exclude_name_list=[]):
self.register_all_models(module_instance, exclude_name_list)
def add_feature(self, feature):
self.admin_features.append(feature)
|
weijia/djangoautoconf
|
djangoautoconf/ajax_select_utils/channel_creator_for_model.py
|
create_channels_for_related_fields_in_model
|
python
|
def create_channels_for_related_fields_in_model(model_class):
need_to_create_channel = []
for field in enum_model_fields(model_class):
if type(field) in get_relation_field_types():
if field.related_model == 'self':
need_to_create_channel.append(model_class)
elif field.related_field.model not in need_to_create_channel:
need_to_create_channel.append(field.related_field.model)
for field in enum_model_many_to_many(model_class):
if type(field) in get_relation_field_types():
if field.related_model not in need_to_create_channel:
need_to_create_channel.append(field.related_model)
for field_model_class in need_to_create_channel:
if class_name_to_low_case(field_model_class.__name__) not in registry._registry:
register_channel(field_model_class)
|
Create channel for the fields of the model, the channel name can be got by calling get_ajax_config_for_relation_fields
:param model_class:
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/ajax_select_utils/channel_creator_for_model.py#L9-L30
|
[
"def enum_model_fields(class_inst):\n \"\"\"\n ManyToManyField is not returned. If needed, use enum_model_fields_with_many_to_many instead\n :param class_inst:\n :return:\n \"\"\"\n return class_inst.__dict__['_meta'].fields\n",
"def register_channel(model_class, search_fields=()):\n \"\"\"\n Register channel for model\n :param model_class: model to register channel for\n :param search_fields:\n :return:\n \"\"\"\n if len(search_fields) == 0:\n search_fields = get_fields_with_icontains_filter(model_class)\n channel_class = type(model_class.__name__ + \"LookupChannel\",\n (AutoLookupChannelBase,),\n {\"model\": model_class,\n \"dynamical_search_fields\": search_fields,\n })\n channel_name = class_name_to_low_case(model_class.__name__)\n registry.register({channel_name: channel_class})\n",
"def get_relation_field_types():\n excluded_types = [models.ForeignKey, models.ManyToManyField]\n try:\n from mptt.models import TreeForeignKey\n excluded_types.append(TreeForeignKey)\n except ImportError:\n pass\n return excluded_types\n",
"def enum_model_many_to_many(class_inst):\n return class_inst.__dict__['_meta'].many_to_many\n"
] |
from ajax_select.registry import registry
from djangoautoconf.ajax_select_utils.ajax_select_channel_generator import register_channel
from djangoautoconf.model_utils.model_attr_utils import enum_model_fields, get_relation_field_types, \
enum_model_fields_with_many_to_many, enum_model_many_to_many, model_enumerator, enum_relation_field
from ufs_tools.string_tools import class_name_to_low_case
def add_channel_for_models_in_module(models):
for model_class in model_enumerator(models):
create_channels_for_related_fields_in_model(model_class)
register_channel(model_class)
def get_ajax_config_for_relation_fields(model_class):
field_names = []
ajax_mapping = {}
for field in enum_model_fields(model_class):
if type(field) in get_relation_field_types():
if field.related_model == 'self':
related_model = model_class
else:
related_model = field.related_field.model
field_names.append(field.name)
ajax_mapping[field.name] = get_low_case_model_class_name(related_model)
for field in enum_model_many_to_many(model_class):
if type(field) in get_relation_field_types():
related_model_class = field.related_model
if related_model_class not in field_names:
field_names.append(field.name)
ajax_mapping[field.name] = get_low_case_model_class_name(related_model_class)
return ajax_mapping
def get_low_case_model_class_name(model_class):
return class_name_to_low_case(model_class.__name__)
|
weijia/djangoautoconf
|
djangoautoconf/ajax_select_utils/ajax_select_channel_generator.py
|
register_channel
|
python
|
def register_channel(model_class, search_fields=()):
if len(search_fields) == 0:
search_fields = get_fields_with_icontains_filter(model_class)
channel_class = type(model_class.__name__ + "LookupChannel",
(AutoLookupChannelBase,),
{"model": model_class,
"dynamical_search_fields": search_fields,
})
channel_name = class_name_to_low_case(model_class.__name__)
registry.register({channel_name: channel_class})
|
Register channel for model
:param model_class: model to register channel for
:param search_fields:
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/ajax_select_utils/ajax_select_channel_generator.py#L38-L53
|
[
"def get_fields_with_icontains_filter(model_class):\n text_fields = []\n for field in enum_model_fields(model_class):\n if type(field) in (models.TextField, models.CharField, models.IntegerField):\n text_fields.append(field.name)\n return text_fields\n"
] |
from ajax_select import LookupChannel
from ajax_select.registry import registry
from django.conf.urls import url, include
from ajax_select import urls as ajax_select_urls
from django.db import models
from django.db.models import Q
from djangoautoconf.auto_conf_urls import add_to_root_url_pattern
from ufs_tools.string_tools import class_name_to_low_case
from djangoautoconf.model_utils.model_attr_utils import enum_model_fields
add_to_root_url_pattern(
(url(r'^ajax_select/', include(ajax_select_urls)),)
)
class AutoLookupChannelBase(LookupChannel):
def format_item_display(self, item):
return u"<span class='tag'>%s</span>" % unicode(item)
def get_query(self, q, request):
query = Q(pk__icontains=q)
for field in self.dynamical_search_fields:
param = {"%s__icontains" % field: q}
query |= Q(**param)
return self.model.objects.filter(query)[:10]
def get_fields_with_icontains_filter(model_class):
text_fields = []
for field in enum_model_fields(model_class):
if type(field) in (models.TextField, models.CharField, models.IntegerField):
text_fields.append(field.name)
return text_fields
|
weijia/djangoautoconf
|
djangoautoconf/obs/auto_conf_admin_utils.py
|
register_to_sys_with_admin_list
|
python
|
def register_to_sys_with_admin_list(class_inst, admin_list=None, is_normal_admin_needed=False):
if admin_list is None:
admin_class = get_valid_admin_class_with_list([], class_inst)
else:
admin_class = get_valid_admin_class_with_list(admin_list, class_inst)
if is_normal_admin_needed:
register_all_type_of_admin(admin_class, class_inst)
else:
register_admin(admin_class, class_inst)
|
:param class_inst: model class
:param admin_list: admin class
:param is_normal_admin_needed: is normal admin registration needed
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/obs/auto_conf_admin_utils.py#L61-L75
|
[
"def get_valid_admin_class_with_list(admin_list, class_inst):\n #print admin_list\n copied_admin_list = copy.copy(admin_list)\n copied_admin_list.append(SingleModelAdmin)\n #print ModelAdmin\n #print final_parents\n admin_class = type(class_inst.__name__ + \"Admin\", tuple(copied_admin_list), {})\n return admin_class\n",
"def register_admin(admin_class, class_inst, admin_site=admin.site):\n try:\n if not (class_inst in admin_site._registry):\n admin_site.register(class_inst, admin_class)\n except Exception, e:\n if True: # not (' is already registered' in e.message):\n print class_inst, admin_class\n import traceback\n traceback.print_exc()\n",
"def register_all_type_of_admin(admin_class, class_inst):\n register_admin(admin_class, class_inst)\n try:\n from normal_admin.admin import user_admin_site\n register_admin(admin_class, class_inst, user_admin_site)\n except ImportError:\n pass\n"
] |
import copy
import inspect
from django.conf import settings
if "guardian" in settings.INSTALLED_APPS:
from guardian.admin import GuardedModelAdmin as SingleModelAdmin
else:
from django.contrib.admin import ModelAdmin as SingleModelAdmin
#from django.contrib.admin import ModelAdmin
from django.contrib import admin
#The following not work with guardian?
#import xadmin as admin
def get_valid_admin_class(admin_class, class_inst):
if admin_class is None:
admin_class = type(class_inst.__name__ + "Admin", (SingleModelAdmin, ), {})
return admin_class
def register_admin(admin_class, class_inst, admin_site=admin.site):
try:
if not (class_inst in admin_site._registry):
admin_site.register(class_inst, admin_class)
except Exception, e:
if True: # not (' is already registered' in e.message):
print class_inst, admin_class
import traceback
traceback.print_exc()
def register_all_type_of_admin(admin_class, class_inst):
register_admin(admin_class, class_inst)
try:
from normal_admin.admin import user_admin_site
register_admin(admin_class, class_inst, user_admin_site)
except ImportError:
pass
def register_to_sys(class_inst, admin_class=None, is_normal_admin_needed=False):
admin_class = get_valid_admin_class(admin_class, class_inst)
if is_normal_admin_needed:
register_all_type_of_admin(admin_class, class_inst)
else:
register_admin(admin_class, class_inst)
def get_valid_admin_class_with_list(admin_list, class_inst):
#print admin_list
copied_admin_list = copy.copy(admin_list)
copied_admin_list.append(SingleModelAdmin)
#print ModelAdmin
#print final_parents
admin_class = type(class_inst.__name__ + "Admin", tuple(copied_admin_list), {})
return admin_class
def register_all(class_list, admin_class_list=None):
"""
:param class_list: list of class need to be registered to admin
:param admin_class_list: parent of admin model class
:return: no
"""
for i in class_list:
register_to_sys_with_admin_list(i, admin_class_list)
def register_all_in_module(module_instance, exclude_name_list=[], admin_class_list=None):
"""
:param module_instance: mostly the models module
:param exclude_name_list: class does not need to register or is already registered
:param admin_class_list:
:return:
"""
class_list = []
for name, obj in inspect.getmembers(module_instance):
if inspect.isclass(obj):
if obj.__name__ in exclude_name_list:
continue
class_list.append(obj)
#print class_list, admin_class_list
register_all(class_list, admin_class_list)
|
weijia/djangoautoconf
|
djangoautoconf/obs/auto_conf_admin_utils.py
|
register_all_in_module
|
python
|
def register_all_in_module(module_instance, exclude_name_list=[], admin_class_list=None):
class_list = []
for name, obj in inspect.getmembers(module_instance):
if inspect.isclass(obj):
if obj.__name__ in exclude_name_list:
continue
class_list.append(obj)
#print class_list, admin_class_list
register_all(class_list, admin_class_list)
|
:param module_instance: mostly the models module
:param exclude_name_list: class does not need to register or is already registered
:param admin_class_list:
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/obs/auto_conf_admin_utils.py#L88-L102
|
[
"def register_all(class_list, admin_class_list=None):\n \"\"\"\n :param class_list: list of class need to be registered to admin\n :param admin_class_list: parent of admin model class\n :return: no\n \"\"\"\n for i in class_list:\n register_to_sys_with_admin_list(i, admin_class_list)\n"
] |
import copy
import inspect
from django.conf import settings
if "guardian" in settings.INSTALLED_APPS:
from guardian.admin import GuardedModelAdmin as SingleModelAdmin
else:
from django.contrib.admin import ModelAdmin as SingleModelAdmin
#from django.contrib.admin import ModelAdmin
from django.contrib import admin
#The following not work with guardian?
#import xadmin as admin
def get_valid_admin_class(admin_class, class_inst):
if admin_class is None:
admin_class = type(class_inst.__name__ + "Admin", (SingleModelAdmin, ), {})
return admin_class
def register_admin(admin_class, class_inst, admin_site=admin.site):
try:
if not (class_inst in admin_site._registry):
admin_site.register(class_inst, admin_class)
except Exception, e:
if True: # not (' is already registered' in e.message):
print class_inst, admin_class
import traceback
traceback.print_exc()
def register_all_type_of_admin(admin_class, class_inst):
register_admin(admin_class, class_inst)
try:
from normal_admin.admin import user_admin_site
register_admin(admin_class, class_inst, user_admin_site)
except ImportError:
pass
def register_to_sys(class_inst, admin_class=None, is_normal_admin_needed=False):
admin_class = get_valid_admin_class(admin_class, class_inst)
if is_normal_admin_needed:
register_all_type_of_admin(admin_class, class_inst)
else:
register_admin(admin_class, class_inst)
def get_valid_admin_class_with_list(admin_list, class_inst):
#print admin_list
copied_admin_list = copy.copy(admin_list)
copied_admin_list.append(SingleModelAdmin)
#print ModelAdmin
#print final_parents
admin_class = type(class_inst.__name__ + "Admin", tuple(copied_admin_list), {})
return admin_class
def register_to_sys_with_admin_list(class_inst, admin_list=None, is_normal_admin_needed=False):
"""
:param class_inst: model class
:param admin_list: admin class
:param is_normal_admin_needed: is normal admin registration needed
:return:
"""
if admin_list is None:
admin_class = get_valid_admin_class_with_list([], class_inst)
else:
admin_class = get_valid_admin_class_with_list(admin_list, class_inst)
if is_normal_admin_needed:
register_all_type_of_admin(admin_class, class_inst)
else:
register_admin(admin_class, class_inst)
def register_all(class_list, admin_class_list=None):
"""
:param class_list: list of class need to be registered to admin
:param admin_class_list: parent of admin model class
:return: no
"""
for i in class_list:
register_to_sys_with_admin_list(i, admin_class_list)
|
weijia/djangoautoconf
|
djangoautoconf/django_adv_zip_template_loader.py
|
Loader.load_template_source
|
python
|
def load_template_source(self, template_name, template_dirs=None):
#Get every app's folder
log.error("Calling zip loader")
for folder in app_template_dirs:
if ".zip/" in folder.replace("\\", "/"):
lib_file, relative_folder = get_zip_file_and_relative_path(folder)
log.error(lib_file, relative_folder)
try:
z = zipfile.ZipFile(lib_file)
log.error(relative_folder + template_name)
template_path_in_zip = os.path.join(relative_folder, template_name).replace("\\", "/")
source = z.read(template_path_in_zip)
except (IOError, KeyError) as e:
import traceback
log.error(traceback.format_exc())
try:
z.close()
except:
pass
continue
z.close()
# We found a template, so return the source.
template_path = "%s:%s" % (lib_file, template_path_in_zip)
return (source, template_path)
# If we reach here, the template couldn't be loaded
raise TemplateDoesNotExist(template_name)
|
Template loader that loads templates from zipped modules.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/django_adv_zip_template_loader.py#L44-L71
|
[
"def get_zip_file_and_relative_path(full_path_into_zip):\n full_path_into_zip = full_path_into_zip.replace(\"\\\\\", \"/\")\n zip_ext = \".zip\"\n zip_ext_start_index = full_path_into_zip.find(zip_ext + \"/\")\n lib_path = full_path_into_zip[0:zip_ext_start_index] + zip_ext\n inner_path = full_path_into_zip[zip_ext_start_index + len(zip_ext) + 1:]\n return lib_path, inner_path\n"
] |
class Loader(BaseLoader):
is_usable = True
|
weijia/djangoautoconf
|
djangoautoconf/local_key_manager.py
|
get_local_key
|
python
|
def get_local_key(module_and_var_name, default_module=None):
if "-" in module_and_var_name:
raise ModuleAndVarNameShouldNotHaveDashCharacter
key_name_module_path = module_and_var_name.split(".")
module_name = ".".join(key_name_module_path[0:-1])
attr_name = key_name_module_path[-1]
c = ConfigurableAttributeGetter(module_name, default_module)
return c.get_attr(attr_name)
|
Get local setting for the keys.
:param module_and_var_name: for example: admin_account.admin_user, then you need to put admin_account.py in
local/local_keys/ and add variable admin_user="real admin username", module_name_and_var_name should not
contain "-" because
:param default_module: If the template can not be directly imported, use this to specify the parent module.
:return: value for the key
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/local_key_manager.py#L37-L52
|
[
"def get_attr(self, attr_name):\n try:\n m = self.get_module_of_local_keys()\n # return getattr(m, attr_name)\n except ImportError:\n # from management.commands.keys_default.admin_pass import default_admin_password, default_admin_user\n if self.default_module is None:\n m = importlib.import_module(\"%s_template\" % self.module_of_attribute)\n else:\n m = importlib.import_module(\"%s.%s_template\" % (self.default_module, self.module_of_attribute))\n return getattr(m, attr_name)\n"
] |
import importlib
class ConfigurableAttributeGetter(object):
def __init__(self, module_of_attribute, default_module=None):
super(ConfigurableAttributeGetter, self).__init__()
self.module_of_attribute = module_of_attribute
self.default_module = default_module
def get_module_of_local_keys(self):
exception = None
for module_path in ["local.local_keys", "keys.local_keys"]:
try:
m = importlib.import_module("%s.%s" % (module_path, self.module_of_attribute))
return m
except ImportError, e:
exception = e
raise exception
def get_attr(self, attr_name):
try:
m = self.get_module_of_local_keys()
# return getattr(m, attr_name)
except ImportError:
# from management.commands.keys_default.admin_pass import default_admin_password, default_admin_user
if self.default_module is None:
m = importlib.import_module("%s_template" % self.module_of_attribute)
else:
m = importlib.import_module("%s.%s_template" % (self.default_module, self.module_of_attribute))
return getattr(m, attr_name)
class ModuleAndVarNameShouldNotHaveDashCharacter(Exception):
pass
def get_default_admin_password():
return get_local_key("admin_account.admin_password", "djangoautoconf.keys_default")
def get_default_admin_username():
return get_local_key("admin_account.admin_username", "djangoautoconf.keys_default")
|
weijia/djangoautoconf
|
djangoautoconf/class_based_views/ajax_views.py
|
AjaxableViewMixin.render_to_response
|
python
|
def render_to_response(self, context, **response_kwargs):
context["ajax_form_id"] = self.ajax_form_id
# context["base_template"] = "towel_bootstrap/modal.html"
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
**response_kwargs
)
|
Returns a response with a template rendered with the given context.
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/class_based_views/ajax_views.py#L47-L58
|
[
"def get_template_names(self):\n return [self.ajax_base_template]\n"
] |
class AjaxableViewMixin(object):
is_ajax_view = False
ajax_form_id = "ajaxFormId"
ajax_base_template = "ajax_base.html"
def get_template_names(self):
return [self.ajax_base_template]
|
weijia/djangoautoconf
|
djangoautoconf/model_utils/model_duplicator.py
|
get_duplicated_model
|
python
|
def get_duplicated_model(class_inst, new_class_name):
# Ref: http://www.cnblogs.com/Jerryshome/archive/2012/12/21/2827492.html
# get caller stack frame
# caller_frame = inspect.currentframe()
caller_frame_record = inspect.stack()[1]
# parse module name
module = inspect.getmodule(caller_frame_record[0])
return ModelDuplicator(module.__name__).get_duplicated_model(class_inst, new_class_name)
|
Duplicate the model fields from class_inst to new_class_name, example:
NewClass = get_duplicated_model(OldClass, "NewClass")
:param class_inst:
:param new_class_name:
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/model_utils/model_duplicator.py#L11-L26
|
[
"def get_duplicated_model(self, class_inst, new_class_name):\n \"\"\"\n Duplicate the model fields from class_inst to new_class_name, example:\n NewClass = get_duplicated_model(OldClass, \"NewClass\")\n :param class_inst:\n :param new_class_name:\n :return:\n \"\"\"\n # Ref: http://www.cnblogs.com/Jerryshome/archive/2012/12/21/2827492.html\n attr_dict = {'__module__': self.module_name}\n for field in class_inst.__dict__['_meta'].fields:\n if self.is_relation_field_needed:\n attr_dict[field.name] = copy_field(field)\n elif not is_relation_field(field):\n attr_dict[field.name] = copy_field(field)\n # duplicated_model_class = type(\"Meta\", (), {\"abstract\": True})\n duplicated_model_class = type(new_class_name, (models.Model,), attr_dict)\n # The following codes are not working\n # if hasattr(class_inst, \"__str__\"):\n # setattr(duplicated_model_class, \"__str__\", getattr(class_inst, \"__str__\"))\n # if hasattr(class_inst, \"__str__\"):\n # str_func = getattr(class_inst, \"__str__\")\n # duplicated_model_class.__str__ = str_func\n return duplicated_model_class\n"
] |
import inspect
from django.db import models
from djangoautoconf.model_utils.model_attr_utils import is_relation_field
import copy
from django.db import models
__author__ = 'weijia'
# Ref:
# https://stackoverflow.com/questions/12222003/copying-a-django-field-description-from-an-existing-model-to-a-new-one
def copy_field(f):
fp = copy.copy(f)
fp.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
if hasattr(f, "model"):
del fp.attname
del fp.column
del fp.model
# you may set .name and .verbose_name to None here
fp.db_index = f.db_index
return fp
class ModelDuplicator(object):
def __init__(self, module_name=None):
super(ModelDuplicator, self).__init__()
self.is_relation_field_needed = True
if module_name is None:
caller_frame_record = inspect.stack()[1]
# parse module name
module = inspect.getmodule(caller_frame_record[0])
module_name = module.__name__
self.module_name = module_name
def get_duplicated_model(self, class_inst, new_class_name):
"""
Duplicate the model fields from class_inst to new_class_name, example:
NewClass = get_duplicated_model(OldClass, "NewClass")
:param class_inst:
:param new_class_name:
:return:
"""
# Ref: http://www.cnblogs.com/Jerryshome/archive/2012/12/21/2827492.html
attr_dict = {'__module__': self.module_name}
for field in class_inst.__dict__['_meta'].fields:
if self.is_relation_field_needed:
attr_dict[field.name] = copy_field(field)
elif not is_relation_field(field):
attr_dict[field.name] = copy_field(field)
# duplicated_model_class = type("Meta", (), {"abstract": True})
duplicated_model_class = type(new_class_name, (models.Model,), attr_dict)
# The following codes are not working
# if hasattr(class_inst, "__str__"):
# setattr(duplicated_model_class, "__str__", getattr(class_inst, "__str__"))
# if hasattr(class_inst, "__str__"):
# str_func = getattr(class_inst, "__str__")
# duplicated_model_class.__str__ = str_func
return duplicated_model_class
|
weijia/djangoautoconf
|
djangoautoconf/model_utils/model_duplicator.py
|
ModelDuplicator.get_duplicated_model
|
python
|
def get_duplicated_model(self, class_inst, new_class_name):
# Ref: http://www.cnblogs.com/Jerryshome/archive/2012/12/21/2827492.html
attr_dict = {'__module__': self.module_name}
for field in class_inst.__dict__['_meta'].fields:
if self.is_relation_field_needed:
attr_dict[field.name] = copy_field(field)
elif not is_relation_field(field):
attr_dict[field.name] = copy_field(field)
# duplicated_model_class = type("Meta", (), {"abstract": True})
duplicated_model_class = type(new_class_name, (models.Model,), attr_dict)
# The following codes are not working
# if hasattr(class_inst, "__str__"):
# setattr(duplicated_model_class, "__str__", getattr(class_inst, "__str__"))
# if hasattr(class_inst, "__str__"):
# str_func = getattr(class_inst, "__str__")
# duplicated_model_class.__str__ = str_func
return duplicated_model_class
|
Duplicate the model fields from class_inst to new_class_name, example:
NewClass = get_duplicated_model(OldClass, "NewClass")
:param class_inst:
:param new_class_name:
:return:
|
train
|
https://github.com/weijia/djangoautoconf/blob/b7dbda2287ed8cb9de6d02cb3abaaa1c36b1ced0/djangoautoconf/model_utils/model_duplicator.py#L58-L81
|
[
"def is_relation_field(field):\n if type(field) in get_relation_field_types():\n return True\n return False\n",
"def copy_field(f):\n fp = copy.copy(f)\n\n fp.creation_counter = models.Field.creation_counter\n models.Field.creation_counter += 1\n\n if hasattr(f, \"model\"):\n del fp.attname\n del fp.column\n del fp.model\n\n # you may set .name and .verbose_name to None here\n fp.db_index = f.db_index\n return fp\n"
] |
class ModelDuplicator(object):
def __init__(self, module_name=None):
super(ModelDuplicator, self).__init__()
self.is_relation_field_needed = True
if module_name is None:
caller_frame_record = inspect.stack()[1]
# parse module name
module = inspect.getmodule(caller_frame_record[0])
module_name = module.__name__
self.module_name = module_name
|
OpenGov/og-python-utils
|
ogutils/loggers/default.py
|
build_default_logger
|
python
|
def build_default_logger(
logger_name='logger',
log_level=None,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5):
'''
Generates a logger that outputs messages in the same format as default Flask applications.
'''
old_logger_class = logging.getLoggerClass()
logging.setLoggerClass(DefaultLogger)
logger = logging.getLogger(logger_name)
logging.setLoggerClass(old_logger_class)
if log_level:
logger.setLevel(log_level)
logger.apply_default_handlers(log_dir, console_enabled, max_log_size, max_backup_logs)
return logger
|
Generates a logger that outputs messages in the same format as default Flask applications.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/loggers/default.py#L117-L135
| null |
import os
import sys
import logging
import logging.handlers
DEFAULT_LOGGER_FORMAT = logging.Formatter('[%(asctime)s] %(message)s')
NO_FILE_INDICATOR = {'file': False}
NO_CONSOLE_INDICATOR = {'console': False}
class ExtraAttributeLogger(logging.Logger):
def __init__(self, name, level=logging.NOTSET, extra_record_args=None):
logging.Logger.__init__(self, name, level=level)
self.extra_record_args = extra_record_args or {}
def _log(self, level, msg, args, exc_info=None, extra=None):
if extra is None:
extra = {}
extra = dict(self.extra_record_args, **extra)
logging.Logger._log(self, level, msg, args, exc_info=exc_info, extra=extra)
class DebugEvalLogger(logging.Logger):
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
'''
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
'''
if self.isEnabledFor(logging.DEBUG):
message = debug_generator(*gen_args, **gen_kwargs)
# Allow for content filtering to skip logging
if message is not None:
return self.debug(message)
class FileFilter(logging.Filter):
def filter(self, record):
return getattr(record, 'file', True)
class ConsoleFilter(logging.Filter):
def filter(self, record):
return getattr(record, 'console', True)
class SubErrorFilter(logging.Filter):
def filter(self, record):
return record.levelno < logging.ERROR
class StdOutForwarder(object):
'''
Used to forward content to the current sys.stdout. This allows for rebinding sys.stdout without
remapping associations in loggers
'''
def write(self, content):
sys.stdout.write(content)
def flush(self):
sys.stdout.flush()
def isatty(self):
return sys.stdout.isatty()
class StdErrForwarder(object):
'''
Used to forward content to the current sys.stdout. This allows for rebinding sys.stdout without
remapping associations in loggers
'''
def write(self, content):
sys.stderr.write(content)
def flush(self):
sys.stderr.flush()
def isatty(self):
return sys.stderr.isatty()
class DefaultLogger(ExtraAttributeLogger, DebugEvalLogger):
def apply_default_handlers(
self,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5,
formatter=DEFAULT_LOGGER_FORMAT):
if console_enabled:
stdout_handler = logging.StreamHandler(StdOutForwarder())
stdout_handler.setFormatter(formatter)
stdout_handler.addFilter(ConsoleFilter())
stdout_handler.addFilter(SubErrorFilter())
self.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(StdErrForwarder())
stderr_handler.setLevel(logging.ERROR)
stderr_handler.setFormatter(formatter)
stderr_handler.addFilter(ConsoleFilter())
self.addHandler(stderr_handler)
if log_dir:
file_handler = logging.handlers.RotatingFileHandler(os.path.join(log_dir, 'console.log'),
maxBytes=max_log_size, backupCount=max_backup_logs)
file_handler.setFormatter(formatter)
file_handler.addFilter(FileFilter())
self.addHandler(file_handler)
error_file_handler = logging.handlers.RotatingFileHandler(
os.path.join(log_dir, 'errors.log'),
maxBytes=max_log_size,
backupCount=max_backup_logs)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
error_file_handler.addFilter(FileFilter())
self.addHandler(error_file_handler)
# Overwrites the log each run
session_file_handler = logging.FileHandler(os.path.join(log_dir, 'session.log'), mode='wb')
session_file_handler.setFormatter(formatter)
session_file_handler.addFilter(FileFilter())
self.addHandler(session_file_handler)
def build_default_logger(
logger_name='logger',
log_level=None,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5):
'''
Generates a logger that outputs messages in the same format as default Flask applications.
'''
old_logger_class = logging.getLoggerClass()
logging.setLoggerClass(DefaultLogger)
logger = logging.getLogger(logger_name)
logging.setLoggerClass(old_logger_class)
if log_level:
logger.setLevel(log_level)
logger.apply_default_handlers(log_dir, console_enabled, max_log_size, max_backup_logs)
return logger
|
OpenGov/og-python-utils
|
ogutils/loggers/default.py
|
DebugEvalLogger.debug_generate
|
python
|
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
'''
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
'''
if self.isEnabledFor(logging.DEBUG):
message = debug_generator(*gen_args, **gen_kwargs)
# Allow for content filtering to skip logging
if message is not None:
return self.debug(message)
|
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/loggers/default.py#L22-L31
| null |
class DebugEvalLogger(logging.Logger):
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
'''
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
'''
if self.isEnabledFor(logging.DEBUG):
message = debug_generator(*gen_args, **gen_kwargs)
# Allow for content filtering to skip logging
if message is not None:
return self.debug(message)
|
OpenGov/og-python-utils
|
ogutils/collections/checks.py
|
any_shared
|
python
|
def any_shared(enum_one, enum_two):
'''
Truthy if any element in enum_one is present in enum_two
'''
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one)
|
Truthy if any element in enum_one is present in enum_two
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/checks.py#L13-L21
|
[
"def is_collection(elem):\n '''\n Truthy if the argument is a collection object\n '''\n return hasattr(elem, '__iter__') or hasattr(elem, '__getitem__')\n"
] |
def is_collection(elem):
'''
Truthy if the argument is a collection object
'''
return hasattr(elem, '__iter__') or hasattr(elem, '__getitem__')
def is_empty(elem):
'''
Truthy if the argument is empty, but falsy for non-collection falsy elements
'''
return is_collection(elem) and not any(True for _ in elem)
def any_shared(enum_one, enum_two):
'''
Truthy if any element in enum_one is present in enum_two
'''
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one)
|
OpenGov/og-python-utils
|
ogutils/functions/decorators.py
|
listify
|
python
|
def listify(generator_func):
'''
Converts generator functions into list returning functions.
@listify
def test():
yield 1
test()
# => [1]
'''
def list_func(*args, **kwargs):
return degenerate(generator_func(*args, **kwargs))
return list_func
|
Converts generator functions into list returning functions.
@listify
def test():
yield 1
test()
# => [1]
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/functions/decorators.py#L3-L15
| null |
from ..collections.transformations import degenerate
|
OpenGov/og-python-utils
|
ogutils/functions/operators.py
|
restrict_args
|
python
|
def restrict_args(func, *args, **kwargs):
'''
Restricts the possible arguements to a method to match the func argument.
restrict_args(lambda a: a, 1, 2)
# => 1
'''
callargs = getargspec(func)
if not callargs.varargs:
args = args[0:len(callargs.args)]
return func(*args, **kwargs)
|
Restricts the possible arguements to a method to match the func argument.
restrict_args(lambda a: a, 1, 2)
# => 1
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/functions/operators.py#L3-L13
|
[
"self.assertEquals(operators.restrict_args(lambda: '', 'a', 'b'), '')\n",
"self.assertEquals(operators.restrict_args(lambda a: a, 'a'), 'a')\n",
"self.assertEquals(operators.restrict_args(lambda a: a, 'a', 'b'), 'a')\n",
"self.assertEquals(operators.restrict_args(lambda a, b: a + b, 'a', 'b'), 'ab')\n",
"self.assertEquals(operators.restrict_args(lambda a: a, a='a'), 'a')\n",
"self.assertEquals(operators.restrict_args(lambda a, b: a + b, 'a', b='b'), 'ab')\n",
"self.assertEquals(operators.restrict_args(lambda a, b: a + b, a='a', b='b'), 'ab')\n",
"operators.restrict_args(lambda: '', a='a')\n",
"operators.restrict_args(lambda a: a, 'a', b='b')\n",
"operators.restrict_args(lambda a: a, 'a', a='a')\n",
"operators.restrict_args(lambda a: a)\n",
"operators.restrict_args(lambda a, b: a + b, 'a')\n",
"operators.restrict_args(lambda a, b: a + b, a='a')\n",
"operators.restrict_args(lambda a, b: a + b, b='b')\n"
] |
from inspect import getargspec
def restrict_args(func, *args, **kwargs):
'''
Restricts the possible arguements to a method to match the func argument.
restrict_args(lambda a: a, 1, 2)
# => 1
'''
callargs = getargspec(func)
if not callargs.varargs:
args = args[0:len(callargs.args)]
return func(*args, **kwargs)
def repeat_call(func, retries, *args, **kwargs):
'''
Tries a total of 'retries' times to execute callable before failing.
'''
retries = max(0, int(retries))
try_num = 0
while True:
if try_num == retries:
return func(*args, **kwargs)
else:
try:
return func(*args, **kwargs)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise e
try_num += 1
|
OpenGov/og-python-utils
|
ogutils/functions/operators.py
|
repeat_call
|
python
|
def repeat_call(func, retries, *args, **kwargs):
'''
Tries a total of 'retries' times to execute callable before failing.
'''
retries = max(0, int(retries))
try_num = 0
while True:
if try_num == retries:
return func(*args, **kwargs)
else:
try:
return func(*args, **kwargs)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise e
try_num += 1
|
Tries a total of 'retries' times to execute callable before failing.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/functions/operators.py#L15-L30
|
[
"return repeat_call(lambda: urllib2.urlopen(req).read(), retries)\n",
"return repeat_call(lambda: json.loads(urllib2.urlopen(req).read()), retries)\n",
"def fail_count_down():\n if fail_count_down.fail_count == 0:\n return 'success'\n else:\n fail_count_down.fail_count -= 1\n raise TypeError('fail')\n",
"self.assertEquals(operators.repeat_call(lambda: 'success', 0), 'success')\n",
"def fail_count_down(*args, **kwargs):\n if fail_count_down.fail_count == 0:\n return (args, kwargs)\n else:\n fail_count_down.fail_count -= 1\n raise TypeError('fail')\n",
"self.assertEquals(operators.repeat_call(lambda a: a, 0, 'success'), 'success')\n"
] |
from inspect import getargspec
def restrict_args(func, *args, **kwargs):
'''
Restricts the possible arguements to a method to match the func argument.
restrict_args(lambda a: a, 1, 2)
# => 1
'''
callargs = getargspec(func)
if not callargs.varargs:
args = args[0:len(callargs.args)]
return func(*args, **kwargs)
def repeat_call(func, retries, *args, **kwargs):
'''
Tries a total of 'retries' times to execute callable before failing.
'''
retries = max(0, int(retries))
try_num = 0
while True:
if try_num == retries:
return func(*args, **kwargs)
else:
try:
return func(*args, **kwargs)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise e
try_num += 1
|
OpenGov/og-python-utils
|
ogutils/web/operators.py
|
repeat_read_url_request
|
python
|
def repeat_read_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times
'''
if logger:
logger.debug("Retrieving url content: {}".format(url))
req = urllib2.Request(url, data, headers=headers or {})
return repeat_call(lambda: urllib2.urlopen(req).read(), retries)
|
Allows for repeated http requests up to retries additional times
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/web/operators.py#L6-L13
|
[
"def repeat_call(func, retries, *args, **kwargs):\n '''\n Tries a total of 'retries' times to execute callable before failing.\n '''\n retries = max(0, int(retries))\n try_num = 0\n while True:\n if try_num == retries:\n return func(*args, **kwargs)\n else:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if isinstance(e, KeyboardInterrupt):\n raise e\n try_num += 1\n"
] |
import urllib2
import json
from ..functions.operators import repeat_call
def repeat_read_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times
'''
if logger:
logger.debug("Retrieving url content: {}".format(url))
req = urllib2.Request(url, data, headers=headers or {})
return repeat_call(lambda: urllib2.urlopen(req).read(), retries)
def repeat_read_json_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times with convienence
wrapper on jsonization of response
'''
if logger:
logger.debug("Retrieving url json content: {}".format(url))
req = urllib2.Request(url, data=data, headers=headers or {})
return repeat_call(lambda: json.loads(urllib2.urlopen(req).read()), retries)
|
OpenGov/og-python-utils
|
ogutils/web/operators.py
|
repeat_read_json_url_request
|
python
|
def repeat_read_json_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times with convienence
wrapper on jsonization of response
'''
if logger:
logger.debug("Retrieving url json content: {}".format(url))
req = urllib2.Request(url, data=data, headers=headers or {})
return repeat_call(lambda: json.loads(urllib2.urlopen(req).read()), retries)
|
Allows for repeated http requests up to retries additional times with convienence
wrapper on jsonization of response
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/web/operators.py#L15-L23
|
[
"def repeat_call(func, retries, *args, **kwargs):\n '''\n Tries a total of 'retries' times to execute callable before failing.\n '''\n retries = max(0, int(retries))\n try_num = 0\n while True:\n if try_num == retries:\n return func(*args, **kwargs)\n else:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if isinstance(e, KeyboardInterrupt):\n raise e\n try_num += 1\n"
] |
import urllib2
import json
from ..functions.operators import repeat_call
def repeat_read_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times
'''
if logger:
logger.debug("Retrieving url content: {}".format(url))
req = urllib2.Request(url, data, headers=headers or {})
return repeat_call(lambda: urllib2.urlopen(req).read(), retries)
def repeat_read_json_url_request(url, headers=None, data=None, retries=2, logger=None):
'''
Allows for repeated http requests up to retries additional times with convienence
wrapper on jsonization of response
'''
if logger:
logger.debug("Retrieving url json content: {}".format(url))
req = urllib2.Request(url, data=data, headers=headers or {})
return repeat_call(lambda: json.loads(urllib2.urlopen(req).read()), retries)
|
OpenGov/og-python-utils
|
ogutils/checks.py
|
booleanize
|
python
|
def booleanize(truthy):
'''
Smartly converts argument to true or false. Strings and variants of
'true' and 'false' convert to appropriate types, along with normal
bool() like conversions.
'''
if truthy is None:
return False
elif isinstance(truthy, basestring):
if truthy:
try:
return bool(distutils.util.strtobool(truthy))
except ValueError:
return True
else:
return False
elif is_collection(truthy):
return not is_empty(truthy)
else:
return bool(truthy)
|
Smartly converts argument to true or false. Strings and variants of
'true' and 'false' convert to appropriate types, along with normal
bool() like conversions.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/checks.py#L4-L23
|
[
"def is_collection(elem):\n '''\n Truthy if the argument is a collection object\n '''\n return hasattr(elem, '__iter__') or hasattr(elem, '__getitem__')\n",
"def is_empty(elem):\n '''\n Truthy if the argument is empty, but falsy for non-collection falsy elements\n '''\n return is_collection(elem) and not any(True for _ in elem)\n"
] |
import distutils
from .collections.checks import is_collection, is_empty
def booleanize(truthy):
'''
Smartly converts argument to true or false. Strings and variants of
'true' and 'false' convert to appropriate types, along with normal
bool() like conversions.
'''
if truthy is None:
return False
elif isinstance(truthy, basestring):
if truthy:
try:
return bool(distutils.util.strtobool(truthy))
except ValueError:
return True
else:
return False
elif is_collection(truthy):
return not is_empty(truthy)
else:
return bool(truthy)
|
OpenGov/og-python-utils
|
ogutils/loggers/flask.py
|
build_flask_like_logger
|
python
|
def build_flask_like_logger(
logger_name='logger',
log_level=None,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5,
host=None):
'''
Generates a logger that outputs messages in the same format as default Flask applications.
'''
old_logger_class = logging.getLoggerClass()
logging.setLoggerClass(FlaskLikeLogger)
logger = logging.getLogger(logger_name)
logging.setLoggerClass(old_logger_class)
if log_level:
logger.setLevel(log_level)
if host:
logger.set_host(host)
logger.apply_default_handlers(log_dir, console_enabled, max_log_size, max_backup_logs)
return logger
|
Generates a logger that outputs messages in the same format as default Flask applications.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/loggers/flask.py#L100-L121
| null |
import sys
import time
import logging
import logging.handlers
from .default import DefaultLogger
MONTH_NAMES = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class OrderedFormatsFormatter(logging.Formatter):
'''
Allows for multiple _fmt to be applied in order until all their keys are
accounted for and processed.
OrderedFormatsFormatter(['%(host)s -- %(message)s', '%(message)s'])
creates a formatter that will first try to format a host into the message
and default to just message if the 'host' argument is not present in the
record.
'''
def usesTime(self, fmt=None):
'''
Check if the format uses the creation time of the record.
'''
if fmt is None:
fmt = self._fmt
if not isinstance(fmt, basestring):
fmt = fmt[0]
return fmt.find('%(asctime)') >= 0
def format(self, record):
# Copied from logger default formatter with array of formats awareness
record.message = record.getMessage()
fmts = self._fmt
if isinstance(fmts, basestring):
fmts = [fmts]
for fmt in fmts:
if self.usesTime(fmt):
record.asctime = self.formatTime(record, self.datefmt)
try:
s = fmt % record.__dict__
break
except KeyError:
continue
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != '\n':
s = s + '\n'
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
return s
class FlaskStyleTimeFormatter(OrderedFormatsFormatter):
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created)
year, month, day, hh, mm, ss, x, y, z = ct
if datefmt:
s = time.strftime(datefmt, ct)
else:
s = '%02d/%3s/%04d %02d:%02d:%02d' % (day, MONTH_NAMES[month], year, hh, mm, ss)
return s
FLASK_FORMAT = FlaskStyleTimeFormatter(
['%(host)s - - [%(asctime)s] %(message)s',
'[%(asctime)s] %(message)s',
'%(message)s'])
class FlaskLikeLogger(DefaultLogger):
def set_host(self, host):
self.extra_record_args['host'] = host
def apply_default_handlers(
self,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5,
formatter=FLASK_FORMAT):
DefaultLogger.apply_default_handlers(
self,
log_dir,
console_enabled,
max_log_size,
max_backup_logs,
formatter)
def build_flask_like_logger(
logger_name='logger',
log_level=None,
log_dir=None,
console_enabled=True,
max_log_size=5*1024*1024,
max_backup_logs=5,
host=None):
'''
Generates a logger that outputs messages in the same format as default Flask applications.
'''
old_logger_class = logging.getLoggerClass()
logging.setLoggerClass(FlaskLikeLogger)
logger = logging.getLogger(logger_name)
logging.setLoggerClass(old_logger_class)
if log_level:
logger.setLevel(log_level)
if host:
logger.set_host(host)
logger.apply_default_handlers(log_dir, console_enabled, max_log_size, max_backup_logs)
return logger
|
OpenGov/og-python-utils
|
ogutils/loggers/flask.py
|
OrderedFormatsFormatter.usesTime
|
python
|
def usesTime(self, fmt=None):
'''
Check if the format uses the creation time of the record.
'''
if fmt is None:
fmt = self._fmt
if not isinstance(fmt, basestring):
fmt = fmt[0]
return fmt.find('%(asctime)') >= 0
|
Check if the format uses the creation time of the record.
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/loggers/flask.py#L20-L28
| null |
class OrderedFormatsFormatter(logging.Formatter):
'''
Allows for multiple _fmt to be applied in order until all their keys are
accounted for and processed.
OrderedFormatsFormatter(['%(host)s -- %(message)s', '%(message)s'])
creates a formatter that will first try to format a host into the message
and default to just message if the 'host' argument is not present in the
record.
'''
def usesTime(self, fmt=None):
'''
Check if the format uses the creation time of the record.
'''
if fmt is None:
fmt = self._fmt
if not isinstance(fmt, basestring):
fmt = fmt[0]
return fmt.find('%(asctime)') >= 0
def format(self, record):
# Copied from logger default formatter with array of formats awareness
record.message = record.getMessage()
fmts = self._fmt
if isinstance(fmts, basestring):
fmts = [fmts]
for fmt in fmts:
if self.usesTime(fmt):
record.asctime = self.formatTime(record, self.datefmt)
try:
s = fmt % record.__dict__
break
except KeyError:
continue
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != '\n':
s = s + '\n'
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
return s
|
OpenGov/og-python-utils
|
ogutils/collections/operators.py
|
apply_dict_default
|
python
|
def apply_dict_default(dictionary, arg, default):
'''
Used to avoid generating a defaultdict object, or assigning defaults to a dict-like object
apply_dict_default({}, 'test', list)
# => {'test': []}
apply_dict_default({'test': 'ok'}, 'test', list)
# => {'test': 'ok'}
'''
if arg not in dictionary:
if hasattr(default, '__call__'):
# Don't try/catch because the method could raise a TypeError and we'd hide it
default = restrict_args(default, arg)
dictionary[arg] = default
return dictionary
|
Used to avoid generating a defaultdict object, or assigning defaults to a dict-like object
apply_dict_default({}, 'test', list)
# => {'test': []}
apply_dict_default({'test': 'ok'}, 'test', list)
# => {'test': 'ok'}
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/operators.py#L3-L18
|
[
"def restrict_args(func, *args, **kwargs):\n '''\n Restricts the possible arguements to a method to match the func argument.\n\n restrict_args(lambda a: a, 1, 2)\n # => 1\n '''\n callargs = getargspec(func)\n if not callargs.varargs:\n args = args[0:len(callargs.args)]\n return func(*args, **kwargs)\n"
] |
from ..functions.operators import restrict_args
def apply_dict_default(dictionary, arg, default):
'''
Used to avoid generating a defaultdict object, or assigning defaults to a dict-like object
apply_dict_default({}, 'test', list)
# => {'test': []}
apply_dict_default({'test': 'ok'}, 'test', list)
# => {'test': 'ok'}
'''
if arg not in dictionary:
if hasattr(default, '__call__'):
# Don't try/catch because the method could raise a TypeError and we'd hide it
default = restrict_args(default, arg)
dictionary[arg] = default
return dictionary
|
OpenGov/og-python-utils
|
ogutils/text/regex.py
|
chain_sub_regexes
|
python
|
def chain_sub_regexes(phrase, *regex_sub_pairs):
'''
Allow for a series of regex substitutions to occur
chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
# => 'test_ooo'
'''
for regex, substitution in regex_sub_pairs:
if isinstance(regex, basestring):
regex = re.compile(regex)
phrase = regex.sub(substitution, phrase)
return phrase
|
Allow for a series of regex substitutions to occur
chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
# => 'test_ooo'
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/text/regex.py#L3-L14
| null |
import re
def chain_sub_regexes(phrase, *regex_sub_pairs):
'''
Allow for a series of regex substitutions to occur
chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
# => 'test_ooo'
'''
for regex, substitution in regex_sub_pairs:
if isinstance(regex, basestring):
regex = re.compile(regex)
phrase = regex.sub(substitution, phrase)
return phrase
|
OpenGov/og-python-utils
|
ogutils/collections/transformations.py
|
recursive_iter
|
python
|
def recursive_iter(enumerables):
'''
Walks nested list-like elements as though they were sequentially available
recursive_iter([[1,2], 3])
# => 1, 2, 3
'''
if not is_collection(enumerables) or isinstance(enumerables, (basestring, dict)):
yield enumerables
else:
for elem in enumerables:
for sub_elem in recursive_iter(elem):
yield sub_elem
|
Walks nested list-like elements as though they were sequentially available
recursive_iter([[1,2], 3])
# => 1, 2, 3
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/transformations.py#L5-L17
|
[
"def is_collection(elem):\n '''\n Truthy if the argument is a collection object\n '''\n return hasattr(elem, '__iter__') or hasattr(elem, '__getitem__')\n"
] |
import inspect
from checks import is_collection
from itertools import islice
def recursive_iter(enumerables):
'''
Walks nested list-like elements as though they were sequentially available
recursive_iter([[1,2], 3])
# => 1, 2, 3
'''
if not is_collection(enumerables) or isinstance(enumerables, (basestring, dict)):
yield enumerables
else:
for elem in enumerables:
for sub_elem in recursive_iter(elem):
yield sub_elem
def flatten(enumerable):
'''
Converts nested list-like elements into a single list
flatten([[1,2], 3])
# => [1, 2, 3]
'''
return list(recursive_iter(enumerable))
def degenerate(enumerable):
'''
Converts generators to lists
degenerate(xrange(2))
# => [0, 1]
'''
if (isinstance(enumerable, xrange) or
inspect.isgeneratorfunction(enumerable) or
inspect.isgenerator(enumerable)):
return list(enumerable)
return enumerable
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged
def batch(enumerable, batch_size):
'''
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
'''
batch_size = max(int(batch_size), 1)
try:
enumerable.__getitem__
total_size = len(enumerable)
except (TypeError, AttributeError):
enumerable = list(enumerable)
total_size = len(enumerable)
if total_size == 0:
yield tuple()
try:
for batch_index in xrange(0, total_size, batch_size):
yield enumerable[batch_index:min(batch_index + batch_size, total_size)]
except TypeError:
# Fall back on islice, though it's not as efficient the way we're using it
for batch_start in xrange(0, total_size, batch_size):
yield tuple(islice(enumerable, batch_start, min(batch_start + batch_size, total_size)))
|
OpenGov/og-python-utils
|
ogutils/collections/transformations.py
|
degenerate
|
python
|
def degenerate(enumerable):
'''
Converts generators to lists
degenerate(xrange(2))
# => [0, 1]
'''
if (isinstance(enumerable, xrange) or
inspect.isgeneratorfunction(enumerable) or
inspect.isgenerator(enumerable)):
return list(enumerable)
return enumerable
|
Converts generators to lists
degenerate(xrange(2))
# => [0, 1]
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/transformations.py#L28-L39
| null |
import inspect
from checks import is_collection
from itertools import islice
def recursive_iter(enumerables):
'''
Walks nested list-like elements as though they were sequentially available
recursive_iter([[1,2], 3])
# => 1, 2, 3
'''
if not is_collection(enumerables) or isinstance(enumerables, (basestring, dict)):
yield enumerables
else:
for elem in enumerables:
for sub_elem in recursive_iter(elem):
yield sub_elem
def flatten(enumerable):
'''
Converts nested list-like elements into a single list
flatten([[1,2], 3])
# => [1, 2, 3]
'''
return list(recursive_iter(enumerable))
def degenerate(enumerable):
'''
Converts generators to lists
degenerate(xrange(2))
# => [0, 1]
'''
if (isinstance(enumerable, xrange) or
inspect.isgeneratorfunction(enumerable) or
inspect.isgenerator(enumerable)):
return list(enumerable)
return enumerable
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged
def batch(enumerable, batch_size):
'''
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
'''
batch_size = max(int(batch_size), 1)
try:
enumerable.__getitem__
total_size = len(enumerable)
except (TypeError, AttributeError):
enumerable = list(enumerable)
total_size = len(enumerable)
if total_size == 0:
yield tuple()
try:
for batch_index in xrange(0, total_size, batch_size):
yield enumerable[batch_index:min(batch_index + batch_size, total_size)]
except TypeError:
# Fall back on islice, though it's not as efficient the way we're using it
for batch_start in xrange(0, total_size, batch_size):
yield tuple(islice(enumerable, batch_start, min(batch_start + batch_size, total_size)))
|
OpenGov/og-python-utils
|
ogutils/collections/transformations.py
|
merge_dicts
|
python
|
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged
|
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/transformations.py#L41-L57
| null |
import inspect
from checks import is_collection
from itertools import islice
def recursive_iter(enumerables):
'''
Walks nested list-like elements as though they were sequentially available
recursive_iter([[1,2], 3])
# => 1, 2, 3
'''
if not is_collection(enumerables) or isinstance(enumerables, (basestring, dict)):
yield enumerables
else:
for elem in enumerables:
for sub_elem in recursive_iter(elem):
yield sub_elem
def flatten(enumerable):
'''
Converts nested list-like elements into a single list
flatten([[1,2], 3])
# => [1, 2, 3]
'''
return list(recursive_iter(enumerable))
def degenerate(enumerable):
'''
Converts generators to lists
degenerate(xrange(2))
# => [0, 1]
'''
if (isinstance(enumerable, xrange) or
inspect.isgeneratorfunction(enumerable) or
inspect.isgenerator(enumerable)):
return list(enumerable)
return enumerable
def merge_dicts(*dicts, **copy_check):
'''
Combines dictionaries into a single dictionary. If the 'copy' keyword is passed
then the first dictionary is copied before update.
merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
# => {'a': 2, 'b': 1, 'c': 1}
'''
merged = {}
if not dicts:
return merged
for index, merge_dict in enumerate(dicts):
if index == 0 and not copy_check.get('copy'):
merged = merge_dict
else:
merged.update(merge_dict)
return merged
def batch(enumerable, batch_size):
'''
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
'''
batch_size = max(int(batch_size), 1)
try:
enumerable.__getitem__
total_size = len(enumerable)
except (TypeError, AttributeError):
enumerable = list(enumerable)
total_size = len(enumerable)
if total_size == 0:
yield tuple()
try:
for batch_index in xrange(0, total_size, batch_size):
yield enumerable[batch_index:min(batch_index + batch_size, total_size)]
except TypeError:
# Fall back on islice, though it's not as efficient the way we're using it
for batch_start in xrange(0, total_size, batch_size):
yield tuple(islice(enumerable, batch_start, min(batch_start + batch_size, total_size)))
|
OpenGov/og-python-utils
|
ogutils/collections/transformations.py
|
batch
|
python
|
def batch(enumerable, batch_size):
'''
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
'''
batch_size = max(int(batch_size), 1)
try:
enumerable.__getitem__
total_size = len(enumerable)
except (TypeError, AttributeError):
enumerable = list(enumerable)
total_size = len(enumerable)
if total_size == 0:
yield tuple()
try:
for batch_index in xrange(0, total_size, batch_size):
yield enumerable[batch_index:min(batch_index + batch_size, total_size)]
except TypeError:
# Fall back on islice, though it's not as efficient the way we're using it
for batch_start in xrange(0, total_size, batch_size):
yield tuple(islice(enumerable, batch_start, min(batch_start + batch_size, total_size)))
|
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
|
train
|
https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/transformations.py#L59-L82
| null |
import inspect
from checks import is_collection
from itertools import islice
def recursive_iter(enumerables):
    '''
    Walks nested list-like elements as though they were sequentially available
    recursive_iter([[1,2], 3])
    # => 1, 2, 3
    '''
    # Strings and dicts are technically collections, but are yielded whole so
    # that characters / keys are not walked individually.
    if not is_collection(enumerables) or isinstance(enumerables, (basestring, dict)):
        yield enumerables
    else:
        # Depth-first recursion preserves left-to-right leaf order.
        for elem in enumerables:
            for sub_elem in recursive_iter(elem):
                yield sub_elem
def flatten(enumerable):
    '''
    Collapse arbitrarily nested list-like elements into a single flat list.
    flatten([[1,2], 3])
    # => [1, 2, 3]
    '''
    # recursive_iter already walks the nesting; just collect its output.
    return [element for element in recursive_iter(enumerable)]
def degenerate(enumerable):
    '''
    Converts generators to lists
    degenerate(xrange(2))
    # => [0, 1]
    '''
    # Materialize lazy / one-shot iterables (xrange objects, generator
    # functions, and live generators); everything else passes through as-is.
    if (isinstance(enumerable, xrange) or
        inspect.isgeneratorfunction(enumerable) or
        inspect.isgenerator(enumerable)):
        return list(enumerable)
    return enumerable
def merge_dicts(*dicts, **copy_check):
    '''
    Fold any number of dictionaries into one; later dictionaries win on key
    collisions. By default the first dictionary is mutated in place and
    returned; pass copy=True to leave every input untouched.
    merge_dicts({'a': 1, 'c': 1}, {'a': 2, 'b': 1})
    # => {'a': 2, 'b': 1, 'c': 1}
    '''
    result = {}
    if not dicts:
        return result
    for position, candidate in enumerate(dicts):
        if position == 0 and not copy_check.get('copy'):
            # Adopt the first dict directly -- this is the in-place path.
            result = candidate
        else:
            result.update(candidate)
    return result
def batch(enumerable, batch_size):
'''
Breaks enumerable argument into batch size enumerable pieces. The last chunk can
be of any length up to batch_size.
batch(xrange(5), 3)
# => [0, 1, 2], [3, 4]
'''
batch_size = max(int(batch_size), 1)
try:
enumerable.__getitem__
total_size = len(enumerable)
except (TypeError, AttributeError):
enumerable = list(enumerable)
total_size = len(enumerable)
if total_size == 0:
yield tuple()
try:
for batch_index in xrange(0, total_size, batch_size):
yield enumerable[batch_index:min(batch_index + batch_size, total_size)]
except TypeError:
# Fall back on islice, though it's not as efficient the way we're using it
for batch_start in xrange(0, total_size, batch_size):
yield tuple(islice(enumerable, batch_start, min(batch_start + batch_size, total_size)))
|
rohankapoorcom/zm-py
|
zoneminder/run_state.py
|
RunState.active
|
python
|
def active(self) -> bool:
    """Indicate whether this RunState is the currently active one."""
    payload = self._client.get_state(self._state_url)
    for entry in payload['states']:
        details = entry['State']
        if int(details['Id']) != self._state_id:
            continue
        # yes, the ZM API uses the *string* "1" for this...
        return details['IsActive'] == "1"
    return False
|
Indicate if this RunState is currently active.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/run_state.py#L26-L34
| null |
class RunState:
    """A single ZoneMinder run state, bound to the client that fetched it."""

    def __init__(self, client, raw_state):
        """Keep the API client and unpack the raw state record."""
        self._client = client
        self._state_id = int(raw_state['Id'])
        self._state_url = 'api/states.json'
        self._name = raw_state['Name']

    @property
    def id(self) -> int:
        """ZoneMinder's numeric identifier for this run state."""
        # pylint: disable=invalid-name
        return self._state_id

    @property
    def name(self) -> str:
        """Human-readable name of this run state."""
        return self._name

    @property
    def activate(self):
        """Ask ZoneMinder to switch to this run state."""
        self._client.set_active_state(self._name)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.login
|
python
|
def login(self):
    """Authenticate against the ZoneMinder web frontend.

    ZoneMinder answers 200 on both success and failure, so a follow-up
    API call is issued to find out whether the session actually works.
    Returns True on success, False otherwise.
    """
    _LOGGER.debug("Attempting to login to ZoneMinder")
    credentials = {'view': 'console', 'action': 'login'}
    if self._username:
        credentials['username'] = self._username
    if self._password:
        credentials['password'] = self._password
    response = requests.post(urljoin(self._server_url, 'index.php'),
                             data=credentials, verify=self._verify_ssl)
    self._cookies = response.cookies
    # The login POST returns 200 regardless of outcome; probing a real API
    # endpoint is the only reliable way to confirm the session is valid.
    probe = requests.get(
        urljoin(self._server_url, 'api/host/getVersion.json'),
        cookies=self._cookies,
        timeout=ZoneMinder.DEFAULT_TIMEOUT,
        verify=self._verify_ssl)
    if not probe.ok:
        _LOGGER.error("Connection error logging into ZoneMinder")
        return False
    return True
|
Login to the ZoneMinder API.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L35-L62
| null |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder._zm_request
|
python
|
def _zm_request(self, method, api_url, data=None,
                timeout=DEFAULT_TIMEOUT) -> dict:
    """Issue a request to the ZoneMinder API and decode the JSON reply.

    Sessions expire, so a non-OK response triggers a re-login before the
    next attempt (up to LOGIN_RETRIES requests).  Returns an empty dict on
    connection failures or undecodable responses.
    """
    try:
        attempt = 0
        while attempt < ZoneMinder.LOGIN_RETRIES:
            req = requests.request(
                method, urljoin(self._server_url, api_url), data=data,
                cookies=self._cookies, timeout=timeout,
                verify=self._verify_ssl)
            if req.ok:
                break
            # Likely an expired session: authenticate and try again.
            self.login()
            attempt += 1
        else:
            # Every attempt failed; fall through and try to decode anyway.
            _LOGGER.error('Unable to get API response from ZoneMinder')
        try:
            return req.json()
        except ValueError:
            _LOGGER.exception('JSON decode exception caught while'
                              'attempting to decode "%s"', req.text)
            return {}
    except requests.exceptions.ConnectionError:
        _LOGGER.exception('Unable to connect to ZoneMinder')
        return {}
|
Perform a request to the ZoneMinder API.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L72-L100
|
[
"def login(self):\n \"\"\"Login to the ZoneMinder API.\"\"\"\n _LOGGER.debug(\"Attempting to login to ZoneMinder\")\n\n login_post = {'view': 'console', 'action': 'login'}\n if self._username:\n login_post['username'] = self._username\n if self._password:\n login_post['password'] = self._password\n\n req = requests.post(urljoin(self._server_url, 'index.php'),\n data=login_post, verify=self._verify_ssl)\n self._cookies = req.cookies\n\n # Login calls returns a 200 response on both failure and success.\n # The only way to tell if you logged in correctly is to issue an api\n # call.\n req = requests.get(\n urljoin(self._server_url, 'api/host/getVersion.json'),\n cookies=self._cookies,\n timeout=ZoneMinder.DEFAULT_TIMEOUT,\n verify=self._verify_ssl)\n\n if not req.ok:\n _LOGGER.error(\"Connection error logging into ZoneMinder\")\n return False\n\n return True\n"
] |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.get_monitors
|
python
|
def get_monitors(self) -> List[Monitor]:
    """Fetch every Monitor known to the ZoneMinder API.

    Returns an empty list (after a warning) when the API is unreachable.
    """
    response = self._zm_request('get', ZoneMinder.MONITOR_URL)
    if not response:
        _LOGGER.warning("Could not fetch monitors from ZoneMinder")
        return []
    collected = []
    for entry in response['monitors']:
        _LOGGER.debug("Initializing camera %s",
                      entry['Monitor']['Id'])
        collected.append(Monitor(self, entry))
    return collected
|
Get a list of Monitors from the ZoneMinder API.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L102-L115
|
[
"def _zm_request(self, method, api_url, data=None,\n timeout=DEFAULT_TIMEOUT) -> dict:\n \"\"\"Perform a request to the ZoneMinder API.\"\"\"\n try:\n # Since the API uses sessions that expire, sometimes we need to\n # re-auth if the call fails.\n for _ in range(ZoneMinder.LOGIN_RETRIES):\n req = requests.request(\n method, urljoin(self._server_url, api_url), data=data,\n cookies=self._cookies, timeout=timeout,\n verify=self._verify_ssl)\n\n if not req.ok:\n self.login()\n else:\n break\n\n else:\n _LOGGER.error('Unable to get API response from ZoneMinder')\n\n try:\n return req.json()\n except ValueError:\n _LOGGER.exception('JSON decode exception caught while'\n 'attempting to decode \"%s\"', req.text)\n return {}\n except requests.exceptions.ConnectionError:\n _LOGGER.exception('Unable to connect to ZoneMinder')\n return {}\n"
] |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.get_run_states
|
python
|
def get_run_states(self) -> List[RunState]:
    """Fetch every run state defined on the ZoneMinder server.

    Returns an empty list (after a warning) when the API is unreachable.
    """
    payload = self.get_state('api/states.json')
    if not payload:
        _LOGGER.warning("Could not fetch runstates from ZoneMinder")
        return []
    states = []
    for record in payload['states']:
        details = record['State']
        _LOGGER.info("Initializing runstate %s", details['Id'])
        states.append(RunState(self, details))
    return states
|
Get a list of RunStates from the ZoneMinder API.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L117-L130
|
[
"def get_state(self, api_url) -> dict:\n \"\"\"Perform a GET request on the specified ZoneMinder API URL.\"\"\"\n return self._zm_request('get', api_url)\n"
] |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.get_active_state
|
python
|
def get_active_state(self) -> Optional[str]:
    """Return the name of the first active run state, or None if none is."""
    # next() with a default stops at the first active state, matching the
    # lazy short-circuit of an explicit loop.
    return next(
        (state.name for state in self.get_run_states() if state.active),
        None)
|
Get the name of the active run state from the ZoneMinder API.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L132-L137
|
[
"def get_run_states(self) -> List[RunState]:\n \"\"\"Get a list of RunStates from the ZoneMinder API.\"\"\"\n raw_states = self.get_state('api/states.json')\n if not raw_states:\n _LOGGER.warning(\"Could not fetch runstates from ZoneMinder\")\n return []\n\n run_states = []\n for i in raw_states['states']:\n raw_state = i['State']\n _LOGGER.info(\"Initializing runstate %s\", raw_state['Id'])\n run_states.append(RunState(self, raw_state))\n\n return run_states\n"
] |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.set_active_state
|
python
|
def set_active_state(self, state_name):
    """Set the ZoneMinder run state to the given state name, via ZM API.

    This is a long-running call on the ZoneMinder side: the server updates
    every camera before responding, hence the large explicit timeout.
    """
    _LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
    endpoint = 'api/states/change/{}.json'.format(state_name)
    return self._zm_request('GET', endpoint, timeout=120)
|
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L139-L152
|
[
"def _zm_request(self, method, api_url, data=None,\n timeout=DEFAULT_TIMEOUT) -> dict:\n \"\"\"Perform a request to the ZoneMinder API.\"\"\"\n try:\n # Since the API uses sessions that expire, sometimes we need to\n # re-auth if the call fails.\n for _ in range(ZoneMinder.LOGIN_RETRIES):\n req = requests.request(\n method, urljoin(self._server_url, api_url), data=data,\n cookies=self._cookies, timeout=timeout,\n verify=self._verify_ssl)\n\n if not req.ok:\n self.login()\n else:\n break\n\n else:\n _LOGGER.error('Unable to get API response from ZoneMinder')\n\n try:\n return req.json()\n except ValueError:\n _LOGGER.exception('JSON decode exception caught while'\n 'attempting to decode \"%s\"', req.text)\n return {}\n except requests.exceptions.ConnectionError:\n _LOGGER.exception('Unable to connect to ZoneMinder')\n return {}\n"
] |
class ZoneMinder:
    """The ZoneMinder API client itself. Create one of these to begin."""

    # Default install locations for a stock ZoneMinder deployment; both are
    # overridable via the __init__ keyword arguments.
    DEFAULT_SERVER_PATH = '/zm/'
    DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
    # Per-request timeout (seconds) used unless a caller passes its own.
    DEFAULT_TIMEOUT = 10
    # Number of attempts _zm_request makes, re-logging-in between failures.
    LOGIN_RETRIES = 2
    MONITOR_URL = 'api/monitors.json'

    def __init__(self, server_host, username, password,
                 server_path=DEFAULT_SERVER_PATH,
                 zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
        """Create a ZoneMinder API Client."""
        self._server_url = ZoneMinder._build_server_url(server_host,
                                                        server_path)
        self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
        self._username = username
        self._password = password
        self._verify_ssl = verify_ssl
        # Session cookies are captured by login(); None until then.
        self._cookies = None

    def login(self):
        """Login to the ZoneMinder API.

        Returns True when a follow-up API call succeeds with the new session
        cookies, False otherwise.
        """
        _LOGGER.debug("Attempting to login to ZoneMinder")
        login_post = {'view': 'console', 'action': 'login'}
        if self._username:
            login_post['username'] = self._username
        if self._password:
            login_post['password'] = self._password
        req = requests.post(urljoin(self._server_url, 'index.php'),
                            data=login_post, verify=self._verify_ssl)
        self._cookies = req.cookies
        # Login calls returns a 200 response on both failure and success.
        # The only way to tell if you logged in correctly is to issue an api
        # call.
        req = requests.get(
            urljoin(self._server_url, 'api/host/getVersion.json'),
            cookies=self._cookies,
            timeout=ZoneMinder.DEFAULT_TIMEOUT,
            verify=self._verify_ssl)
        if not req.ok:
            _LOGGER.error("Connection error logging into ZoneMinder")
            return False
        return True

    def get_state(self, api_url) -> dict:
        """Perform a GET request on the specified ZoneMinder API URL."""
        return self._zm_request('get', api_url)

    def change_state(self, api_url, post_data) -> dict:
        """Perform a POST request on the specific ZoneMinder API Url."""
        return self._zm_request('post', api_url, post_data)

    def _zm_request(self, method, api_url, data=None,
                    timeout=DEFAULT_TIMEOUT) -> dict:
        """Perform a request to the ZoneMinder API.

        Returns the decoded JSON body, or {} on connection/decode failure.
        """
        try:
            # Since the API uses sessions that expire, sometimes we need to
            # re-auth if the call fails.
            for _ in range(ZoneMinder.LOGIN_RETRIES):
                req = requests.request(
                    method, urljoin(self._server_url, api_url), data=data,
                    cookies=self._cookies, timeout=timeout,
                    verify=self._verify_ssl)
                if not req.ok:
                    self.login()
                else:
                    break
            # for/else: runs only when the loop never hit `break`, i.e.
            # every attempt failed. Note `req` still holds the last (failed)
            # response and is decoded below anyway.
            else:
                _LOGGER.error('Unable to get API response from ZoneMinder')
            try:
                return req.json()
            except ValueError:
                _LOGGER.exception('JSON decode exception caught while'
                                  'attempting to decode "%s"', req.text)
                return {}
        except requests.exceptions.ConnectionError:
            _LOGGER.exception('Unable to connect to ZoneMinder')
            return {}

    def get_monitors(self) -> List[Monitor]:
        """Get a list of Monitors from the ZoneMinder API."""
        raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
        if not raw_monitors:
            _LOGGER.warning("Could not fetch monitors from ZoneMinder")
            return []
        monitors = []
        for raw_result in raw_monitors['monitors']:
            _LOGGER.debug("Initializing camera %s",
                          raw_result['Monitor']['Id'])
            monitors.append(Monitor(self, raw_result))
        return monitors

    def get_run_states(self) -> List[RunState]:
        """Get a list of RunStates from the ZoneMinder API."""
        raw_states = self.get_state('api/states.json')
        if not raw_states:
            _LOGGER.warning("Could not fetch runstates from ZoneMinder")
            return []
        run_states = []
        for i in raw_states['states']:
            raw_state = i['State']
            _LOGGER.info("Initializing runstate %s", raw_state['Id'])
            run_states.append(RunState(self, raw_state))
        return run_states

    def get_active_state(self) -> Optional[str]:
        """Get the name of the active run state from the ZoneMinder API.

        Returns None when no run state reports itself active.
        """
        for state in self.get_run_states():
            if state.active:
                return state.name
        return None

    def get_zms_url(self) -> str:
        """Get the url to the current ZMS instance."""
        return self._zms_url

    def get_url_with_auth(self, url) -> str:
        """Add the auth credentials to a url (if needed).

        Appends `&user=`/`&pass=` query parameters, so the given url is
        expected to already contain a query string.
        """
        if not self._username:
            return url
        url += '&user={:s}'.format(self._username)
        if not self._password:
            return url
        return url + '&pass={:s}'.format(self._password)

    @property
    def is_available(self) -> bool:
        """Indicate if this ZoneMinder service is currently available."""
        status_response = self.get_state(
            'api/host/daemonCheck.json'
        )
        if not status_response:
            return False
        # daemonCheck returns result == 1 when the ZM daemon is running.
        return status_response.get('result') == 1

    @property
    def verify_ssl(self) -> bool:
        """Indicate whether urls with http(s) should verify the certificate."""
        return self._verify_ssl

    @staticmethod
    def _build_zms_url(server_host, zms_path) -> str:
        """Build the ZMS url to the current ZMS instance."""
        return urljoin(server_host, zms_path)

    @staticmethod
    def _build_server_url(server_host, server_path) -> str:
        """Build the server url making sure it ends in a trailing slash."""
        server_url = urljoin(server_host, server_path)
        if server_url[-1] == '/':
            return server_url
        return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.get_url_with_auth
|
python
|
def get_url_with_auth(self, url) -> str:
    """Add the auth credentials to a url (if needed).

    Credentials are appended as extra query parameters, so the url is
    expected to already carry a query string.
    """
    if not self._username:
        return url
    authed = url + '&user={:s}'.format(self._username)
    if self._password:
        authed = authed + '&pass={:s}'.format(self._password)
    return authed
|
Add the auth credentials to a url (if needed).
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L158-L168
| null |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder.is_available
|
python
|
def is_available(self) -> bool:
    """Indicate if this ZoneMinder service is currently available."""
    response = self.get_state('api/host/daemonCheck.json')
    # Falsy response means the request failed; otherwise the daemon check
    # reports result == 1 when ZoneMinder is up.
    return bool(response) and response.get('result') == 1
|
Indicate if this ZoneMinder service is currently available.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L171-L180
|
[
"def get_state(self, api_url) -> dict:\n \"\"\"Perform a GET request on the specified ZoneMinder API URL.\"\"\"\n return self._zm_request('get', api_url)\n"
] |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
def _build_server_url(server_host, server_path) -> str:
"""Build the server url making sure it ends in a trailing slash."""
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
rohankapoorcom/zm-py
|
zoneminder/zm.py
|
ZoneMinder._build_server_url
|
python
|
def _build_server_url(server_host, server_path) -> str:
server_url = urljoin(server_host, server_path)
if server_url[-1] == '/':
return server_url
return '{}/'.format(server_url)
|
Build the server url making sure it ends in a trailing slash.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/zm.py#L193-L198
| null |
class ZoneMinder:
"""The ZoneMinder API client itself. Create one of these to begin."""
DEFAULT_SERVER_PATH = '/zm/'
DEFAULT_ZMS_PATH = '/zm/cgi-bin/nph-zms'
DEFAULT_TIMEOUT = 10
LOGIN_RETRIES = 2
MONITOR_URL = 'api/monitors.json'
def __init__(self, server_host, username, password,
server_path=DEFAULT_SERVER_PATH,
zms_path=DEFAULT_ZMS_PATH, verify_ssl=True) -> None:
"""Create a ZoneMinder API Client."""
self._server_url = ZoneMinder._build_server_url(server_host,
server_path)
self._zms_url = ZoneMinder._build_zms_url(server_host, zms_path)
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._cookies = None
def login(self):
"""Login to the ZoneMinder API."""
_LOGGER.debug("Attempting to login to ZoneMinder")
login_post = {'view': 'console', 'action': 'login'}
if self._username:
login_post['username'] = self._username
if self._password:
login_post['password'] = self._password
req = requests.post(urljoin(self._server_url, 'index.php'),
data=login_post, verify=self._verify_ssl)
self._cookies = req.cookies
# Login calls returns a 200 response on both failure and success.
# The only way to tell if you logged in correctly is to issue an api
# call.
req = requests.get(
urljoin(self._server_url, 'api/host/getVersion.json'),
cookies=self._cookies,
timeout=ZoneMinder.DEFAULT_TIMEOUT,
verify=self._verify_ssl)
if not req.ok:
_LOGGER.error("Connection error logging into ZoneMinder")
return False
return True
def get_state(self, api_url) -> dict:
"""Perform a GET request on the specified ZoneMinder API URL."""
return self._zm_request('get', api_url)
def change_state(self, api_url, post_data) -> dict:
"""Perform a POST request on the specific ZoneMinder API Url."""
return self._zm_request('post', api_url, post_data)
def _zm_request(self, method, api_url, data=None,
timeout=DEFAULT_TIMEOUT) -> dict:
"""Perform a request to the ZoneMinder API."""
try:
# Since the API uses sessions that expire, sometimes we need to
# re-auth if the call fails.
for _ in range(ZoneMinder.LOGIN_RETRIES):
req = requests.request(
method, urljoin(self._server_url, api_url), data=data,
cookies=self._cookies, timeout=timeout,
verify=self._verify_ssl)
if not req.ok:
self.login()
else:
break
else:
_LOGGER.error('Unable to get API response from ZoneMinder')
try:
return req.json()
except ValueError:
_LOGGER.exception('JSON decode exception caught while'
'attempting to decode "%s"', req.text)
return {}
except requests.exceptions.ConnectionError:
_LOGGER.exception('Unable to connect to ZoneMinder')
return {}
def get_monitors(self) -> List[Monitor]:
"""Get a list of Monitors from the ZoneMinder API."""
raw_monitors = self._zm_request('get', ZoneMinder.MONITOR_URL)
if not raw_monitors:
_LOGGER.warning("Could not fetch monitors from ZoneMinder")
return []
monitors = []
for raw_result in raw_monitors['monitors']:
_LOGGER.debug("Initializing camera %s",
raw_result['Monitor']['Id'])
monitors.append(Monitor(self, raw_result))
return monitors
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
def get_active_state(self) -> Optional[str]:
"""Get the name of the active run state from the ZoneMinder API."""
for state in self.get_run_states():
if state.active:
return state.name
return None
def set_active_state(self, state_name):
"""
Set the ZoneMinder run state to the given state name, via ZM API.
Note that this is a long-running API call; ZoneMinder changes the state
of each camera in turn, and this GET does not receive a response until
all cameras have been updated. Even on a reasonably powerful machine,
this call can take ten (10) or more seconds **per camera**. This method
sets a timeout of 120, which should be adequate for most users.
"""
_LOGGER.info('Setting ZoneMinder run state to state %s', state_name)
return self._zm_request('GET',
'api/states/change/{}.json'.format(state_name),
timeout=120)
def get_zms_url(self) -> str:
"""Get the url to the current ZMS instance."""
return self._zms_url
def get_url_with_auth(self, url) -> str:
"""Add the auth credentials to a url (if needed)."""
if not self._username:
return url
url += '&user={:s}'.format(self._username)
if not self._password:
return url
return url + '&pass={:s}'.format(self._password)
@property
def is_available(self) -> bool:
"""Indicate if this ZoneMinder service is currently available."""
status_response = self.get_state(
'api/host/daemonCheck.json'
)
if not status_response:
return False
return status_response.get('result') == 1
@property
def verify_ssl(self) -> bool:
"""Indicate whether urls with http(s) should verify the certificate."""
return self._verify_ssl
@staticmethod
def _build_zms_url(server_host, zms_path) -> str:
"""Build the ZMS url to the current ZMS instance."""
return urljoin(server_host, zms_path)
@staticmethod
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
TimePeriod.get_time_period
|
python
|
def get_time_period(value):
    """Get the corresponding TimePeriod from the value.

    Example values: 'all', 'hour', 'day', 'week', or 'month'.
    """
    matches = (tp for tp in TimePeriod if tp.period == value)
    for tp in matches:
        return tp
    raise ValueError('{} is not a valid TimePeriod'.format(value))
|
Get the corresponding TimePeriod from the value.
Example values: 'all', 'hour', 'day', 'week', or 'month'.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L41-L49
| null |
class TimePeriod(Enum):
"""Represents a period of time to check for events."""
@property
def period(self) -> str:
"""Get the period of time."""
# pylint: disable=unsubscriptable-object
return self.value[0]
@property
def title(self) -> str:
"""Explains what is measured in this period."""
# pylint: disable=unsubscriptable-object
return self.value[1]
@staticmethod
ALL = ('all', 'Events')
HOUR = ('hour', 'Events Last Hour')
DAY = ('day', 'Events Last Day')
WEEK = ('week', 'Events Last Week')
MONTH = ('month', 'Events Last Month')
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor.update_monitor
|
python
|
def update_monitor(self):
    """Update the monitor and monitor status from the ZM server."""
    self._raw_result = self._client.get_state(self._monitor_url)['monitor']
|
Update the monitor and monitor status from the ZM server.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L86-L89
| null |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value})
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM
@property
def is_available(self) -> bool:
"""Indicate if this Monitor is currently available."""
status_response = self._client.get_state(
'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get availability for monitor {}'.format(
self._monitor_id
))
return False
# Monitor_Status was only added in ZM 1.32.3
monitor_status = self._raw_result.get('Monitor_Status', None)
capture_fps = monitor_status and monitor_status['CaptureFPS']
return status_response.get('status', False) and capture_fps != "0.00"
def get_events(self, time_period, include_archived=False) -> Optional[int]:
"""Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
"""
date_filter = '1%20{}'.format(time_period.period)
if time_period == TimePeriod.ALL:
# The consoleEvents API uses DATE_SUB, so give it
# something large
date_filter = '100%20year'
archived_filter = '/Archived=:0'
if include_archived:
archived_filter = ''
event = self._client.get_state(
'api/events/consoleEvents/{}{}.json'.format(
date_filter,
archived_filter
)
)
try:
events_by_monitor = event['results']
if isinstance(events_by_monitor, list):
return 0
return events_by_monitor.get(str(self._monitor_id), 0)
except (TypeError, KeyError, AttributeError):
return None
def _build_image_url(self, monitor, mode) -> str:
"""Build and return a ZoneMinder camera image url."""
query = urlencode({
'mode': mode,
'buffer': monitor['StreamReplayBuffer'],
'monitor': monitor['Id'],
})
url = '{zms_url}?{query}'.format(
zms_url=self._client.get_zms_url(), query=query)
_LOGGER.debug('Monitor %s %s URL (without auth): %s',
monitor['Id'], mode, url)
return self._client.get_url_with_auth(url)
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor.function
|
python
|
def function(self, new_function):
    """Set the MonitorState of this Monitor."""
    payload = {'Monitor[Function]': new_function.value}
    self._client.change_state(self._monitor_url, payload)
|
Set the MonitorState of this Monitor.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L99-L103
|
[
"def update_monitor(self):\n \"\"\"Update the monitor and monitor status from the ZM server.\"\"\"\n result = self._client.get_state(self._monitor_url)\n self._raw_result = result['monitor']\n"
] |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor']
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM
@property
def is_available(self) -> bool:
"""Indicate if this Monitor is currently available."""
status_response = self._client.get_state(
'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get availability for monitor {}'.format(
self._monitor_id
))
return False
# Monitor_Status was only added in ZM 1.32.3
monitor_status = self._raw_result.get('Monitor_Status', None)
capture_fps = monitor_status and monitor_status['CaptureFPS']
return status_response.get('status', False) and capture_fps != "0.00"
def get_events(self, time_period, include_archived=False) -> Optional[int]:
"""Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
"""
date_filter = '1%20{}'.format(time_period.period)
if time_period == TimePeriod.ALL:
# The consoleEvents API uses DATE_SUB, so give it
# something large
date_filter = '100%20year'
archived_filter = '/Archived=:0'
if include_archived:
archived_filter = ''
event = self._client.get_state(
'api/events/consoleEvents/{}{}.json'.format(
date_filter,
archived_filter
)
)
try:
events_by_monitor = event['results']
if isinstance(events_by_monitor, list):
return 0
return events_by_monitor.get(str(self._monitor_id), 0)
except (TypeError, KeyError, AttributeError):
return None
def _build_image_url(self, monitor, mode) -> str:
"""Build and return a ZoneMinder camera image url."""
query = urlencode({
'mode': mode,
'buffer': monitor['StreamReplayBuffer'],
'monitor': monitor['Id'],
})
url = '{zms_url}?{query}'.format(
zms_url=self._client.get_zms_url(), query=query)
_LOGGER.debug('Monitor %s %s URL (without auth): %s',
monitor['Id'], mode, url)
return self._client.get_url_with_auth(url)
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor.is_recording
|
python
|
def is_recording(self) -> Optional[bool]:
    """Indicate if this Monitor is currently recording.

    :returns: ``True``/``False`` when the alarm status could be read,
        or ``None`` when the ZoneMinder API gave no answer.
    """
    status_response = self._client.get_state(
        'api/monitors/alarm/id:{}/command:status.json'.format(
            self._monitor_id
        )
    )
    if not status_response:
        _LOGGER.warning('Could not get status for monitor {}'.format(
            self._monitor_id
        ))
        return None
    status = status_response.get('status')
    # ZoneMinder API returns an empty string to indicate that this monitor
    # cannot record right now
    if status == '':
        return False
    # STATE_ALARM is the numeric alarm state reported by ZoneMinder.
    return int(status) == STATE_ALARM
|
Indicate if this Monitor is currently recording.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L121-L140
| null |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor']
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value})
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
@property
def is_available(self) -> bool:
"""Indicate if this Monitor is currently available."""
status_response = self._client.get_state(
'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get availability for monitor {}'.format(
self._monitor_id
))
return False
# Monitor_Status was only added in ZM 1.32.3
monitor_status = self._raw_result.get('Monitor_Status', None)
capture_fps = monitor_status and monitor_status['CaptureFPS']
return status_response.get('status', False) and capture_fps != "0.00"
def get_events(self, time_period, include_archived=False) -> Optional[int]:
"""Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
"""
date_filter = '1%20{}'.format(time_period.period)
if time_period == TimePeriod.ALL:
# The consoleEvents API uses DATE_SUB, so give it
# something large
date_filter = '100%20year'
archived_filter = '/Archived=:0'
if include_archived:
archived_filter = ''
event = self._client.get_state(
'api/events/consoleEvents/{}{}.json'.format(
date_filter,
archived_filter
)
)
try:
events_by_monitor = event['results']
if isinstance(events_by_monitor, list):
return 0
return events_by_monitor.get(str(self._monitor_id), 0)
except (TypeError, KeyError, AttributeError):
return None
def _build_image_url(self, monitor, mode) -> str:
"""Build and return a ZoneMinder camera image url."""
query = urlencode({
'mode': mode,
'buffer': monitor['StreamReplayBuffer'],
'monitor': monitor['Id'],
})
url = '{zms_url}?{query}'.format(
zms_url=self._client.get_zms_url(), query=query)
_LOGGER.debug('Monitor %s %s URL (without auth): %s',
monitor['Id'], mode, url)
return self._client.get_url_with_auth(url)
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor.is_available
|
python
|
def is_available(self) -> bool:
    """Indicate if this Monitor is currently available.

    Queries the zmc capture daemon status; also treats a capture rate
    of "0.00" fps as unavailable when the ZM server reports one.
    """
    status_response = self._client.get_state(
        'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
            self._monitor_id
        )
    )
    if not status_response:
        _LOGGER.warning('Could not get availability for monitor {}'.format(
            self._monitor_id
        ))
        return False
    # Monitor_Status was only added in ZM 1.32.3
    # (older servers: monitor_status is None, so capture_fps stays falsy
    # and only the daemon status decides the result).
    monitor_status = self._raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']
    return status_response.get('status', False) and capture_fps != "0.00"
|
Indicate if this Monitor is currently available.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L143-L161
| null |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor']
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value})
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM
@property
def get_events(self, time_period, include_archived=False) -> Optional[int]:
"""Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
"""
date_filter = '1%20{}'.format(time_period.period)
if time_period == TimePeriod.ALL:
# The consoleEvents API uses DATE_SUB, so give it
# something large
date_filter = '100%20year'
archived_filter = '/Archived=:0'
if include_archived:
archived_filter = ''
event = self._client.get_state(
'api/events/consoleEvents/{}{}.json'.format(
date_filter,
archived_filter
)
)
try:
events_by_monitor = event['results']
if isinstance(events_by_monitor, list):
return 0
return events_by_monitor.get(str(self._monitor_id), 0)
except (TypeError, KeyError, AttributeError):
return None
def _build_image_url(self, monitor, mode) -> str:
"""Build and return a ZoneMinder camera image url."""
query = urlencode({
'mode': mode,
'buffer': monitor['StreamReplayBuffer'],
'monitor': monitor['Id'],
})
url = '{zms_url}?{query}'.format(
zms_url=self._client.get_zms_url(), query=query)
_LOGGER.debug('Monitor %s %s URL (without auth): %s',
monitor['Id'], mode, url)
return self._client.get_url_with_auth(url)
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor.get_events
|
python
|
def get_events(self, time_period, include_archived=False) -> Optional[int]:
    """Get the number of events that have occurred on this Monitor
    within the given time period.

    :param time_period: a ``TimePeriod`` member bounding the query.
    :param include_archived: when ``True``, archived events are counted.
    :returns: the event count (0 when none), or ``None`` when the API
        response is missing or malformed.
    """
    date_filter = '1%20{}'.format(time_period.period)
    if time_period == TimePeriod.ALL:
        # The consoleEvents API uses DATE_SUB, so give it
        # something large
        date_filter = '100%20year'
    archived_filter = '/Archived=:0'
    if include_archived:
        archived_filter = ''
    event = self._client.get_state(
        'api/events/consoleEvents/{}{}.json'.format(
            date_filter,
            archived_filter
        )
    )
    try:
        events_by_monitor = event['results']
        # The API serializes an empty result set as a list, not a mapping.
        if isinstance(events_by_monitor, list):
            return 0
        return events_by_monitor.get(str(self._monitor_id), 0)
    except (TypeError, KeyError, AttributeError):
        # Covers event being None, 'results' missing, or an unexpected type.
        return None
|
Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L163-L192
| null |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor']
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value})
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM
@property
def is_available(self) -> bool:
"""Indicate if this Monitor is currently available."""
status_response = self._client.get_state(
'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get availability for monitor {}'.format(
self._monitor_id
))
return False
# Monitor_Status was only added in ZM 1.32.3
monitor_status = self._raw_result.get('Monitor_Status', None)
capture_fps = monitor_status and monitor_status['CaptureFPS']
return status_response.get('status', False) and capture_fps != "0.00"
def _build_image_url(self, monitor, mode) -> str:
"""Build and return a ZoneMinder camera image url."""
query = urlencode({
'mode': mode,
'buffer': monitor['StreamReplayBuffer'],
'monitor': monitor['Id'],
})
url = '{zms_url}?{query}'.format(
zms_url=self._client.get_zms_url(), query=query)
_LOGGER.debug('Monitor %s %s URL (without auth): %s',
monitor['Id'], mode, url)
return self._client.get_url_with_auth(url)
|
rohankapoorcom/zm-py
|
zoneminder/monitor.py
|
Monitor._build_image_url
|
python
|
def _build_image_url(self, monitor, mode) -> str:
    """Build and return a ZoneMinder camera image url.

    :param monitor: raw monitor mapping from the ZM API.
    :param mode: stream mode, e.g. ``jpeg`` or ``single``.
    """
    params = {
        'mode': mode,
        'buffer': monitor['StreamReplayBuffer'],
        'monitor': monitor['Id'],
    }
    url = '{}?{}'.format(self._client.get_zms_url(), urlencode(params))
    _LOGGER.debug('Monitor %s %s URL (without auth): %s',
                  monitor['Id'], mode, url)
    return self._client.get_url_with_auth(url)
|
Build and return a ZoneMinder camera image url.
|
train
|
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L194-L205
| null |
class Monitor:
"""Represents a Monitor from ZoneMinder."""
def __init__(self, client, raw_result):
"""Create a new Monitor."""
self._client = client
self._raw_result = raw_result
raw_monitor = raw_result['Monitor']
self._monitor_id = int(raw_monitor['Id'])
self._monitor_url = 'api/monitors/{}.json'.format(self._monitor_id)
self._name = raw_monitor['Name']
self._controllable = bool(raw_monitor['Controllable'])
self._mjpeg_image_url = self._build_image_url(
raw_monitor, 'jpeg')
self._still_image_url = self._build_image_url(
raw_monitor, 'single')
@property
def id(self) -> int:
"""Get the ZoneMinder id number of this Monitor."""
# pylint: disable=invalid-name
return self._monitor_id
@property
def name(self) -> str:
"""Get the name of this Monitor."""
return self._name
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor']
@property
def function(self) -> MonitorState:
"""Get the MonitorState of this Monitor."""
self.update_monitor()
return MonitorState(self._raw_result['Monitor']['Function'])
@function.setter
def function(self, new_function):
"""Set the MonitorState of this Monitor."""
self._client.change_state(
self._monitor_url,
{'Monitor[Function]': new_function.value})
@property
def controllable(self) -> bool:
"""Indicate whether this Monitor is movable."""
return self._controllable
@property
def mjpeg_image_url(self) -> str:
"""Get the motion jpeg (mjpeg) image url of this Monitor."""
return self._mjpeg_image_url
@property
def still_image_url(self) -> str:
"""Get the still jpeg image url of this Monitor."""
return self._still_image_url
@property
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM
@property
def is_available(self) -> bool:
"""Indicate if this Monitor is currently available."""
status_response = self._client.get_state(
'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get availability for monitor {}'.format(
self._monitor_id
))
return False
# Monitor_Status was only added in ZM 1.32.3
monitor_status = self._raw_result.get('Monitor_Status', None)
capture_fps = monitor_status and monitor_status['CaptureFPS']
return status_response.get('status', False) and capture_fps != "0.00"
def get_events(self, time_period, include_archived=False) -> Optional[int]:
"""Get the number of events that have occurred on this Monitor.
Specifically only gets events that have occurred within the TimePeriod
provided.
"""
date_filter = '1%20{}'.format(time_period.period)
if time_period == TimePeriod.ALL:
# The consoleEvents API uses DATE_SUB, so give it
# something large
date_filter = '100%20year'
archived_filter = '/Archived=:0'
if include_archived:
archived_filter = ''
event = self._client.get_state(
'api/events/consoleEvents/{}{}.json'.format(
date_filter,
archived_filter
)
)
try:
events_by_monitor = event['results']
if isinstance(events_by_monitor, list):
return 0
return events_by_monitor.get(str(self._monitor_id), 0)
except (TypeError, KeyError, AttributeError):
return None
|
neithere/monk
|
monk/modeling.py
|
StructuredDictMixin._insert_defaults
|
python
|
def _insert_defaults(self):
merged = merge_defaults(self.structure, self)
self.update(merged)
|
Inserts default values from :attr:`StructuredDictMixin.structure`
to `self` by merging the two structures
(see :func:`monk.manipulation.merge_defaults`).
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/modeling.py#L127-L133
|
[
"def merge_defaults(spec, value):\n \"\"\"\n Returns a copy of `value` recursively updated to match the `spec`:\n\n * New values are added whenever possible (including nested ones).\n * Existing values are never changed or removed.\n\n * Exception: container values (lists, dictionaries) may be populated;\n see respective merger functions for details.\n\n The result may not pass validation against the `spec`\n in the following cases:\n\n a) a required value is missing and the spec does not provide defaults;\n b) an existing value is invalid.\n\n The business logic is as follows:\n\n * if `value` is empty, use default value from `spec`;\n * if `value` is present or `spec` has no default value:\n\n * if `spec` datatype is present as a key in `mergers`,\n use the respective merger function to obtain the value;\n * if no merger is assigned to the datatype, use `fallback` function.\n\n See documentation on concrete merger functions for further details.\n\n :spec:\n A \"natural\" or \"verbose\" spec.\n\n :value:\n The value to merge into the `spec`.\n\n Examples::\n\n >>> merge_defaults('foo', None)\n 'foo'\n >>> merge_defaults('foo', 'bar')\n 'bar'\n >>> merge_defaults({'a': 'foo'}, {})\n {'a': 'foo'}\n >>> merge_defaults({'a': [{'b': 123}]},\n ... {'a': [{'b': None},\n ... {'x': 0}]})\n {'a': [{'b': 123}, {'b': 123, 'x': 0}]}\n\n \"\"\"\n\n validator = translate(spec)\n\n return validator.get_default_for(value)\n"
] |
class StructuredDictMixin(object):
""" A dictionary with structure specification and validation.
.. attribute:: structure
The document structure specification. For details see
:func:`monk.shortcuts.validate`.
"""
structure = {}
#defaults = {}
#required = []
#validators = {}
#with_skeleton = True
def validate(self):
validate(self.structure, self)
|
neithere/monk
|
monk/shortcuts.py
|
opt_key
|
python
|
def opt_key(spec):
    """Mark a dictionary key as optional.

    Like :func:`optional`, but a plain string is wrapped in
    :class:`~monk.validators.Equals` (a literal key name) rather than
    ``IsA``.
    """
    wrapped = Equals(spec) if isinstance(spec, text_types) else spec
    return optional(wrapped)
|
Returns a validator which allows the value to be missing.
Similar to :func:`optional` but wraps a string in
:class:`~monk.validators.Equals` instead of :class:`~monk.validators.IsA`.
Intended for dictionary keys.
::
>>> opt_key(str) == IsA(str) | ~Exists()
True
>>> opt_key('foo') == Equals('foo') | ~Exists()
True
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/shortcuts.py#L61-L78
|
[
"def optional(spec):\n \"\"\"\n Returns a validator which allows the value to be missing.\n\n ::\n\n >>> optional(str) == IsA(str) | ~Exists()\n True\n >>> optional('foo') == IsA(str, default='foo') | ~Exists()\n True\n\n Note that you should normally :func:`opt_key` to mark dictionary keys\n as optional.\n \"\"\"\n return translate(spec) | ~Exists()\n"
] |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~
Shortcuts
~~~~~~~~~
"""
from .compat import text_types
from . import Any, Equals, Exists, InRange, translate
__all__ = ['nullable', 'optional', 'opt_key', 'one_of']
def nullable(spec):
    """
    Build a validator that accepts a value matching `spec` or ``None``.

    ::

        >>> nullable(str) == IsA(str) | Equals(None)
        True
    """
    base_validator = translate(spec)
    return base_validator | Equals(None)
def optional(spec):
    """
    Build a validator that tolerates a missing value.

    ::

        >>> optional(str) == IsA(str) | ~Exists()
        True
        >>> optional('foo') == IsA(str, default='foo') | ~Exists()
        True

    For dictionary keys prefer :func:`opt_key`.
    """
    missing_ok = ~Exists()
    return translate(spec) | missing_ok
def one_of(choices, first_is_default=False, as_rules=False):
"""
A wrapper for :class:`Any`.
:param as_rules:
`bool`. If `False` (by default), each element of `choices`
is wrapped in the :class:`Equals` validator so they are interpreted
as literals.
.. deprecated:: 0.13
Use :class:`Any` instead.
"""
assert choices
if as_rules:
None # for coverage
else:
choices = [Equals(x) for x in choices]
return Any(choices, first_is_default=first_is_default)
def in_range(start, stop, first_is_default=False):
"""
A shortcut for a rule with :func:`~monk.validators.validate_range` validator.
::
# these expressions are equal:
in_range(0, 200)
Rule(int, validators=[monk.validators.validate_range(0, 200)])
# default value can be taken from the first choice:
in_range(0, 200, first_is_default=True)
Rule(int, default=0,
validators=[monk.validators.validate_range(0, 200)])
.. deprecated:: 0.13
Use :class:`InRange` instead.
"""
if first_is_default:
default_value = start
else:
default_value = None
return InRange(start, stop, default=default_value)
|
neithere/monk
|
monk/shortcuts.py
|
one_of
|
python
|
def one_of(choices, first_is_default=False, as_rules=False):
    """
    A wrapper for :class:`Any`.

    :param as_rules:
        `bool`. When `False` (the default) every element of `choices`
        is wrapped in the :class:`Equals` validator, i.e. treated as a
        literal.

    .. deprecated:: 0.13
       Use :class:`Any` instead.
    """
    assert choices
    if not as_rules:
        choices = [Equals(choice) for choice in choices]
    return Any(choices, first_is_default=first_is_default)
|
A wrapper for :class:`Any`.
:param as_rules:
`bool`. If `False` (by default), each element of `choices`
is wrapped in the :class:`Equals` validator so they are interpreted
as literals.
.. deprecated:: 0.13
Use :class:`Any` instead.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/shortcuts.py#L81-L102
| null |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~
Shortcuts
~~~~~~~~~
"""
from .compat import text_types
from . import Any, Equals, Exists, InRange, translate
__all__ = ['nullable', 'optional', 'opt_key', 'one_of']
def nullable(spec):
"""
Returns a validator which allows the value to be `None`.
::
>>> nullable(str) == IsA(str) | Equals(None)
True
"""
return translate(spec) | Equals(None)
def optional(spec):
"""
Returns a validator which allows the value to be missing.
::
>>> optional(str) == IsA(str) | ~Exists()
True
>>> optional('foo') == IsA(str, default='foo') | ~Exists()
True
Note that you should normally :func:`opt_key` to mark dictionary keys
as optional.
"""
return translate(spec) | ~Exists()
def opt_key(spec):
"""
Returns a validator which allows the value to be missing.
Similar to :func:`optional` but wraps a string in
:class:`~monk.validators.Equals` instead of :class:`~monk.validators.IsA`.
Intended for dictionary keys.
::
>>> opt_key(str) == IsA(str) | ~Exists()
True
>>> opt_key('foo') == Equals('foo') | ~Exists()
True
"""
if isinstance(spec, text_types):
spec = Equals(spec)
return optional(spec)
def in_range(start, stop, first_is_default=False):
"""
A shortcut for a rule with :func:`~monk.validators.validate_range` validator.
::
# these expressions are equal:
in_range(0, 200)
Rule(int, validators=[monk.validators.validate_range(0, 200)])
# default value can be taken from the first choice:
in_range(0, 200, first_is_default=True)
Rule(int, default=0,
validators=[monk.validators.validate_range(0, 200)])
.. deprecated:: 0.13
Use :class:`InRange` instead.
"""
if first_is_default:
default_value = start
else:
default_value = None
return InRange(start, stop, default=default_value)
|
neithere/monk
|
monk/shortcuts.py
|
in_range
|
python
|
def in_range(start, stop, first_is_default=False):
    """
    Shortcut for a rule with :func:`~monk.validators.validate_range`.

    ::

        # these expressions are equal:
        in_range(0, 200)
        Rule(int, validators=[monk.validators.validate_range(0, 200)])

        # default value can be taken from the first choice:
        in_range(0, 200, first_is_default=True)

    .. deprecated:: 0.13
       Use :class:`InRange` instead.
    """
    default_value = start if first_is_default else None
    return InRange(start, stop, default=default_value)
|
A shortcut for a rule with :func:`~monk.validators.validate_range` validator.
::
# these expressions are equal:
in_range(0, 200)
Rule(int, validators=[monk.validators.validate_range(0, 200)])
# default value can be taken from the first choice:
in_range(0, 200, first_is_default=True)
Rule(int, default=0,
validators=[monk.validators.validate_range(0, 200)])
.. deprecated:: 0.13
Use :class:`InRange` instead.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/shortcuts.py#L105-L133
| null |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~
Shortcuts
~~~~~~~~~
"""
from .compat import text_types
from . import Any, Equals, Exists, InRange, translate
__all__ = ['nullable', 'optional', 'opt_key', 'one_of']
def nullable(spec):
"""
Returns a validator which allows the value to be `None`.
::
>>> nullable(str) == IsA(str) | Equals(None)
True
"""
return translate(spec) | Equals(None)
def optional(spec):
"""
Returns a validator which allows the value to be missing.
::
>>> optional(str) == IsA(str) | ~Exists()
True
>>> optional('foo') == IsA(str, default='foo') | ~Exists()
True
Note that you should normally :func:`opt_key` to mark dictionary keys
as optional.
"""
return translate(spec) | ~Exists()
def opt_key(spec):
"""
Returns a validator which allows the value to be missing.
Similar to :func:`optional` but wraps a string in
:class:`~monk.validators.Equals` instead of :class:`~monk.validators.IsA`.
Intended for dictionary keys.
::
>>> opt_key(str) == IsA(str) | ~Exists()
True
>>> opt_key('foo') == Equals('foo') | ~Exists()
True
"""
if isinstance(spec, text_types):
spec = Equals(spec)
return optional(spec)
def one_of(choices, first_is_default=False, as_rules=False):
"""
A wrapper for :class:`Any`.
:param as_rules:
`bool`. If `False` (by default), each element of `choices`
is wrapped in the :class:`Equals` validator so they are interpreted
as literals.
.. deprecated:: 0.13
Use :class:`Any` instead.
"""
assert choices
if as_rules:
None # for coverage
else:
choices = [Equals(x) for x in choices]
return Any(choices, first_is_default=first_is_default)
|
neithere/monk
|
monk/helpers.py
|
walk_dict
|
python
|
def walk_dict(data):
    """Yield ``(keys, value)`` pairs for every item in *data*, descending
    into nested dictionaries.

    ``keys`` is a tuple path (e.g. ``('foo', 'bar')`` for a nested key);
    a nested dictionary itself is reported with a ``None`` value before
    its children are yielded.
    """
    assert hasattr(data, '__getitem__')
    for key, value in data.items():
        if not isinstance(value, dict):
            yield (key,), value
            continue
        # Announce the nested dict, then recurse into it.
        yield (key,), None
        for subpath, subvalue in walk_dict(value):
            yield (key,) + subpath, subvalue
|
Generates pairs ``(keys, value)`` for each item in given dictionary,
including nested dictionaries. Each pair contains:
`keys`
a tuple of 1..n keys, e.g. ``('foo',)`` for a key on root level or
``('foo', 'bar')`` for a key in a nested dictionary.
`value`
the value of given key or ``None`` if it is a nested dictionary and
therefore can be further unwrapped.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/helpers.py#L70-L89
|
[
"def walk_dict(data):\n \"\"\" Generates pairs ``(keys, value)`` for each item in given dictionary,\n including nested dictionaries. Each pair contains:\n\n `keys`\n a tuple of 1..n keys, e.g. ``('foo',)`` for a key on root level or\n ``('foo', 'bar')`` for a key in a nested dictionary.\n `value`\n the value of given key or ``None`` if it is a nested dictionary and\n therefore can be further unwrapped.\n \"\"\"\n assert hasattr(data, '__getitem__')\n for key, value in data.items():\n if isinstance(value, dict):\n yield (key,), None\n for keys, value in walk_dict(value):\n path = (key,) + keys\n yield path, value\n else:\n yield (key,), value\n"
] |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~
Helpers
~~~~~~~
"""
from .validators import translate
__all__ = [
# functions
'validate',
'walk_dict',
]
def validate(spec, value):
"""
Validates given value against given specification.
Raises an exception if the value is invalid.
Always returns ``None``.
In fact, it's just a very thin wrapper around the validators.
These three expressions are equal::
IsA(str)('foo')
translate(str)('foo')
validate(str, 'foo')
:spec:
a validator instance or any value digestible by :func:`translate`.
:value:
any value including complex structures.
Can raise:
:class:`MissingKey`
if a dictionary key is in the spec but not in the value.
This applies to root and nested dictionaries.
:class:`InvalidKey`
if a dictionary key is the value but not not in the spec.
:class:`StructureSpecificationError`
if errors were found in spec.
"""
validator = translate(spec)
validator(value)
|
neithere/monk
|
monk/validators.py
|
translate
|
python
|
def translate(value):
if isinstance(value, BaseValidator):
return value
if value is None:
return Anything()
if isinstance(value, type):
return IsA(value)
if type(value) in compat.func_types:
real_value = value()
return IsA(type(real_value), default=real_value)
if isinstance(value, list):
if value == []:
# no inner spec, just an empty list as the default value
return IsA(list)
elif len(value) == 1:
# the only item as spec for each item of the collection
return ListOf(translate(value[0]))
else:
raise StructureSpecificationError(
'Expected a list containing exactly 1 item; '
'got {cnt}: {spec}'.format(cnt=len(value), spec=value))
if isinstance(value, dict):
if not value:
return IsA(dict)
items = []
for k, v in value.items():
if isinstance(k, BaseValidator):
k_validator = k
else:
k_validator = translate(k)
default = k_validator.get_default_for(None)
if default is not None:
k_validator = Equals(default)
v_validator = translate(v)
items.append((k_validator, v_validator))
return DictOf(items)
return IsA(type(value), default=value)
|
Translates given schema from "pythonic" syntax to a validator.
Usage::
>>> translate(str)
IsA(str)
>>> translate('hello')
IsA(str, default='hello')
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L699-L753
|
[
"def translate(value):\n \"\"\"\n Translates given schema from \"pythonic\" syntax to a validator.\n\n Usage::\n\n >>> translate(str)\n IsA(str)\n\n >>> translate('hello')\n IsA(str, default='hello')\n\n \"\"\"\n if isinstance(value, BaseValidator):\n return value\n\n if value is None:\n return Anything()\n\n if isinstance(value, type):\n return IsA(value)\n\n if type(value) in compat.func_types:\n real_value = value()\n return IsA(type(real_value), default=real_value)\n\n if isinstance(value, list):\n if value == []:\n # no inner spec, just an empty list as the default value\n return IsA(list)\n elif len(value) == 1:\n # the only item as spec for each item of the collection\n return ListOf(translate(value[0]))\n else:\n raise StructureSpecificationError(\n 'Expected a list containing exactly 1 item; '\n 'got {cnt}: {spec}'.format(cnt=len(value), spec=value))\n\n if isinstance(value, dict):\n if not value:\n return IsA(dict)\n items = []\n for k, v in value.items():\n if isinstance(k, BaseValidator):\n k_validator = k\n else:\n k_validator = translate(k)\n default = k_validator.get_default_for(None)\n if default is not None:\n k_validator = Equals(default)\n v_validator = translate(v)\n items.append((k_validator, v_validator))\n return DictOf(items)\n\n return IsA(type(value), default=value)\n",
"def func():\n return 'hello'\n"
] |
# coding: utf-8
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~
Validators
~~~~~~~~~~
"""
__all__ = [
'BaseValidator',
# combinators
'BaseCombinator',
'All',
'Any',
# requirements
'BaseRequirement',
'Anything',
'Exists',
'IsA',
'HasAttr',
'Equals',
'Contains',
'InRange',
'Length',
'ListOf',
'ListOfAll',
'ListOfAny',
'DictOf',
# functions
'translate',
# special objects
'MISSING',
]
import copy
from . import compat
from .errors import (
CombinedValidationError, AtLeastOneFailed, AllFailed, ValidationError,
NoDefaultValue, InvalidKeys, MissingKeys, StructureSpecificationError,
DictValueError,
)
#: The value is valid if any of its items passes validation.
ITEM_STRATEGY_ANY = 'any'
#: The value is valid if all of its items pass validation.
ITEM_STRATEGY_ALL = 'all'
class MISSING:
"""
Stub for Exists validator to pass if the value is missing
(e.g. for dictionary keys).
"""
pass
def _reluctantly_translate(spec):
# `translate()` can do it itself but some validators have the `implies`
# attribute which can trigger instantiation of a BaseValidator subclass
# before the translation function is ready.
#
# We don't want to defer its usage as far as we can because it is best
# to fully build the validator in order to fail early.
#
# So this function is just a small barrier that prevents NameError
# in some cases.
if isinstance(spec, BaseValidator):
return spec
else:
return translate(spec)
class BaseValidator(object):
error_class = ValidationError
_default = NotImplemented
negated = False
def _combine(self, other, combinator):
# XXX should we flatten same-logic one-item combs?
if isinstance(other, type) and issubclass(other, BaseValidator):
# e.g. Exists instead of Exists()
raise TypeError('got {cls} class instead of its instance'
.format(cls=other.__name__))
return combinator([self, _reluctantly_translate(other)])
def _merge(self, value):
if value is not None:
raise NoDefaultValue('value is not None')
if self._default is NotImplemented:
raise NoDefaultValue('self._default is not implemented')
return self._default
def __and__(self, other):
return self._combine(other, All)
def __or__(self, other):
return self._combine(other, Any)
def __eq__(self, other):
return isinstance(other, type(self)) and self.__dict__ == other.__dict__
def __invert__(self):
clone = copy.deepcopy(self)
clone.negated = not self.negated
return clone
def __call__(self, value):
try:
self._check(value)
except ValidationError:
if self.negated:
return
else:
raise
else:
if self.negated:
self._raise_error(value)
def __hash__(self):
# TODO think this over and check Python docs
#return hash(((k,v) for k,v in self.__dict__.items()))
return hash('validator_'+str(self.__dict__))
def get_default_for(self, value, silent=True):
try:
return self._merge(value)
except NoDefaultValue:
if silent:
return value
else:
raise
def _check(self, value):
raise NotImplementedError
def _raise_error(self, value):
raise self.error_class(repr(self))
class BaseCombinator(BaseValidator):
error_class = CombinedValidationError
break_on_first_fail = False
_repr_tmpl = '{not_}({items})'
_repr_items_sep = '; '
def __init__(self, specs, default=None, first_is_default=False):
assert specs
self._specs = [_reluctantly_translate(s) for s in specs]
self._default = default
self._first_is_default = first_is_default
def _check(self, value):
errors = []
for spec in self._specs:
# TODO: group errors by exception type
# TODO: try recursive validators after all flat ones are OK
# (may be not a good idea because the order may matter)
#if spec.is_recursive and errors:
# # Don't collect nested errors if we already have one here.
# # Another optimized strategy would be to fail early instead of
# # trying to collect all exceptions for the node.
# continue
try:
spec(value)
except ValidationError as e:
if self.break_on_first_fail:
# don't even wrap the error
raise
errors.append(e)
if not self.can_tolerate(errors):
raise self.error_class(*errors)
def __repr__(self):
return self._repr_tmpl.format(
cls=self.__class__.__name__,
items=self._repr_items_sep.join(map(str, self._specs)),
not_='not ' if self.negated else '')
def can_tolerate(self, errors):
raise NotImplementedError
def _merge(self, value):
if self._default:
return self._default
defaults = []
for choice in self._specs:
try:
default = choice.get_default_for(value, silent=False)
except NoDefaultValue:
pass
else:
defaults.append(default)
if not defaults:
return value
if len(defaults) == 1:
return defaults[0]
else:
if self._first_is_default:
return defaults[0]
else:
return value
class All(BaseCombinator):
"""
Requires that the value passes all nested validators.
"""
error_class = AtLeastOneFailed
break_on_first_fail = True
_repr_items_sep = ' and '
def can_tolerate(self, errors):
# TODO: fail early, work as `or` does
# (this also enables basic if-then in the schema)
if not errors:
return True
class Any(BaseCombinator):
"""
Requires that the value passes at least one of nested validators.
"""
error_class = AllFailed
_repr_items_sep = ' or '
def can_tolerate(self, errors):
if len(errors) < len(self._specs):
return True
class BaseRequirement(BaseValidator):
# a hint for combinators, see their code
is_recursive = False
implies = NotImplemented
def __call__(self, value):
if self.implies is not NotImplemented:
self.implies(value)
super(BaseRequirement, self).__call__(value)
def _represent(self):
return self.__dict__
def __repr__(self):
return '{negated}{cls}({rep})'.format(
cls=self.__class__.__name__,
rep=self._represent(),
negated='~' if self.negated else '')
class Anything(BaseRequirement):
"""
Any values passes validation.
"""
def _check(self, value):
pass
def _represent(self):
return ''
class IsA(BaseRequirement):
"""
Requires that the value is an instance of given type.
"""
def __init__(self, expected_type, default=None):
self.expected_type = expected_type
self._default = default
def _check(self, value):
if not isinstance(value, self.expected_type):
self._raise_error(value)
def __repr__(self):
s = 'must be {pattern_}'.format(pattern_=self.expected_type.__name__)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
class Equals(BaseRequirement):
"""
Requires that the value equals given expected value.
"""
def __init__(self, expected_value):
self._expected_value = expected_value
def _check(self, value):
if self._expected_value != value:
self._raise_error(value)
def __repr__(self):
s = 'must equal {pattern_!r}'.format(pattern_=self._expected_value)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
@property
def _default(self):
return self._expected_value
class Contains(BaseRequirement):
"""
Requires that the value contains given expected value.
"""
def __init__(self, expected_value):
self._expected_value = expected_value
def _check(self, value):
if self._expected_value not in value:
self._raise_error(value)
def __repr__(self):
s = 'must contain {pattern_!r}'.format(pattern_=self._expected_value)
if self.negated:
s = 'not ({s})'.format(s=s)
return s
@property
def _default(self):
return self._expected_value
class Exists(BaseRequirement):
"""
Requires that the value exists. Obviously this only makes sense in
special cases like dictionary keys; otherwise there's simply nothing to
validate. Note that this is *not* a check against `None` or `False`.
"""
def __init__(self, default=None):
self._default = default
def _check(self, value):
if value is MISSING:
self._raise_error(value)
def __repr__(self):
if self.negated:
return 'must not exist'
else:
return 'must exist'
class BaseListOf(BaseRequirement):
"""
The base class for validating lists. Supports different error toleration
strategies which can be selected by subclasses. In many aspects this is
similar to :class:`BaseCombinator`.
"""
implies = IsA(list)
item_strategy = NotImplemented
error_class = CombinedValidationError
is_recursive = True
def __init__(self, validator, default=None):
self._nested_validator = translate(validator)
self._default = default
def _check(self, value):
if not value:
try:
self._nested_validator(MISSING)
except ValidationError as e:
raise ValidationError('lacks item: {error}'
.format(error=e))
errors = []
for i, nested_value in enumerate(value):
try:
self._nested_validator(nested_value)
except ValidationError as e:
annotated_error = ValidationError(
'item #{elem}: {error}'.format(elem=i, error=e))
if self.item_strategy == ITEM_STRATEGY_ALL:
raise annotated_error
errors.append(annotated_error)
if self.can_tolerate(errors, value):
return
raise self.error_class(*errors)
def can_tolerate(self, errors, value):
if self.item_strategy == ITEM_STRATEGY_ALL:
if errors:
return False
else:
return True
elif self.item_strategy == ITEM_STRATEGY_ANY:
if len(errors) < len(value):
return True
else:
return False
else:
raise ValueError('unknown strategy')
def _represent(self):
return repr(self._nested_validator)
def _merge(self, value):
""" Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
"""
if not value:
return []
if value is not None and not isinstance(value, list):
# bogus value; will not pass validation but should be preserved
return value
item_spec = self._nested_validator
return [x if x is None else item_spec.get_default_for(x) for x in value]
class ListOfAll(BaseListOf):
"""
Requires that the value is a `list` which items match given validator.
Usage::
>>> v = ListOfAll(IsA(int) | IsA(str))
>>> v([123, 'hello'])
>>> v([123, 'hello', 5.5])
Traceback (most recent call last):
...
ValidationError: item #2: must be int or must be str
"""
error_class = AtLeastOneFailed
item_strategy = ITEM_STRATEGY_ALL
class ListOfAny(BaseListOf):
"""
Same as :class:`ListOfAll` but tolerates invalid items as long as there
is at least one valid among them.
"""
error_class = AllFailed
item_strategy = ITEM_STRATEGY_ANY
ListOf = ListOfAll
#@requirement(implies=[IsA(dict)], is_recursive=True, vars=['key', 'req'])
#def dict_contains(ctx, value):
# nested_value = value[ctx['key']]
# ctx['req'](nested_value)
class DictOf(BaseRequirement):
"""
Requires that the value is a `dict` which items match given patterns.
Usage::
>>> v = DictOf([
... # key "name" must exist; its value must be a `str`
... (Equals('name'), IsA(str)),
... # key "age" may not exist; its value must be an `int`
... (Equals('age') | ~Exists(), IsA(int)),
... # there may be other `str` keys with `str` or `int` values
... (IsA(str), IsA(str) | IsA(int)),
... ])
>>> v({'name': 'John'})
>>> v({'name': 'John', 'age': 25})
>>> v({'name': 'John', 'age': 25.5})
Traceback (most recent call last):
...
DictValueError: 'age' value must be int
>>> v({'name': 'John', 'age': 25, 'note': 'custom field'})
>>> v({'name': 'John', 'age': 25, 'note': 5.5})
Traceback (most recent call last):
...
DictValueError: 'note' value must be str or must be int
Note that this validator supports :class:`Exists` to mark keys that can
be missing.
"""
implies = IsA(dict)
def __init__(self, pairs):
self._pairs = pairs
def _represent(self):
return repr(self._pairs)
def _check(self, value):
value = value or {}
validated_data_keys = []
missing_key_specs = []
for k_validator, v_validator in self._pairs:
# NOTE kspec.datatype can be None => any key of any datatype
# NOTE kspec.default can be None => any key of given datatype
# gather data keys that match given kspec;
# then validate them against vspec
matched = False
for k,v in value.items():
if k in validated_data_keys:
continue
# check if this key is described by current key validator;
# if it isn't, just skip it (and try another validator
# on it later on)
try:
k_validator(k)
except (TypeError, ValidationError):
continue
# this key *is* described by current value validator;
# validate the value (it *must* validate)
try:
v_validator(v)
except (ValidationError, TypeError) as e:
if isinstance(e, DictValueError):
msg = 'in {k!r} ({e})'
else:
msg = '{k!r} value {e}'
raise DictValueError(msg.format(k=k, e=e))
validated_data_keys.append(k)
matched = True
# if not matched and not k_validator.optional:
if not matched:
try:
k_validator(MISSING)
except ValidationError:
missing_key_specs.append(k_validator)
# TODO document that unknown keys are checked before missing ones
# check if there are data keys that did not match any key spec;
# if yes, raise InvalidKey for them
if len(validated_data_keys) < len(value):
invalid_keys = set(value) - set(validated_data_keys)
raise InvalidKeys(*invalid_keys)
if missing_key_specs:
# XXX this prints validators, not keys as strings;
# one exception is the Equals validator from which we get
# the expected value via internal API. And that's gross.
reprs = (spec._expected_value if isinstance(spec, Equals) else spec
for spec in missing_key_specs)
raise MissingKeys(*reprs)
def _merge(self, value):
"""
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
"""
if value is not None and not isinstance(value, dict):
# bogus value; will not pass validation but should be preserved
return value
if not self._pairs:
return {}
collected = {}
# collected.update(value)
for k_validator, v_validator in self._pairs:
k_default = k_validator.get_default_for(None)
if k_default is None:
continue
# even None is ok
if value:
v_for_this_k = value.get(k_default)
else:
v_for_this_k = None
v_default = v_validator.get_default_for(v_for_this_k)
collected.update({k_default: v_default})
if value:
for k, v in value.items():
if k not in collected:
collected[k] = v
return collected
class InRange(BaseRequirement):
"""
Requires that the numeric value is in given boundaries.
"""
implies = IsA(int) | IsA(float)
def __init__(self, min=None, max=None, default=NotImplemented):
self._min = min
self._max = max
if default is not NotImplemented:
self._default = default
def _check(self, value):
if self._min is not None and self._min > value:
self._raise_error(value)
if self._max is not None and self._max < value:
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
def _fmt(x):
return '' if x is None else x
return '{must} belong to {min_}..{max_}'.format(
must=must, min_=_fmt(self._min), max_=_fmt(self._max))
class HasAttr(BaseRequirement):
"""
Requires that the value has given attribute.
"""
def __init__(self, attr_name):
self._attr_name = attr_name
def _check(self, value):
if not hasattr(value, self._attr_name):
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
return '{must} have attribute {name!r}'.format(
must=must, name=self._attr_name)
class Length(InRange):
"""
Requires that the value length is in given boundaries.
"""
implies = HasAttr('__len__')
def _check(self, value):
try:
super(Length, self)._check(len(value))
except ValidationError as e:
self._raise_error(value)
def __repr__(self):
if self.negated:
must = 'must not'
else:
must = 'must'
def _fmt(x):
return '' if x is None else x
return '{must} have length of {min_}..{max_}'.format(
must=must, min_=_fmt(self._min), max_=_fmt(self._max))
|
neithere/monk
|
monk/validators.py
|
BaseListOf._merge
|
python
|
def _merge(self, value):
if not value:
return []
if value is not None and not isinstance(value, list):
# bogus value; will not pass validation but should be preserved
return value
item_spec = self._nested_validator
return [x if x is None else item_spec.get_default_for(x) for x in value]
|
Returns a list based on `value`:
* missing required value is converted to an empty list;
* missing required items are never created;
* nested items are merged recursively.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L431-L447
| null |
class BaseListOf(BaseRequirement):
"""
The base class for validating lists. Supports different error toleration
strategies which can be selected by subclasses. In many aspects this is
similar to :class:`BaseCombinator`.
"""
implies = IsA(list)
item_strategy = NotImplemented
error_class = CombinedValidationError
is_recursive = True
def __init__(self, validator, default=None):
self._nested_validator = translate(validator)
self._default = default
def _check(self, value):
if not value:
try:
self._nested_validator(MISSING)
except ValidationError as e:
raise ValidationError('lacks item: {error}'
.format(error=e))
errors = []
for i, nested_value in enumerate(value):
try:
self._nested_validator(nested_value)
except ValidationError as e:
annotated_error = ValidationError(
'item #{elem}: {error}'.format(elem=i, error=e))
if self.item_strategy == ITEM_STRATEGY_ALL:
raise annotated_error
errors.append(annotated_error)
if self.can_tolerate(errors, value):
return
raise self.error_class(*errors)
def can_tolerate(self, errors, value):
if self.item_strategy == ITEM_STRATEGY_ALL:
if errors:
return False
else:
return True
elif self.item_strategy == ITEM_STRATEGY_ANY:
if len(errors) < len(value):
return True
else:
return False
else:
raise ValueError('unknown strategy')
def _represent(self):
return repr(self._nested_validator)
|
neithere/monk
|
monk/validators.py
|
DictOf._merge
|
python
|
def _merge(self, value):
if value is not None and not isinstance(value, dict):
# bogus value; will not pass validation but should be preserved
return value
if not self._pairs:
return {}
collected = {}
# collected.update(value)
for k_validator, v_validator in self._pairs:
k_default = k_validator.get_default_for(None)
if k_default is None:
continue
# even None is ok
if value:
v_for_this_k = value.get(k_default)
else:
v_for_this_k = None
v_default = v_validator.get_default_for(v_for_this_k)
collected.update({k_default: v_default})
if value:
for k, v in value.items():
if k not in collected:
collected[k] = v
return collected
|
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L589-L622
| null |
class DictOf(BaseRequirement):
"""
Requires that the value is a `dict` which items match given patterns.
Usage::
>>> v = DictOf([
... # key "name" must exist; its value must be a `str`
... (Equals('name'), IsA(str)),
... # key "age" may not exist; its value must be an `int`
... (Equals('age') | ~Exists(), IsA(int)),
... # there may be other `str` keys with `str` or `int` values
... (IsA(str), IsA(str) | IsA(int)),
... ])
>>> v({'name': 'John'})
>>> v({'name': 'John', 'age': 25})
>>> v({'name': 'John', 'age': 25.5})
Traceback (most recent call last):
...
DictValueError: 'age' value must be int
>>> v({'name': 'John', 'age': 25, 'note': 'custom field'})
>>> v({'name': 'John', 'age': 25, 'note': 5.5})
Traceback (most recent call last):
...
DictValueError: 'note' value must be str or must be int
Note that this validator supports :class:`Exists` to mark keys that can
be missing.
"""
implies = IsA(dict)
def __init__(self, pairs):
self._pairs = pairs
def _represent(self):
return repr(self._pairs)
def _check(self, value):
value = value or {}
validated_data_keys = []
missing_key_specs = []
for k_validator, v_validator in self._pairs:
# NOTE kspec.datatype can be None => any key of any datatype
# NOTE kspec.default can be None => any key of given datatype
# gather data keys that match given kspec;
# then validate them against vspec
matched = False
for k,v in value.items():
if k in validated_data_keys:
continue
# check if this key is described by current key validator;
# if it isn't, just skip it (and try another validator
# on it later on)
try:
k_validator(k)
except (TypeError, ValidationError):
continue
# this key *is* described by current value validator;
# validate the value (it *must* validate)
try:
v_validator(v)
except (ValidationError, TypeError) as e:
if isinstance(e, DictValueError):
msg = 'in {k!r} ({e})'
else:
msg = '{k!r} value {e}'
raise DictValueError(msg.format(k=k, e=e))
validated_data_keys.append(k)
matched = True
# if not matched and not k_validator.optional:
if not matched:
try:
k_validator(MISSING)
except ValidationError:
missing_key_specs.append(k_validator)
# TODO document that unknown keys are checked before missing ones
# check if there are data keys that did not match any key spec;
# if yes, raise InvalidKey for them
if len(validated_data_keys) < len(value):
invalid_keys = set(value) - set(validated_data_keys)
raise InvalidKeys(*invalid_keys)
if missing_key_specs:
# XXX this prints validators, not keys as strings;
# one exception is the Equals validator from which we get
# the expected value via internal API. And that's gross.
reprs = (spec._expected_value if isinstance(spec, Equals) else spec
for spec in missing_key_specs)
raise MissingKeys(*reprs)
|
neithere/monk
|
monk/manipulation.py
|
normalize_list_of_dicts
|
python
|
def normalize_list_of_dicts(value, default_key, default_value=UNDEFINED):
if value is None:
if default_value is UNDEFINED:
return []
value = default_value
if isinstance(value, dict):
return [value]
if isinstance(value, text_type):
return [{default_key: value}]
if isinstance(value, list):
if not all(isinstance(x, dict) for x in value):
def _fix(x):
return {default_key: x} if isinstance(x, text_type) else x
return list(map(_fix, value))
return value
|
Converts given value to a list of dictionaries as follows:
* ``[{...}]`` → ``[{...}]``
* ``{...}`` → ``[{...}]``
* ``'xyz'`` → ``[{default_key: 'xyz'}]``
* ``None`` → ``[{default_key: default_value}]`` (if specified)
* ``None`` → ``[]``
:param default_value:
only Unicode, i.e. `str` in Python 3.x and **only** `unicode` in Python 2.x
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/manipulation.py#L108-L139
| null |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~~~~~~~~
Data manipulation
~~~~~~~~~~~~~~~~~
"""
from .compat import text_type
from . import translate
__all__ = [
# functions
'merge_defaults',
# helpers
'normalize_to_list', 'normalize_list_of_dicts',
]
def merge_defaults(spec, value):
"""
Returns a copy of `value` recursively updated to match the `spec`:
* New values are added whenever possible (including nested ones).
* Existing values are never changed or removed.
* Exception: container values (lists, dictionaries) may be populated;
see respective merger functions for details.
The result may not pass validation against the `spec`
in the following cases:
a) a required value is missing and the spec does not provide defaults;
b) an existing value is invalid.
The business logic is as follows:
* if `value` is empty, use default value from `spec`;
* if `value` is present or `spec` has no default value:
* if `spec` datatype is present as a key in `mergers`,
use the respective merger function to obtain the value;
* if no merger is assigned to the datatype, use `fallback` function.
See documentation on concrete merger functions for further details.
:spec:
A "natural" or "verbose" spec.
:value:
The value to merge into the `spec`.
Examples::
>>> merge_defaults('foo', None)
'foo'
>>> merge_defaults('foo', 'bar')
'bar'
>>> merge_defaults({'a': 'foo'}, {})
{'a': 'foo'}
>>> merge_defaults({'a': [{'b': 123}]},
... {'a': [{'b': None},
... {'x': 0}]})
{'a': [{'b': 123}, {'b': 123, 'x': 0}]}
"""
validator = translate(spec)
return validator.get_default_for(value)
class UNDEFINED:
pass
def normalize_to_list(value):
"""
Converts given value to a list as follows:
* ``[x]`` → ``[x]``
* ``x`` → ``[x]``
"""
if value and not isinstance(value, list):
return [value]
else:
return value
|
neithere/monk
|
monk/compat.py
|
safe_str
|
python
|
def safe_str(value):
if sys.version_info < (3,0) and isinstance(value, unicode):
return value.encode('utf-8')
else:
return str(value)
|
Returns:
* a `str` instance (bytes) in Python 2.x, or
* a `str` instance (Unicode) in Python 3.x.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/compat.py#L50-L60
| null |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~~~~
Compatibility
~~~~~~~~~~~~~
This module is intended to hide away implementation details of various Python
versions.
"""
import sys
import types
func_types = (
types.FunctionType,
types.MethodType,
# CPython: datetime.datetime.[utc]now()
types.BuiltinFunctionType,
)
if sys.version_info < (3,0):
text_types = unicode, str
text_type = unicode
binary_type = str
else:
text_types = str,
text_type = str
binary_type = bytes
def safe_unicode(value):
""" Returns:
* a `unicode` instance in Python 2.x, or
* a `str` instance in Python 3.x.
"""
if sys.version_info < (3,0):
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value)
else:
return str(value)
|
neithere/monk
|
monk/compat.py
|
safe_unicode
|
python
|
def safe_unicode(value):
if sys.version_info < (3,0):
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value)
else:
return str(value)
|
Returns:
* a `unicode` instance in Python 2.x, or
* a `str` instance in Python 3.x.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/compat.py#L63-L76
| null |
# -*- coding: utf-8 -*-
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~~~~
Compatibility
~~~~~~~~~~~~~
This module is intended to hide away implementation details of various Python
versions.
"""
import sys
import types
func_types = (
types.FunctionType,
types.MethodType,
# CPython: datetime.datetime.[utc]now()
types.BuiltinFunctionType,
)
if sys.version_info < (3,0):
text_types = unicode, str
text_type = unicode
binary_type = str
else:
text_types = str,
text_type = str
binary_type = bytes
def safe_str(value):
    """ Returns:
    * a `str` instance (bytes) in Python 2.x, or
    * a `str` instance (Unicode) in Python 3.x.
    """
    on_py2 = sys.version_info < (3, 0)
    # The isinstance check only runs on Python 2, where `unicode` exists.
    if on_py2 and isinstance(value, unicode):
        return value.encode('utf-8')
    return str(value)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.find
|
python
|
def find(cls, db, *args, **kwargs):
    """Return a :class:`MongoResultSet` wrapping a pymongo cursor.

    All arguments are forwarded verbatim to pymongo's ``Collection.find``;
    pass the query spec as a dict positionally, not as keyword arguments.
    """
    cls._ensure_indexes(db)
    cursor = db[cls.collection].find(*args, **kwargs)
    wrap = partial(cls.wrap_incoming, db=db)
    return MongoResultSet(cursor, wrap)
|
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L165-L183
| null |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
@classmethod
def get_one(cls, db, *args, **kwargs):
"""
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
"""
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None
def save(self, db):
"""
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
self._ensure_indexes(db)
# XXX self.structure belongs to StructuredDictMixin !!
outgoing = dict(dict_to_db(self, self.structure))
object_id = db[self.collection].save(outgoing)
if self.get('_id') is None:
self['_id'] = object_id
else:
pass
return object_id
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_id(self):
""" Returns object id or ``None``.
"""
import warnings
warnings.warn('{0}.get_id() is deprecated, '
'use {0}.id instead'.format(type(self).__name__),
DeprecationWarning)
return self.get('_id')
def get_ref(self):
""" Returns a `DBRef` for this object or ``None``.
"""
_id = self.id
if _id is None:
return None
else:
return DBRef(self.collection, _id)
def remove(self, db):
"""
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
assert self.id
db[self.collection].remove(self.id)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.get_one
|
python
|
def get_one(cls, db, *args, **kwargs):
    """Fetch a single document matching the query, wrapped, or ``None``.

    Arguments are forwarded to pymongo's ``Collection.find_one``.
    """
    raw = db[cls.collection].find_one(*args, **kwargs)
    return cls.wrap_incoming(raw, db) if raw else None
|
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L186-L199
| null |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
def find(cls, db, *args, **kwargs):
"""
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
"""
cls._ensure_indexes(db)
docs = db[cls.collection].find(*args, **kwargs)
return MongoResultSet(docs, partial(cls.wrap_incoming, db=db))
@classmethod
def save(self, db):
"""
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
self._ensure_indexes(db)
# XXX self.structure belongs to StructuredDictMixin !!
outgoing = dict(dict_to_db(self, self.structure))
object_id = db[self.collection].save(outgoing)
if self.get('_id') is None:
self['_id'] = object_id
else:
pass
return object_id
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_id(self):
""" Returns object id or ``None``.
"""
import warnings
warnings.warn('{0}.get_id() is deprecated, '
'use {0}.id instead'.format(type(self).__name__),
DeprecationWarning)
return self.get('_id')
def get_ref(self):
""" Returns a `DBRef` for this object or ``None``.
"""
_id = self.id
if _id is None:
return None
else:
return DBRef(self.collection, _id)
def remove(self, db):
"""
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
assert self.id
db[self.collection].remove(self.id)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.save
|
python
|
def save(self, db):
    """Persist this document to ``db[self.collection]`` and return its id.

    Ensures the declared indexes exist first, converts the document via
    ``dict_to_db`` using ``self.structure``, and on first save writes the
    generated ``_id`` back onto the document.
    """
    assert self.collection
    self._ensure_indexes(db)
    # XXX self.structure belongs to StructuredDictMixin !!
    outgoing = dict(dict_to_db(self, self.structure))
    object_id = db[self.collection].save(outgoing)
    # Fixed: dropped the dead `else: pass` branch — it had no effect.
    if self.get('_id') is None:
        self['_id'] = object_id
    return object_id
|
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L201-L224
|
[
"def dict_to_db(data, spec={}):\n return dict(_dict_to_db_pairs(spec, data))\n",
"def _ensure_indexes(cls, db):\n for field, kwargs in cls.indexes.items():\n kwargs = kwargs or {}\n db[cls.collection].ensure_index(field, **kwargs)\n"
] |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
def find(cls, db, *args, **kwargs):
"""
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
"""
cls._ensure_indexes(db)
docs = db[cls.collection].find(*args, **kwargs)
return MongoResultSet(docs, partial(cls.wrap_incoming, db=db))
@classmethod
def get_one(cls, db, *args, **kwargs):
"""
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
"""
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_id(self):
""" Returns object id or ``None``.
"""
import warnings
warnings.warn('{0}.get_id() is deprecated, '
'use {0}.id instead'.format(type(self).__name__),
DeprecationWarning)
return self.get('_id')
def get_ref(self):
""" Returns a `DBRef` for this object or ``None``.
"""
_id = self.id
if _id is None:
return None
else:
return DBRef(self.collection, _id)
def remove(self, db):
"""
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
assert self.id
db[self.collection].remove(self.id)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.get_id
|
python
|
def get_id(self):
    """Deprecated alias for the ``id`` property; returns object id or ``None``."""
    import warnings
    message = ('{0}.get_id() is deprecated, '
               'use {0}.id instead'.format(type(self).__name__))
    warnings.warn(message, DeprecationWarning)
    return self.get('_id')
|
Returns object id or ``None``.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L232-L239
| null |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
def find(cls, db, *args, **kwargs):
"""
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
"""
cls._ensure_indexes(db)
docs = db[cls.collection].find(*args, **kwargs)
return MongoResultSet(docs, partial(cls.wrap_incoming, db=db))
@classmethod
def get_one(cls, db, *args, **kwargs):
"""
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
"""
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None
def save(self, db):
"""
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
self._ensure_indexes(db)
# XXX self.structure belongs to StructuredDictMixin !!
outgoing = dict(dict_to_db(self, self.structure))
object_id = db[self.collection].save(outgoing)
if self.get('_id') is None:
self['_id'] = object_id
else:
pass
return object_id
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_ref(self):
""" Returns a `DBRef` for this object or ``None``.
"""
_id = self.id
if _id is None:
return None
else:
return DBRef(self.collection, _id)
def remove(self, db):
"""
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
assert self.id
db[self.collection].remove(self.id)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.get_ref
|
python
|
def get_ref(self):
    """Build a `DBRef` pointing at this document, or ``None`` if it has no id."""
    oid = self.id
    return None if oid is None else DBRef(self.collection, oid)
|
Returns a `DBRef` for this object or ``None``.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L241-L248
| null |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
def find(cls, db, *args, **kwargs):
"""
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
"""
cls._ensure_indexes(db)
docs = db[cls.collection].find(*args, **kwargs)
return MongoResultSet(docs, partial(cls.wrap_incoming, db=db))
@classmethod
def get_one(cls, db, *args, **kwargs):
"""
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
"""
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None
def save(self, db):
"""
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
self._ensure_indexes(db)
# XXX self.structure belongs to StructuredDictMixin !!
outgoing = dict(dict_to_db(self, self.structure))
object_id = db[self.collection].save(outgoing)
if self.get('_id') is None:
self['_id'] = object_id
else:
pass
return object_id
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_id(self):
""" Returns object id or ``None``.
"""
import warnings
warnings.warn('{0}.get_id() is deprecated, '
'use {0}.id instead'.format(type(self).__name__),
DeprecationWarning)
return self.get('_id')
def remove(self, db):
"""
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
assert self.id
db[self.collection].remove(self.id)
|
neithere/monk
|
monk/mongo.py
|
MongoBoundDictMixin.remove
|
python
|
def remove(self, db):
    """Delete this document from its MongoDB collection.

    Requires both :attr:`collection` and the document id to be set;
    collection name is taken from :attr:`MongoBoundDictMixin.collection`.
    """
    assert self.collection
    assert self.id
    db[self.collection].remove(self.id)
|
Removes the object from given database. Usage::
item = Item.get_one(db)
item.remove(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
|
train
|
https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/mongo.py#L250-L262
| null |
class MongoBoundDictMixin(object):
""" Adds MongoDB-specific features to the dictionary.
.. attribute:: collection
Collection name.
.. attribute:: indexes
(TODO)
"""
collection = None
indexes = {}
def __hash__(self):
""" Collection name and id together make the hash; document class
doesn't matter.
Raises `TypeError` if collection or id is not set.
"""
if self.collection and self.id:
return hash(self.collection) | hash(self.id)
raise TypeError('Document is unhashable: collection or id is not set')
def __eq__(self, other):
# both must inherit to this class
if not isinstance(other, MongoBoundDictMixin):
return False
# both must have collections defined
if not self.collection or not other.collection:
return False
# both must have ids
if not self.id or not other.id:
return False
# collections must be equal
if self.collection != other.collection:
return False
# ids must be equal
if self.id != other.id:
return False
return True
def __ne__(self, other):
# this is required to override the call to dict.__eq__()
return not self.__eq__(other)
@classmethod
def _ensure_indexes(cls, db):
for field, kwargs in cls.indexes.items():
kwargs = kwargs or {}
db[cls.collection].ensure_index(field, **kwargs)
@classmethod
def wrap_incoming(cls, data, db):
# XXX self.structure belongs to StructuredDictMixin !!
return cls(dict_from_db(cls.structure, data, db))
@classmethod
def find(cls, db, *args, **kwargs):
"""
Returns a :class:`MongoResultSet` object.
Example::
items = Item.find(db, {'title': u'Hello'})
.. note::
The arguments are those of pymongo collection's `find` method.
A frequent error is to pass query key/value pairs as keyword
arguments. This is **wrong**. In most cases you will want to pass
a dictionary ("query spec") as the first positional argument.
"""
cls._ensure_indexes(db)
docs = db[cls.collection].find(*args, **kwargs)
return MongoResultSet(docs, partial(cls.wrap_incoming, db=db))
@classmethod
def get_one(cls, db, *args, **kwargs):
"""
Returns an object that corresponds to given query or ``None``.
Example::
item = Item.get_one(db, {'title': u'Hello'})
"""
data = db[cls.collection].find_one(*args, **kwargs)
if data:
return cls.wrap_incoming(data, db)
else:
return None
def save(self, db):
"""
Saves the object to given database. Usage::
item = Item(title=u'Hello')
item.save(db)
Collection name is taken from :attr:`MongoBoundDictMixin.collection`.
"""
assert self.collection
self._ensure_indexes(db)
# XXX self.structure belongs to StructuredDictMixin !!
outgoing = dict(dict_to_db(self, self.structure))
object_id = db[self.collection].save(outgoing)
if self.get('_id') is None:
self['_id'] = object_id
else:
pass
return object_id
@property
def id(self):
""" Returns object id or ``None``.
"""
return self.get('_id')
def get_id(self):
""" Returns object id or ``None``.
"""
import warnings
warnings.warn('{0}.get_id() is deprecated, '
'use {0}.id instead'.format(type(self).__name__),
DeprecationWarning)
return self.get('_id')
def get_ref(self):
""" Returns a `DBRef` for this object or ``None``.
"""
_id = self.id
if _id is None:
return None
else:
return DBRef(self.collection, _id)
|
COALAIP/pycoalaip
|
coalaip/data_formats.py
|
_copy_context_into_mutable
|
python
|
def _copy_context_into_mutable(context):
def make_mutable(val):
if isinstance(val, Mapping):
return dict(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return [make_mutable(val) for val in context]
except TypeError:
pass
return make_mutable(context)
|
Copy a properly formatted context into a mutable data structure.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/data_formats.py#L17-L31
|
[
"def make_mutable(val):\n if isinstance(val, Mapping):\n return dict(val)\n else:\n return val\n"
] |
"""Utilities for data formats supported by pycoalaip."""
from collections import namedtuple, Mapping
from copy import copy
from enum import Enum, unique
from types import MappingProxyType
@unique
class DataFormat(Enum):
"""Enum of supported data formats."""
json = 'json'
jsonld = 'jsonld'
ipld = 'ipld'
def _make_context_immutable(context):
"""Best effort attempt at turning a properly formatted context
(either a string, dict, or array of strings and dicts) into an
immutable data structure.
If we get an array, make it immutable by creating a tuple; if we get
a dict, copy it into a MappingProxyType. Otherwise, return as-is.
"""
def make_immutable(val):
if isinstance(val, Mapping):
return MappingProxyType(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return tuple([make_immutable(val) for val in context])
except TypeError:
pass
return make_immutable(context)
def _data_format_resolver(data_format, resolver_dict):
"""Resolve a value from :attr:`resolver_dict` based on the
:attr:`data_format`.
Args:
data_format (:class:`~.DataFormat` or str): The data format;
must be a member of :class:`~.DataFormat` or a string
equivalent.
resolver_dict (dict): the resolving dict. Can hold any value
for any of the valid :attr:`data_format` strings
Returns:
The value of the key in :attr:`resolver_dict` that matches
:attr:`data_format`
"""
try:
data_format = DataFormat(data_format)
except ValueError:
supported_formats = ', '.join(
["'{}'".format(f.value) for f in DataFormat])
raise ValueError(("'data_format' must be one of {formats}. Given "
"'{value}'.").format(formats=supported_formats,
value=data_format))
return (resolver_dict.get(data_format) or
resolver_dict.get(data_format.value))
ExtractedLinkedDataResult = namedtuple('ExtractedLinkedDataResult', [
'data',
'ld_type',
'ld_context',
'ld_id'
])
def _extract_ld_data(data, data_format=None, **kwargs):
"""Extract the given :attr:`data` into a
:class:`~.ExtractedLinkedDataResult` with the resulting data
stripped of any Linked Data specifics. Any missing Linked Data
properties are returned as ``None`` in the resulting
:class:`~.ExtractLinkedDataResult`.
Does not modify the given :attr:`data`.
"""
if not data_format:
data_format = _get_format_from_data(data)
extract_ld_data_fn = _data_format_resolver(data_format, {
'jsonld': _extract_ld_data_from_jsonld,
'json': _extract_ld_data_from_json,
'ipld': _extract_ld_data_from_ipld,
})
return extract_ld_data_fn(data, **kwargs)
def _extract_ld_data_from_jsonld(data, type_key='@type',
context_key='@context', id_key='@id',
**kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key,
context_key=context_key, id_key=id_key,
**kwargs)
def _extract_ld_data_from_json(data, type_key='type', **kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key, **kwargs)
def _extract_ld_data_from_ipld(data, type_key='type', **kwargs):
raise NotImplementedError(('Extracting data from IPLD has not been '
'implemented yet'))
def _extract_ld_data_from_keys(orig_data, type_key=None, context_key=None,
id_key=None):
data = copy(orig_data)
extracted_kwargs = {
'ld_type': None,
'ld_context': None,
'ld_id': None
}
if type_key and type_key in data:
extracted_kwargs['ld_type'] = data[type_key]
del data[type_key]
if context_key and context_key in data:
extracted_kwargs['ld_context'] = data[context_key]
del data[context_key]
if id_key and id_key in data:
extracted_kwargs['ld_id'] = data[id_key]
del data[id_key]
return ExtractedLinkedDataResult(data, **extracted_kwargs)
def _get_format_from_data(data):
# TODO: add IPLD
if bool(data.get('@type') or data.get('@context') or data.get('@id')):
return DataFormat.jsonld
else:
return DataFormat.json
|
COALAIP/pycoalaip
|
coalaip/data_formats.py
|
_make_context_immutable
|
python
|
def _make_context_immutable(context):
def make_immutable(val):
if isinstance(val, Mapping):
return MappingProxyType(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return tuple([make_immutable(val) for val in context])
except TypeError:
pass
return make_immutable(context)
|
Best effort attempt at turning a properly formatted context
(either a string, dict, or array of strings and dicts) into an
immutable data structure.
If we get an array, make it immutable by creating a tuple; if we get
a dict, copy it into a MappingProxyType. Otherwise, return as-is.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/data_formats.py#L34-L53
|
[
"def make_immutable(val):\n if isinstance(val, Mapping):\n return MappingProxyType(val)\n else:\n return val\n"
] |
"""Utilities for data formats supported by pycoalaip."""
from collections import namedtuple, Mapping
from copy import copy
from enum import Enum, unique
from types import MappingProxyType
@unique
class DataFormat(Enum):
"""Enum of supported data formats."""
json = 'json'
jsonld = 'jsonld'
ipld = 'ipld'
def _copy_context_into_mutable(context):
"""Copy a properly formatted context into a mutable data structure.
"""
def make_mutable(val):
if isinstance(val, Mapping):
return dict(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return [make_mutable(val) for val in context]
except TypeError:
pass
return make_mutable(context)
def _data_format_resolver(data_format, resolver_dict):
"""Resolve a value from :attr:`resolver_dict` based on the
:attr:`data_format`.
Args:
data_format (:class:`~.DataFormat` or str): The data format;
must be a member of :class:`~.DataFormat` or a string
equivalent.
resolver_dict (dict): the resolving dict. Can hold any value
for any of the valid :attr:`data_format` strings
Returns:
The value of the key in :attr:`resolver_dict` that matches
:attr:`data_format`
"""
try:
data_format = DataFormat(data_format)
except ValueError:
supported_formats = ', '.join(
["'{}'".format(f.value) for f in DataFormat])
raise ValueError(("'data_format' must be one of {formats}. Given "
"'{value}'.").format(formats=supported_formats,
value=data_format))
return (resolver_dict.get(data_format) or
resolver_dict.get(data_format.value))
ExtractedLinkedDataResult = namedtuple('ExtractedLinkedDataResult', [
'data',
'ld_type',
'ld_context',
'ld_id'
])
def _extract_ld_data(data, data_format=None, **kwargs):
"""Extract the given :attr:`data` into a
:class:`~.ExtractedLinkedDataResult` with the resulting data
stripped of any Linked Data specifics. Any missing Linked Data
properties are returned as ``None`` in the resulting
:class:`~.ExtractLinkedDataResult`.
Does not modify the given :attr:`data`.
"""
if not data_format:
data_format = _get_format_from_data(data)
extract_ld_data_fn = _data_format_resolver(data_format, {
'jsonld': _extract_ld_data_from_jsonld,
'json': _extract_ld_data_from_json,
'ipld': _extract_ld_data_from_ipld,
})
return extract_ld_data_fn(data, **kwargs)
def _extract_ld_data_from_jsonld(data, type_key='@type',
context_key='@context', id_key='@id',
**kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key,
context_key=context_key, id_key=id_key,
**kwargs)
def _extract_ld_data_from_json(data, type_key='type', **kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key, **kwargs)
def _extract_ld_data_from_ipld(data, type_key='type', **kwargs):
raise NotImplementedError(('Extracting data from IPLD has not been '
'implemented yet'))
def _extract_ld_data_from_keys(orig_data, type_key=None, context_key=None,
id_key=None):
data = copy(orig_data)
extracted_kwargs = {
'ld_type': None,
'ld_context': None,
'ld_id': None
}
if type_key and type_key in data:
extracted_kwargs['ld_type'] = data[type_key]
del data[type_key]
if context_key and context_key in data:
extracted_kwargs['ld_context'] = data[context_key]
del data[context_key]
if id_key and id_key in data:
extracted_kwargs['ld_id'] = data[id_key]
del data[id_key]
return ExtractedLinkedDataResult(data, **extracted_kwargs)
def _get_format_from_data(data):
# TODO: add IPLD
if bool(data.get('@type') or data.get('@context') or data.get('@id')):
return DataFormat.jsonld
else:
return DataFormat.json
|
COALAIP/pycoalaip
|
coalaip/data_formats.py
|
_data_format_resolver
|
python
|
def _data_format_resolver(data_format, resolver_dict):
    """Look up the value in *resolver_dict* that matches *data_format*.

    *data_format* may be a ``DataFormat`` member or its string value; any
    other value raises ``ValueError`` listing the supported formats.
    """
    try:
        fmt = DataFormat(data_format)
    except ValueError:
        supported = ', '.join("'{}'".format(member.value)
                              for member in DataFormat)
        raise ValueError(("'data_format' must be one of {formats}. Given "
                          "'{value}'.").format(formats=supported,
                                               value=data_format))
    return resolver_dict.get(fmt) or resolver_dict.get(fmt.value)
|
Resolve a value from :attr:`resolver_dict` based on the
:attr:`data_format`.
Args:
data_format (:class:`~.DataFormat` or str): The data format;
must be a member of :class:`~.DataFormat` or a string
equivalent.
resolver_dict (dict): the resolving dict. Can hold any value
for any of the valid :attr:`data_format` strings
Returns:
The value of the key in :attr:`resolver_dict` that matches
:attr:`data_format`
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/data_formats.py#L56-L80
| null |
"""Utilities for data formats supported by pycoalaip."""
from collections import namedtuple, Mapping
from copy import copy
from enum import Enum, unique
from types import MappingProxyType
@unique
class DataFormat(Enum):
"""Enum of supported data formats."""
json = 'json'
jsonld = 'jsonld'
ipld = 'ipld'
def _copy_context_into_mutable(context):
"""Copy a properly formatted context into a mutable data structure.
"""
def make_mutable(val):
if isinstance(val, Mapping):
return dict(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return [make_mutable(val) for val in context]
except TypeError:
pass
return make_mutable(context)
def _make_context_immutable(context):
"""Best effort attempt at turning a properly formatted context
(either a string, dict, or array of strings and dicts) into an
immutable data structure.
If we get an array, make it immutable by creating a tuple; if we get
a dict, copy it into a MappingProxyType. Otherwise, return as-is.
"""
def make_immutable(val):
if isinstance(val, Mapping):
return MappingProxyType(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return tuple([make_immutable(val) for val in context])
except TypeError:
pass
return make_immutable(context)
ExtractedLinkedDataResult = namedtuple('ExtractedLinkedDataResult', [
'data',
'ld_type',
'ld_context',
'ld_id'
])
def _extract_ld_data(data, data_format=None, **kwargs):
"""Extract the given :attr:`data` into a
:class:`~.ExtractedLinkedDataResult` with the resulting data
stripped of any Linked Data specifics. Any missing Linked Data
properties are returned as ``None`` in the resulting
:class:`~.ExtractLinkedDataResult`.
Does not modify the given :attr:`data`.
"""
if not data_format:
data_format = _get_format_from_data(data)
extract_ld_data_fn = _data_format_resolver(data_format, {
'jsonld': _extract_ld_data_from_jsonld,
'json': _extract_ld_data_from_json,
'ipld': _extract_ld_data_from_ipld,
})
return extract_ld_data_fn(data, **kwargs)
def _extract_ld_data_from_jsonld(data, type_key='@type',
context_key='@context', id_key='@id',
**kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key,
context_key=context_key, id_key=id_key,
**kwargs)
def _extract_ld_data_from_json(data, type_key='type', **kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key, **kwargs)
def _extract_ld_data_from_ipld(data, type_key='type', **kwargs):
raise NotImplementedError(('Extracting data from IPLD has not been '
'implemented yet'))
def _extract_ld_data_from_keys(orig_data, type_key=None, context_key=None,
id_key=None):
data = copy(orig_data)
extracted_kwargs = {
'ld_type': None,
'ld_context': None,
'ld_id': None
}
if type_key and type_key in data:
extracted_kwargs['ld_type'] = data[type_key]
del data[type_key]
if context_key and context_key in data:
extracted_kwargs['ld_context'] = data[context_key]
del data[context_key]
if id_key and id_key in data:
extracted_kwargs['ld_id'] = data[id_key]
del data[id_key]
return ExtractedLinkedDataResult(data, **extracted_kwargs)
def _get_format_from_data(data):
# TODO: add IPLD
if bool(data.get('@type') or data.get('@context') or data.get('@id')):
return DataFormat.jsonld
else:
return DataFormat.json
|
COALAIP/pycoalaip
|
coalaip/data_formats.py
|
_extract_ld_data
|
python
|
def _extract_ld_data(data, data_format=None, **kwargs):
if not data_format:
data_format = _get_format_from_data(data)
extract_ld_data_fn = _data_format_resolver(data_format, {
'jsonld': _extract_ld_data_from_jsonld,
'json': _extract_ld_data_from_json,
'ipld': _extract_ld_data_from_ipld,
})
return extract_ld_data_fn(data, **kwargs)
|
Extract the given :attr:`data` into a
:class:`~.ExtractedLinkedDataResult` with the resulting data
stripped of any Linked Data specifics. Any missing Linked Data
properties are returned as ``None`` in the resulting
:class:`~.ExtractLinkedDataResult`.
Does not modify the given :attr:`data`.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/data_formats.py#L91-L108
|
[
"def _data_format_resolver(data_format, resolver_dict):\n \"\"\"Resolve a value from :attr:`resolver_dict` based on the\n :attr:`data_format`.\n\n Args:\n data_format (:class:`~.DataFormat` or str): The data format;\n must be a member of :class:`~.DataFormat` or a string\n equivalent.\n resolver_dict (dict): the resolving dict. Can hold any value\n for any of the valid :attr:`data_format` strings\n\n Returns:\n The value of the key in :attr:`resolver_dict` that matches\n :attr:`data_format`\n \"\"\"\n try:\n data_format = DataFormat(data_format)\n except ValueError:\n supported_formats = ', '.join(\n [\"'{}'\".format(f.value) for f in DataFormat])\n raise ValueError((\"'data_format' must be one of {formats}. Given \"\n \"'{value}'.\").format(formats=supported_formats,\n value=data_format))\n return (resolver_dict.get(data_format) or\n resolver_dict.get(data_format.value))\n",
"def _get_format_from_data(data):\n # TODO: add IPLD\n if bool(data.get('@type') or data.get('@context') or data.get('@id')):\n return DataFormat.jsonld\n else:\n return DataFormat.json\n"
] |
"""Utilities for data formats supported by pycoalaip."""
from collections import namedtuple, Mapping
from copy import copy
from enum import Enum, unique
from types import MappingProxyType
@unique
class DataFormat(Enum):
"""Enum of supported data formats."""
json = 'json'
jsonld = 'jsonld'
ipld = 'ipld'
def _copy_context_into_mutable(context):
"""Copy a properly formatted context into a mutable data structure.
"""
def make_mutable(val):
if isinstance(val, Mapping):
return dict(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return [make_mutable(val) for val in context]
except TypeError:
pass
return make_mutable(context)
def _make_context_immutable(context):
"""Best effort attempt at turning a properly formatted context
(either a string, dict, or array of strings and dicts) into an
immutable data structure.
If we get an array, make it immutable by creating a tuple; if we get
a dict, copy it into a MappingProxyType. Otherwise, return as-is.
"""
def make_immutable(val):
if isinstance(val, Mapping):
return MappingProxyType(val)
else:
return val
if not isinstance(context, (str, Mapping)):
try:
return tuple([make_immutable(val) for val in context])
except TypeError:
pass
return make_immutable(context)
def _data_format_resolver(data_format, resolver_dict):
"""Resolve a value from :attr:`resolver_dict` based on the
:attr:`data_format`.
Args:
data_format (:class:`~.DataFormat` or str): The data format;
must be a member of :class:`~.DataFormat` or a string
equivalent.
resolver_dict (dict): the resolving dict. Can hold any value
for any of the valid :attr:`data_format` strings
Returns:
The value of the key in :attr:`resolver_dict` that matches
:attr:`data_format`
"""
try:
data_format = DataFormat(data_format)
except ValueError:
supported_formats = ', '.join(
["'{}'".format(f.value) for f in DataFormat])
raise ValueError(("'data_format' must be one of {formats}. Given "
"'{value}'.").format(formats=supported_formats,
value=data_format))
return (resolver_dict.get(data_format) or
resolver_dict.get(data_format.value))
ExtractedLinkedDataResult = namedtuple('ExtractedLinkedDataResult', [
'data',
'ld_type',
'ld_context',
'ld_id'
])
def _extract_ld_data_from_jsonld(data, type_key='@type',
context_key='@context', id_key='@id',
**kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key,
context_key=context_key, id_key=id_key,
**kwargs)
def _extract_ld_data_from_json(data, type_key='type', **kwargs):
return _extract_ld_data_from_keys(data, type_key=type_key, **kwargs)
def _extract_ld_data_from_ipld(data, type_key='type', **kwargs):
raise NotImplementedError(('Extracting data from IPLD has not been '
'implemented yet'))
def _extract_ld_data_from_keys(orig_data, type_key=None, context_key=None,
id_key=None):
data = copy(orig_data)
extracted_kwargs = {
'ld_type': None,
'ld_context': None,
'ld_id': None
}
if type_key and type_key in data:
extracted_kwargs['ld_type'] = data[type_key]
del data[type_key]
if context_key and context_key in data:
extracted_kwargs['ld_context'] = data[context_key]
del data[context_key]
if id_key and id_key in data:
extracted_kwargs['ld_id'] = data[id_key]
del data[id_key]
return ExtractedLinkedDataResult(data, **extracted_kwargs)
def _get_format_from_data(data):
# TODO: add IPLD
if bool(data.get('@type') or data.get('@context') or data.get('@id')):
return DataFormat.jsonld
else:
return DataFormat.json
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
is_callable
|
python
|
def is_callable(instance, attribute, value):
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
|
Raises a :exc:`TypeError` if the value is not a callable.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L6-L10
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
def does_not_contain(*avoid_keys, error_cls=ValueError):
"""Decorator: value must not contain any of the :attr:`avoid_keys`.
"""
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
"""Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
"""
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
use_model_attr
|
python
|
def use_model_attr(attr):
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
|
Use the validator set on a separate attribute on the class.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L13-L18
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
def does_not_contain(*avoid_keys, error_cls=ValueError):
"""Decorator: value must not contain any of the :attr:`avoid_keys`.
"""
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
"""Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
"""
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
does_not_contain
|
python
|
def does_not_contain(*avoid_keys, error_cls=ValueError):
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
|
Decorator: value must not contain any of the :attr:`avoid_keys`.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L21-L42
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
"""Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
"""
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
is_creation_model
|
python
|
def is_creation_model(instance, attribute, value):
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
|
Must include at least a ``name`` key.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L45-L56
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
def does_not_contain(*avoid_keys, error_cls=ValueError):
"""Decorator: value must not contain any of the :attr:`avoid_keys`.
"""
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
"""Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
"""
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
is_manifestation_model
|
python
|
def is_manifestation_model(instance, attribute, value):
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
|
Must include a ``manifestationOfWork`` key.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L68-L81
|
[
"def is_creation_model(instance, attribute, value):\n \"\"\"Must include at least a ``name`` key.\"\"\"\n\n creation_name = value.get('name')\n if not isinstance(creation_name, str):\n instance_name = instance.__class__.__name__\n err_str = (\"'name' must be given as a string in the '{attr}' \"\n \"parameter of a '{cls}'. Given \"\n \"'{value}'\").format(attr=attribute.name,\n cls=instance_name,\n value=creation_name)\n raise ModelDataError(err_str)\n"
] |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
def does_not_contain(*avoid_keys, error_cls=ValueError):
"""Decorator: value must not contain any of the :attr:`avoid_keys`.
"""
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
"""Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
"""
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
is_right_model
|
python
|
def is_right_model(instance, attribute, value):
for key in ['source', 'license']:
key_value = value.get(key)
if not isinstance(key_value, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'{key}' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(key=key,
attr=attribute.name,
cls=instance_name,
value=key_value))
|
Must include at least the ``source`` and ``license`` keys, but
not a ``rightsOf`` key (``source`` indicates that the Right is
derived from and allowed by a source Right; it cannot contain the
full rights to a Creation).
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L85-L101
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
"""Raises a :exc:`TypeError` if the value is not a callable."""
if not callable(value):
raise TypeError("'{}' must be callable".format(attribute.name))
def use_model_attr(attr):
"""Use the validator set on a separate attribute on the class."""
def use_model_validator(instance, attribute, value):
getattr(instance, attr)(instance, attribute, value)
return use_model_validator
def does_not_contain(*avoid_keys, error_cls=ValueError):
"""Decorator: value must not contain any of the :attr:`avoid_keys`.
"""
def decorator(func):
def not_contains(instance, attribute, value):
instance_name = instance.__class__.__name__
num_matched_keys = len(set(avoid_keys) & value.keys())
if num_matched_keys > 0:
avoid_keys_str = ', '.join(avoid_keys)
err_str = ("Given keys ({num_matched} of {{avoid_keys}} "
"that must not be given in the '{attr}' of a "
"'{cls}'").format(num_matched=num_matched_keys,
avoid_keys=avoid_keys_str,
attr=attribute.name,
cls=instance_name)
raise error_cls(err_str)
return func(instance, attribute, value)
return not_contains
return decorator
def is_creation_model(instance, attribute, value):
"""Must include at least a ``name`` key."""
creation_name = value.get('name')
if not isinstance(creation_name, str):
instance_name = instance.__class__.__name__
err_str = ("'name' must be given as a string in the '{attr}' "
"parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=creation_name)
raise ModelDataError(err_str)
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
"""Must not include keys that indicate the model is a
:class:`~.Manifestation` (e.g. ``manifestationOfWork``).
"""
is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
@does_not_contain('source', error_cls=ModelDataError)
def is_copyright_model(instance, attribute, value):
"""Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
"""
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
COALAIP/pycoalaip
|
coalaip/model_validators.py
|
is_copyright_model
|
python
|
def is_copyright_model(instance, attribute, value):
rights_of = value.get('rightsOf')
if not isinstance(rights_of, str):
instance_name = instance.__class__.__name__
raise ModelDataError(("'rightsOf' must be given as a string in "
"the '{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=rights_of))
|
Must include at least a ``rightsOf`` key, but not a ``source``
key (``rightsOf`` indicates that the Right contains full rights to
an existing Manifestation or Work; i.e. is a Copyright).
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L105-L118
| null |
"""Validators for COALA IP models (:mod:`coalaip.models`)"""
from coalaip.exceptions import ModelDataError
def is_callable(instance, attribute, value):
    """Raise :exc:`TypeError` unless ``value`` is callable."""
    if callable(value):
        return
    raise TypeError("'{}' must be callable".format(attribute.name))
def use_model_attr(attr):
    """Delegate validation to the validator stored under ``attr`` on
    the instance being validated.
    """
    def use_model_validator(instance, attribute, value):
        # Resolve the validator at call time so per-instance
        # overrides are honoured.
        validator = getattr(instance, attr)
        validator(instance, attribute, value)
    return use_model_validator
def does_not_contain(*avoid_keys, error_cls=ValueError):
    """Decorator: value must not contain any of the :attr:`avoid_keys`.

    Args:
        *avoid_keys (str): keys that must be absent from the validated
            mapping
        error_cls (Exception subclass, keyword): exception type raised
            on a match. Defaults to :exc:`ValueError`.
    """
    def decorator(func):
        def not_contains(instance, attribute, value):
            instance_name = instance.__class__.__name__
            num_matched_keys = len(set(avoid_keys) & value.keys())
            if num_matched_keys > 0:
                avoid_keys_str = ', '.join(avoid_keys)
                # Fix: the template used escaped braces ('{{avoid_keys}}')
                # and lacked the closing parenthesis, so the forbidden
                # keys were never interpolated into the message.
                err_str = ("Given keys ({num_matched} of {avoid_keys}) "
                           "that must not be given in the '{attr}' of a "
                           "'{cls}'").format(num_matched=num_matched_keys,
                                             avoid_keys=avoid_keys_str,
                                             attr=attribute.name,
                                             cls=instance_name)
                raise error_cls(err_str)
            return func(instance, attribute, value)
        return not_contains
    return decorator
def is_creation_model(instance, attribute, value):
    """Validate a Creation model: the data must contain at least a
    ``name`` key whose value is a string.
    """
    name_value = value.get('name')
    if isinstance(name_value, str):
        return
    template = ("'name' must be given as a string in the '{attr}' "
                "parameter of a '{cls}'. Given "
                "'{value}'")
    raise ModelDataError(template.format(attr=attribute.name,
                                         cls=instance.__class__.__name__,
                                         value=name_value))
@does_not_contain('manifestationOfWork', error_cls=ModelDataError)
def is_work_model(instance, attribute, value):
    """Validate a Work model: must satisfy :func:`is_creation_model`
    while carrying no Manifestation-only keys such as
    ``manifestationOfWork`` (enforced by the decorator).
    """
    # A Work is simply a Creation without Manifestation markers.
    is_creation_model(instance, attribute, value)
def is_manifestation_model(instance, attribute, value):
    """Must include a ``manifestationOfWork`` key given as a string,
    in addition to satisfying :func:`is_creation_model`.

    Raises:
        :exc:`~.ModelDataError`: if validation fails
    """
    instance_name = instance.__class__.__name__
    # A Manifestation is also a Creation; run those checks first.
    is_creation_model(instance, attribute, value)
    manifestation_of = value.get('manifestationOfWork')
    if not isinstance(manifestation_of, str):
        err_str = ("'manifestationOfWork' must be given as a string in the "
                   "'{attr}' parameter of a '{cls}'. Given "
                   "'{value}'").format(attr=attribute.name,
                                       cls=instance_name,
                                       value=manifestation_of)
        # Fix: this validator previously only print()-ed the error and
        # silently accepted invalid data; like every other validator in
        # this module it must raise on failure.
        raise ModelDataError(err_str)
@does_not_contain('rightsOf', error_cls=ModelDataError)
def is_right_model(instance, attribute, value):
    """Must include at least the ``source`` and ``license`` keys, but
    not a ``rightsOf`` key (``source`` indicates that the Right is
    derived from and allowed by a source Right; it cannot contain the
    full rights to a Creation).
    """
    template = ("'{key}' must be given as a string in "
                "the '{attr}' parameter of a '{cls}'. Given "
                "'{value}'")
    for required_key in ('source', 'license'):
        required_value = value.get(required_key)
        if isinstance(required_value, str):
            continue
        raise ModelDataError(
            template.format(key=required_key,
                            attr=attribute.name,
                            cls=instance.__class__.__name__,
                            value=required_value))
@does_not_contain('source', error_cls=ModelDataError)
|
COALAIP/pycoalaip
|
coalaip/entities.py
|
TransferrableEntity.transfer
|
python
|
def transfer(self, transfer_payload=None, *, from_user, to_user):
if self.persist_id is None:
raise EntityNotYetPersistedError(('Entities cannot be transferred '
'until they have been '
'persisted'))
return self.plugin.transfer(self.persist_id, transfer_payload,
from_user=from_user, to_user=to_user)
|
Transfer this entity to another owner on the backing
persistence layer
Args:
transfer_payload (dict): Payload for the transfer
from_user (any): A user based on the model specified by the
persistence layer
to_user (any): A user based on the model specified by the
persistence layer
Returns:
str: Id of the resulting transfer action on the persistence
layer
Raises:
:exc:`~.EntityNotYetPersistedError`: If the entity being
transferred is not associated with an id on the
persistence layer (:attr:`~Entity.persist_id`) yet
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.EntityTransferError`: If the entity fails to be
transferred on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/entities.py#L409-L442
| null |
class TransferrableEntity(Entity):
"""Base class for transferable COALA IP entity models.
Provides functionality for transferrable entities through
:meth:`transfer`
"""
|
COALAIP/pycoalaip
|
coalaip/entities.py
|
Right.transfer
|
python
|
def transfer(self, rights_assignment_data=None, *, from_user, to_user,
rights_assignment_format='jsonld'):
rights_assignment = RightsAssignment.from_data(
rights_assignment_data or {},
plugin=self.plugin)
transfer_payload = rights_assignment._to_format(
data_format=rights_assignment_format)
transfer_id = super().transfer(transfer_payload, from_user=from_user,
to_user=to_user)
rights_assignment.persist_id = transfer_id
return rights_assignment
|
Transfer this Right to another owner on the backing
persistence layer.
Args:
rights_assignment_data (dict): Model data for the resulting
:class:`~.RightsAssignment`
from_user (any, keyword): A user based on the model specified
by the persistence layer
to_user (any, keyword): A user based on the model specified
by the persistence layer
rights_assignment_format (str, keyword, optional): Data
format of the created entity; must be one of:
- 'jsonld' (default)
- 'json'
- 'ipld'
Returns:
:class:`~.RightsAssignment`: The RightsAssignment entity
created from this transfer
Raises:
See :meth:`~.TransferrableEntity.transfer`
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/entities.py#L506-L542
|
[
"def from_data(cls, data, *, data_format=DataFormat.jsonld, plugin):\n \"\"\"Generic factory for instantiating :attr:`cls` entities\n from their model data. Entities instantiated from this factory\n have yet to be created on the backing persistence layer; see\n :meth:`create` on persisting an entity.\n\n Based on the :attr:`data_format`, the following are considered\n special keys in :attr:`data` and will have different behaviour\n depending on the ``data_type`` requested in later methods (e.g.\n :meth:`create`):\n\n - jsonld:\n - '@type' denotes the Linked Data type of the entity\n - '@context' denotes the JSON-LD context of the entity\n - '@id' denotes the JSON-LD identity of the entity\n - Otherwise:\n - 'type' denotes the Linked Data type of the entity\n\n Args:\n data (dict): Model data for the entity\n data_format (:class:`~.DataFormat` or str): Data format of\n :attr:`data`; must be a member of :class:`~.DataFormat`\n or a string equivalent.\n Defaults to jsonld.\n plugin (subclass of :class:`~.AbstractPlugin`, keyword):\n Persistence layer plugin used by generated :attr:`cls`\n\n Returns:\n :attr:`cls`: A generated :attr:`cls` entity from\n :attr:`data`\n\n Raises:\n :exc:`~.ModelDataError`: if :attr:`data` fails model\n validation\n \"\"\"\n\n def bind_get_model_kwargs(data_format):\n def get_model_kwargs(data):\n result = _extract_ld_data(data, data_format)\n model_kwargs = {k: v for (k, v) in result._asdict().items()\n if v is not None}\n return model_kwargs\n return get_model_kwargs\n\n def get_model_kwargs_from_ipld(data):\n raise NotImplementedError(('Creating entities from IPLD has not '\n 'been implemented yet.'))\n\n get_model_kwargs = _data_format_resolver(data_format, {\n 'jsonld': bind_get_model_kwargs('jsonld'),\n 'json': bind_get_model_kwargs('json'),\n 'ipld': get_model_kwargs_from_ipld,\n })\n model = cls.generate_model(**get_model_kwargs(data))\n\n return cls(model, plugin)\n",
"def _to_format(self, data_format):\n to_format = _data_format_resolver(data_format, {\n 'jsonld': self.to_jsonld,\n 'json': self.to_json,\n 'ipld': self.to_ipld,\n })\n return to_format()\n",
"def transfer(self, transfer_payload=None, *, from_user, to_user):\n \"\"\"Transfer this entity to another owner on the backing\n persistence layer\n\n Args:\n transfer_payload (dict): Payload for the transfer\n from_user (any): A user based on the model specified by the\n persistence layer\n to_user (any): A user based on the model specified by the\n persistence layer\n\n Returns:\n str: Id of the resulting transfer action on the persistence\n layer\n\n Raises:\n :exc:`~.EntityNotYetPersistedError`: If the entity being\n transferred is not associated with an id on the\n persistence layer (:attr:`~Entity.persist_id`) yet\n :exc:`~.EntityNotFoundError`: If the entity could not be\n found on the persistence layer\n :exc:`~.EntityTransferError`: If the entity fails to be\n transferred on the persistence layer\n :exc:`~.PersistenceError`: If any other unhandled error\n in the plugin occurred\n \"\"\"\n\n if self.persist_id is None:\n raise EntityNotYetPersistedError(('Entities cannot be transferred '\n 'until they have been '\n 'persisted'))\n\n return self.plugin.transfer(self.persist_id, transfer_payload,\n from_user=from_user, to_user=to_user)\n"
] |
class Right(TransferrableEntity):
    """COALA IP's Right entity. Transferrable.
    A statement of entitlement (i.e. "right") to do something in
    relation to a :class:`~.Work` or :class:`~.Manifestation`.
    More specific rights, such as ``PlaybackRights``, ``StreamRights``,
    etc should be implemented as subclasses of this class.
    By default, :class:`~.Rights` entities are of @type 'Right' and
    only include the COALA IP context, as Rights are not dependent on
    schema.org.
    """
    @classmethod
    def generate_model(cls, *args, **kwargs):
        """Generate a Right model.
        See :meth:`~.Entity.generate_model` for more details.
        """
        # Delegates to the module-level right_model_factory.
        return right_model_factory(*args, **kwargs)
|
COALAIP/pycoalaip
|
coalaip/models.py
|
work_model_factory
|
python
|
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
kwargs['ld_type'] = 'AbstractWork'
return _model_factory(validator=validator, **kwargs)
|
Generate a Work model.
Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
as keyword arguments.
Raises:
:exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
argument is given.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L241-L252
|
[
"def _model_factory(*, data=None, model_cls=Model, **kwargs):\n return model_cls(data=data, **kwargs)\n"
] |
"""Low level data models for COALA IP entities.
Encapsulates the data modelling of COALA IP entities. Supports
model validation and the loading of data from a backing persistence
layer.
.. note:: This module should not be used directly to generate models,
unless you are extending the built-ins for your own
extensions. Instead, use the models that are contained in the
entities (:mod:`.entities`) returned from the high-level
functions (:mod:`.coalaip`).
.. warning:: The immutability guarantees given in this module are
best-effort. There is no general way to achieve
immutability in Python, but we try our hardest to make it
so.
"""
import attr
import coalaip.model_validators as validators
from copy import copy
from functools import wraps
from types import MappingProxyType
from coalaip import context_urls
from coalaip.data_formats import _extract_ld_data, _make_context_immutable
from coalaip.exceptions import (
ModelError,
ModelDataError,
ModelNotYetLoadedError,
)
from coalaip.utils import PostInitImmutable
def get_default_ld_context():
    """Return the default JSON-LD "@context" URLs: COALA IP plus schema.org."""
    return [context_urls.COALAIP, context_urls.SCHEMA]
DEFAULT_DATA_VALIDATOR = attr.validators.instance_of(MappingProxyType)
@attr.s(frozen=True, repr=False)
class Model:
    """Basic data model class for COALA IP entities. Includes Linked
    Data (JSON-LD) specifics.
    **Immutable (see :class:`~.PostInitImmutable` and attributes)**.
    Initialization may throw if attribute validation fails.
    Attributes:
        data (dict): Model data. Uses :attr:`validator` for validation.
        ld_type (str): @type of the entity
        ld_id (str): @id of the entity
        ld_context (str or dict or [str|dict], keyword): "@context" for
            the entity as either a string URL or array of string URLs or
            dictionaries. See the `JSON-LD spec on contexts
            <https://www.w3.org/TR/json-ld/#the-context>`_ for more
            information.
        validator (callable): A validator complying to :mod:`attr`'s
            `validator API <https://attrs.readthedocs.io/en/stable/examples.html#validators>`_
            that will validate :attr:`data`
    """
    # The data is defensively copied and wrapped in a read-only
    # MappingProxyType so it cannot be mutated after construction.
    data = attr.ib(convert=lambda data: MappingProxyType(copy(data)),
                   validator=validators.use_model_attr('validator'))
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    ld_id = attr.ib(default='', validator=attr.validators.instance_of(str))
    # Context defaults to COALA IP + schema.org and is made immutable.
    ld_context = attr.ib(default=attr.Factory(get_default_ld_context),
                         convert=_make_context_immutable)
    validator = attr.ib(default=DEFAULT_DATA_VALIDATOR,
                        validator=validators.is_callable)
    def __repr__(self):
        # Custom repr (repr=False above) exposing the JSON-LD facets.
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.data,
        )
@attr.s(init=False, repr=False)
class LazyLoadableModel(PostInitImmutable):
    """Lazy loadable data model class for COALA IP entities.
    **Immutable (see :class:`.PostInitImmutable` and attributes)**.
    Similar to :class:`~.Model`, except it allows the model data to be
    lazily loaded afterwards from a backing persistence layer through a
    plugin.
    Attributes:
        loaded_model (:class:`~.Model`): Loaded model from a backing
            persistence layer. Initially ``None``.
            Not initable.
            Note that this attribute is only immutable after it's been
            set once after initialization (e.g. after :meth:`load`).
        ld_type: See :attr:`~.Model.ld_type`
        ld_context: See :attr:`~.Model.ld_context`
        validator: See :attr:`~.Model.validator`
    """
    # See __init__() for defaults
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    ld_context = attr.ib()
    validator = attr.ib(validator=validators.is_callable)
    # Not settable through __init__; populated by load() or when
    # initial data is supplied to the constructor.
    loaded_model = attr.ib(init=False)
    def __init__(self, ld_type, ld_id=None, ld_context=None,
                 validator=DEFAULT_DATA_VALIDATOR, data=None):
        """Initialize a :class:`~.LazyLoadableModel` instance.
        If a :attr:`data` is provided, a :class:`Model` is generated
        as the instance's :attr:`~.LazyLoadableModel.loaded_model` using
        the given arguments.
        Ignores :attr:`ld_id`, see the :meth:`ld_id` property instead.
        """
        self.ld_type = ld_type
        # Fall back to the default context when none is given; the
        # context is made immutable either way.
        self.ld_context = _make_context_immutable(ld_context or
                                                  get_default_ld_context())
        self.validator = validator
        self.loaded_model = None
        # Run the attrs validators manually since init=False skips them.
        attr.validate(self)
        if data:
            # Eagerly create the backing Model when data is already
            # available; load() then becomes a noop.
            self.loaded_model = Model(data=data, ld_type=self.ld_type,
                                      ld_context=self.ld_context,
                                      validator=self.validator)
    def __repr__(self):
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.loaded_model.data if self.loaded_model else 'Not loaded',
        )
    @property
    def data(self):
        """dict: Model data.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.data
    @property
    def ld_id(self):
        """str: @id of the entity.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.ld_id
    def load(self, persist_id, *, plugin):
        """Load the :attr:`~.LazyLoadableModel.loaded_model` of this
        instance. Noop if model was already loaded.
        Args:
            persist_id (str): Id of this model on the persistence layer
            plugin (subclass of :class:`~.AbstractPlugin`): Persistence
                layer plugin to load from
        Raises:
            :exc:`~.ModelDataError`: If the loaded entity's data fails
                validation from :attr:`~.LazyLoadableEntity.validator`
                or its type or context differs from their expected
                values
            :exc:`~.EntityNotFoundError`: If the entity could not be
                found on the persistence layer
            :exc:`~.PersistenceError`: If any other unhandled error
                in the plugin occurred
        """
        if self.loaded_model:
            return
        persist_data = plugin.load(persist_id)
        extracted_ld_result = _extract_ld_data(persist_data)
        loaded_data = extracted_ld_result.data
        loaded_type = extracted_ld_result.ld_type
        loaded_id = extracted_ld_result.ld_id
        loaded_context = extracted_ld_result.ld_context
        # Sanity check the loaded type and context
        if loaded_type and loaded_type != self.ld_type:
            raise ModelDataError(
                ("Loaded @type ('{loaded_type}') differs from entity's "
                 "@type ('{self_type})'").format(loaded_type=loaded_type,
                                                 self_type=self.ld_type)
            )
        # Contexts are compared element-wise; order matters in JSON-LD.
        if loaded_context and list(loaded_context) != list(self.ld_context):
            raise ModelDataError(
                ("Loaded context ('{loaded_ctx}') differs from entity's "
                 "context ('{self_ctx}')").format(loaded_ctx=loaded_context,
                                                  self_ctx=self.ld_context)
            )
        kwargs = {
            'data': loaded_data,
            'validator': self.validator,
            'ld_type': self.ld_type,
            'ld_context': self.ld_context,
        }
        # Only pass @id through when the persisted record had one.
        if loaded_id:
            kwargs['ld_id'] = loaded_id
        self.loaded_model = Model(**kwargs)
def _model_factory(*, data=None, model_cls=Model, **kwargs):
    """Instantiate ``model_cls`` (default :class:`Model`) with ``data``
    and any remaining keyword arguments.
    """
    factory_kwargs = dict(kwargs, data=data)
    return model_cls(**factory_kwargs)
def _raise_if_not_given_ld_type(strict_ld_type, *, for_model):
def decorator(func):
@wraps(func)
def raise_if_not_given_type(*args, **kwargs):
ld_type = kwargs.get('ld_type')
if ld_type is not None and ld_type != strict_ld_type:
raise ModelError("{model_name} models must be of '@type' "
"'{strict_type}. Given '{given_type}'"
.format(model_name=for_model,
strict_type=strict_ld_type,
given_type=ld_type))
return func(*args, **kwargs)
return raise_if_not_given_type
return decorator
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
                                ld_type='CreativeWork', **kwargs):
    """Generate a Manifestation model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    # Fix: this factory was wrongly wrapped in
    # @_raise_if_not_given_ld_type('AbstractWork', for_model='Work') --
    # that guard belongs to work_model_factory. With it, even passing
    # the documented default (ld_type='CreativeWork') raised ModelError.
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
def right_model_factory(*, validator=validators.is_right_model,
                        ld_type='Right', **kwargs):
    """Generate a Right model.

    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    factory_kwargs = dict(kwargs, validator=validator, ld_type=ld_type)
    return _model_factory(**factory_kwargs)
@_raise_if_not_given_ld_type('Copyright', for_model='Copyright')
def copyright_model_factory(*, validator=validators.is_copyright_model,
                            **kwargs):
    """Generate a Copyright model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
            argument is given.
    """
    # The decorator already rejected any foreign ld_type, so it is
    # safe to force it here.
    factory_kwargs = dict(kwargs, ld_type='Copyright')
    return _model_factory(validator=validator, **factory_kwargs)
@_raise_if_not_given_ld_type('RightsTransferAction',
                             for_model='RightsAssignment')
def rights_assignment_model_factory(**kwargs):
    """Generate a RightsAssignment model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'RightsTransferAction' ``ld_type``
            keyword argument is given.
    """
    factory_kwargs = dict(kwargs, ld_type='RightsTransferAction')
    return _model_factory(**factory_kwargs)
|
COALAIP/pycoalaip
|
coalaip/models.py
|
manifestation_model_factory
|
python
|
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
ld_type='CreativeWork', **kwargs):
return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
|
Generate a Manifestation model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L255-L262
|
[
"def _model_factory(*, data=None, model_cls=Model, **kwargs):\n return model_cls(data=data, **kwargs)\n"
] |
"""Low level data models for COALA IP entities.
Encapsulates the data modelling of COALA IP entities. Supports
model validation and the loading of data from a backing persistence
layer.
.. note:: This module should not be used directly to generate models,
unless you are extending the built-ins for your own
extensions. Instead, use the models that are contained in the
entities (:mod:`.entities`) returned from the high-level
functions (:mod:`.coalaip`).
.. warning:: The immutability guarantees given in this module are
best-effort. There is no general way to achieve
immutability in Python, but we try our hardest to make it
so.
"""
import attr
import coalaip.model_validators as validators
from copy import copy
from functools import wraps
from types import MappingProxyType
from coalaip import context_urls
from coalaip.data_formats import _extract_ld_data, _make_context_immutable
from coalaip.exceptions import (
ModelError,
ModelDataError,
ModelNotYetLoadedError,
)
from coalaip.utils import PostInitImmutable
def get_default_ld_context():
    """Return the default JSON-LD "@context" URLs: COALA IP plus schema.org."""
    return [context_urls.COALAIP, context_urls.SCHEMA]
DEFAULT_DATA_VALIDATOR = attr.validators.instance_of(MappingProxyType)
@attr.s(frozen=True, repr=False)
class Model:
    """Basic data model class for COALA IP entities. Includes Linked
    Data (JSON-LD) specifics.
    **Immutable (see :class:`~.PostInitImmutable` and attributes)**.
    Initialization may throw if attribute validation fails.
    Attributes:
        data (dict): Model data. Uses :attr:`validator` for validation.
        ld_type (str): @type of the entity
        ld_id (str): @id of the entity
        ld_context (str or dict or [str|dict], keyword): "@context" for
            the entity as either a string URL or array of string URLs or
            dictionaries. See the `JSON-LD spec on contexts
            <https://www.w3.org/TR/json-ld/#the-context>`_ for more
            information.
        validator (callable): A validator complying to :mod:`attr`'s
            `validator API <https://attrs.readthedocs.io/en/stable/examples.html#validators>`_
            that will validate :attr:`data`
    """
    # The data is defensively copied and wrapped in a read-only
    # MappingProxyType so it cannot be mutated after construction.
    data = attr.ib(convert=lambda data: MappingProxyType(copy(data)),
                   validator=validators.use_model_attr('validator'))
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    ld_id = attr.ib(default='', validator=attr.validators.instance_of(str))
    # Context defaults to COALA IP + schema.org and is made immutable.
    ld_context = attr.ib(default=attr.Factory(get_default_ld_context),
                         convert=_make_context_immutable)
    validator = attr.ib(default=DEFAULT_DATA_VALIDATOR,
                        validator=validators.is_callable)
    def __repr__(self):
        # Custom repr (repr=False above) exposing the JSON-LD facets.
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.data,
        )
@attr.s(init=False, repr=False)
class LazyLoadableModel(PostInitImmutable):
"""Lazy loadable data model class for COALA IP entities.
**Immutable (see :class:`.PostInitImmutable` and attributes)**.
Similar to :class:`~.Model`, except it allows the model data to be
lazily loaded afterwards from a backing persistence layer through a
plugin.
Attributes:
loaded_model (:class:`~.Model`): Loaded model from a backing
persistence layer. Initially ``None``.
Not initable.
Note that this attribute is only immutable after it's been
set once after initialization (e.g. after :meth:`load`).
ld_type: See :attr:`~.Model.ld_type`
ld_context: See :attr:`~.Model.ld_context`
validator: See :attr:`~.Model.validator`
"""
# See __init__() for defaults
ld_type = attr.ib(validator=attr.validators.instance_of(str))
ld_context = attr.ib()
validator = attr.ib(validator=validators.is_callable)
loaded_model = attr.ib(init=False)
def __init__(self, ld_type, ld_id=None, ld_context=None,
validator=DEFAULT_DATA_VALIDATOR, data=None):
"""Initialize a :class:`~.LazyLoadableModel` instance.
If a :attr:`data` is provided, a :class:`Model` is generated
as the instance's :attr:`~.LazyLoadableModel.loaded_model` using
the given arguments.
Ignores :attr:`ld_id`, see the :meth:`ld_id` property instead.
"""
self.ld_type = ld_type
self.ld_context = _make_context_immutable(ld_context or
get_default_ld_context())
self.validator = validator
self.loaded_model = None
attr.validate(self)
if data:
self.loaded_model = Model(data=data, ld_type=self.ld_type,
ld_context=self.ld_context,
validator=self.validator)
def __repr__(self):
return '{name}(type={type}, context={context}, data={data})'.format(
name=self.__class__.__name__,
type=self.ld_type,
context=self.ld_context,
data=self.loaded_model.data if self.loaded_model else 'Not loaded',
)
@property
def data(self):
"""dict: Model data.
Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
loaded yet.
"""
if self.loaded_model is None:
raise ModelNotYetLoadedError()
return self.loaded_model.data
@property
def ld_id(self):
"""str: @id of the entity.
Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
loaded yet.
"""
if self.loaded_model is None:
raise ModelNotYetLoadedError()
return self.loaded_model.ld_id
def load(self, persist_id, *, plugin):
"""Load the :attr:`~.LazyLoadableModel.loaded_model` of this
instance. Noop if model was already loaded.
Args:
persist_id (str): Id of this model on the persistence layer
plugin (subclass of :class:`~.AbstractPlugin`): Persistence
layer plugin to load from
Raises:
:exc:`~.ModelDataError`: If the loaded entity's data fails
validation from :attr:`~.LazyLoadableEntity.validator`
or its type or context differs from their expected
values
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
"""
if self.loaded_model:
return
persist_data = plugin.load(persist_id)
extracted_ld_result = _extract_ld_data(persist_data)
loaded_data = extracted_ld_result.data
loaded_type = extracted_ld_result.ld_type
loaded_id = extracted_ld_result.ld_id
loaded_context = extracted_ld_result.ld_context
# Sanity check the loaded type and context
if loaded_type and loaded_type != self.ld_type:
raise ModelDataError(
("Loaded @type ('{loaded_type}') differs from entity's "
"@type ('{self_type})'").format(loaded_type=loaded_type,
self_type=self.ld_type)
)
if loaded_context and list(loaded_context) != list(self.ld_context):
raise ModelDataError(
("Loaded context ('{loaded_ctx}') differs from entity's "
"context ('{self_ctx}')").format(loaded_ctx=loaded_context,
self_ctx=self.ld_context)
)
kwargs = {
'data': loaded_data,
'validator': self.validator,
'ld_type': self.ld_type,
'ld_context': self.ld_context,
}
if loaded_id:
kwargs['ld_id'] = loaded_id
self.loaded_model = Model(**kwargs)
def _model_factory(*, data=None, model_cls=Model, **kwargs):
return model_cls(data=data, **kwargs)
def _raise_if_not_given_ld_type(strict_ld_type, *, for_model):
def decorator(func):
@wraps(func)
def raise_if_not_given_type(*args, **kwargs):
ld_type = kwargs.get('ld_type')
if ld_type is not None and ld_type != strict_ld_type:
raise ModelError("{model_name} models must be of '@type' "
"'{strict_type}. Given '{given_type}'"
.format(model_name=for_model,
strict_type=strict_ld_type,
given_type=ld_type))
return func(*args, **kwargs)
return raise_if_not_given_type
return decorator
@_raise_if_not_given_ld_type('AbstractWork', for_model='Work')
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
    """Generate a Work model.

    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.

    Raises:
        :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
            argument is given.
    """
    # Works always carry the fixed 'AbstractWork' type; the decorator
    # has already rejected any conflicting ld_type.
    factory_kwargs = dict(kwargs, ld_type='AbstractWork')
    return _model_factory(validator=validator, **factory_kwargs)
def right_model_factory(*, validator=validators.is_right_model,
                        ld_type='Right', **kwargs):
    """Generate a Right model.
    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    # Delegates to the generic factory; ld_type defaults to 'Right'
    # but may be overridden (e.g. for more specific right subtypes).
    return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
@_raise_if_not_given_ld_type('Copyright', for_model='Copyright')
def copyright_model_factory(*, validator=validators.is_copyright_model,
                            **kwargs):
    """Generate a Copyright model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
            argument is given.
    """
    # Force the fixed type; the decorator already rejected conflicts.
    kwargs['ld_type'] = 'Copyright'
    return _model_factory(validator=validator, **kwargs)
@_raise_if_not_given_ld_type('RightsTransferAction',
                             for_model='RightsAssignment')
def rights_assignment_model_factory(**kwargs):
    """Generate a RightsAssignment model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'RightsTransferAction' ``ld_type``
            keyword argument is given.
    """
    # Force the fixed type; the decorator already rejected conflicts.
    kwargs['ld_type'] = 'RightsTransferAction'
    return _model_factory(**kwargs)
|
COALAIP/pycoalaip
|
coalaip/models.py
|
right_model_factory
|
python
|
def right_model_factory(*, validator=validators.is_right_model,
ld_type='Right', **kwargs):
return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
|
Generate a Right model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L265-L272
|
[
"def _model_factory(*, data=None, model_cls=Model, **kwargs):\n return model_cls(data=data, **kwargs)\n"
] |
"""Low level data models for COALA IP entities.
Encapsulates the data modelling of COALA IP entities. Supports
model validation and the loading of data from a backing persistence
layer.
.. note:: This module should not be used directly to generate models,
unless you are extending the built-ins for your own
extensions. Instead, use the models that are contained in the
entities (:mod:`.entities`) returned from the high-level
functions (:mod:`.coalaip`).
.. warning:: The immutability guarantees given in this module are
best-effort. There is no general way to achieve
immutability in Python, but we try our hardest to make it
so.
"""
import attr
import coalaip.model_validators as validators
from copy import copy
from functools import wraps
from types import MappingProxyType
from coalaip import context_urls
from coalaip.data_formats import _extract_ld_data, _make_context_immutable
from coalaip.exceptions import (
ModelError,
ModelDataError,
ModelNotYetLoadedError,
)
from coalaip.utils import PostInitImmutable
def get_default_ld_context():
return [context_urls.COALAIP, context_urls.SCHEMA]
DEFAULT_DATA_VALIDATOR = attr.validators.instance_of(MappingProxyType)
@attr.s(frozen=True, repr=False)
class Model:
    """Basic data model class for COALA IP entities. Includes Linked
    Data (JSON-LD) specifics.
    **Immutable (see :class:`~.PostInitImmutable` and attributes)**.
    Initialization may throw if attribute validation fails.
    Attributes:
        data (dict): Model data. Uses :attr:`validator` for validation.
        ld_type (str): @type of the entity
        ld_id (str): @id of the entity
        ld_context (str or dict or [str|dict], keyword): "@context" for
            the entity as either a string URL or array of string URLs or
            dictionaries. See the `JSON-LD spec on contexts
            <https://www.w3.org/TR/json-ld/#the-context>`_ for more
            information.
        validator (callable): A validator complying to :mod:`attr`'s
            `validator API <https://attrs.readthedocs.io/en/stable/examples.html#validators>`_
            that will validate :attr:`data`
    """

    # Shallow-copy the caller's mapping, then wrap it in MappingProxyType so
    # later mutation of the original dict cannot leak into this model.
    # Validation delegates to whatever callable is stored in `validator`.
    data = attr.ib(convert=lambda data: MappingProxyType(copy(data)),
                   validator=validators.use_model_attr('validator'))
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    # `ld_id` defaults to the empty string, i.e. "no persisted @id yet"
    ld_id = attr.ib(default='', validator=attr.validators.instance_of(str))
    # Context defaults to [COALA IP, schema.org]; converted to an immutable form
    ld_context = attr.ib(default=attr.Factory(get_default_ld_context),
                         convert=_make_context_immutable)
    validator = attr.ib(default=DEFAULT_DATA_VALIDATOR,
                        validator=validators.is_callable)

    def __repr__(self):
        # Custom repr (repr=False above) that omits ld_id and validator
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.data,
        )
@attr.s(init=False, repr=False)
class LazyLoadableModel(PostInitImmutable):
    """Lazy loadable data model class for COALA IP entities.
    **Immutable (see :class:`.PostInitImmutable` and attributes)**.
    Similar to :class:`~.Model`, except it allows the model data to be
    lazily loaded afterwards from a backing persistence layer through a
    plugin.
    Attributes:
        loaded_model (:class:`~.Model`): Loaded model from a backing
            persistence layer. Initially ``None``.
            Not initable.
            Note that this attribute is only immutable after it's been
            set once after initialization (e.g. after :meth:`load`).
        ld_type: See :attr:`~.Model.ld_type`
        ld_context: See :attr:`~.Model.ld_context`
        validator: See :attr:`~.Model.validator`
    """

    # See __init__() for defaults
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    ld_context = attr.ib()
    validator = attr.ib(validator=validators.is_callable)
    loaded_model = attr.ib(init=False)

    def __init__(self, ld_type, ld_id=None, ld_context=None,
                 validator=DEFAULT_DATA_VALIDATOR, data=None):
        """Initialize a :class:`~.LazyLoadableModel` instance.
        If a :attr:`data` is provided, a :class:`Model` is generated
        as the instance's :attr:`~.LazyLoadableModel.loaded_model` using
        the given arguments.
        Ignores :attr:`ld_id`, see the :meth:`ld_id` property instead.
        """
        self.ld_type = ld_type
        self.ld_context = _make_context_immutable(ld_context or
                                                  get_default_ld_context())
        self.validator = validator
        self.loaded_model = None
        # attr.s(init=False) skips automatic validation; run it manually
        attr.validate(self)
        if data:
            # Eagerly create the backing Model when data is already available
            self.loaded_model = Model(data=data, ld_type=self.ld_type,
                                      ld_context=self.ld_context,
                                      validator=self.validator)

    def __repr__(self):
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.loaded_model.data if self.loaded_model else 'Not loaded',
        )

    @property
    def data(self):
        """dict: Model data.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.data

    @property
    def ld_id(self):
        """str: @id of the entity.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.ld_id

    def load(self, persist_id, *, plugin):
        """Load the :attr:`~.LazyLoadableModel.loaded_model` of this
        instance. Noop if model was already loaded.
        Args:
            persist_id (str): Id of this model on the persistence layer
            plugin (subclass of :class:`~.AbstractPlugin`): Persistence
                layer plugin to load from
        Raises:
            :exc:`~.ModelDataError`: If the loaded entity's data fails
                validation from :attr:`~.LazyLoadableEntity.validator`
                or its type or context differs from their expected
                values
            :exc:`~.EntityNotFoundError`: If the entity could not be
                found on the persistence layer
            :exc:`~.PersistenceError`: If any other unhandled error
                in the plugin occurred
        """
        if self.loaded_model:
            return
        persist_data = plugin.load(persist_id)
        # Split the raw persisted payload into plain data + JSON-LD keywords
        extracted_ld_result = _extract_ld_data(persist_data)
        loaded_data = extracted_ld_result.data
        loaded_type = extracted_ld_result.ld_type
        loaded_id = extracted_ld_result.ld_id
        loaded_context = extracted_ld_result.ld_context
        # Sanity check the loaded type and context
        if loaded_type and loaded_type != self.ld_type:
            raise ModelDataError(
                ("Loaded @type ('{loaded_type}') differs from entity's "
                 "@type ('{self_type})'").format(loaded_type=loaded_type,
                                                 self_type=self.ld_type)
            )
        # Compare as lists since the stored context may be an immutable tuple
        if loaded_context and list(loaded_context) != list(self.ld_context):
            raise ModelDataError(
                ("Loaded context ('{loaded_ctx}') differs from entity's "
                 "context ('{self_ctx}')").format(loaded_ctx=loaded_context,
                                                  self_ctx=self.ld_context)
            )
        kwargs = {
            'data': loaded_data,
            'validator': self.validator,
            'ld_type': self.ld_type,
            'ld_context': self.ld_context,
        }
        # Only pass ld_id through when the persistence layer provided one
        if loaded_id:
            kwargs['ld_id'] = loaded_id
        # Model construction validates `loaded_data` via self.validator
        self.loaded_model = Model(**kwargs)
def _model_factory(*, data=None, model_cls=Model, **kwargs):
    """Instantiate ``model_cls``, forwarding ``data`` plus any extra kwargs."""
    factory_kwargs = dict(kwargs, data=data)
    return model_cls(**factory_kwargs)
def _raise_if_not_given_ld_type(strict_ld_type, *, for_model):
def decorator(func):
@wraps(func)
def raise_if_not_given_type(*args, **kwargs):
ld_type = kwargs.get('ld_type')
if ld_type is not None and ld_type != strict_ld_type:
raise ModelError("{model_name} models must be of '@type' "
"'{strict_type}. Given '{given_type}'"
.format(model_name=for_model,
strict_type=strict_ld_type,
given_type=ld_type))
return func(*args, **kwargs)
return raise_if_not_given_type
return decorator
@_raise_if_not_given_ld_type('AbstractWork', for_model='Work')
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
    """Generate a Work model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
            argument is given.
    """
    # Works are always typed 'AbstractWork'
    factory_kwargs = dict(kwargs, validator=validator, ld_type='AbstractWork')
    return _model_factory(**factory_kwargs)
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
                                ld_type='CreativeWork', **kwargs):
    """Generate a Manifestation model.
    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    # Manifestations may use more specific schema.org types, so ld_type
    # is an overridable default rather than a fixed value
    factory_kwargs = dict(kwargs, validator=validator, ld_type=ld_type)
    return _model_factory(**factory_kwargs)
@_raise_if_not_given_ld_type('Copyright', for_model='Copyright')
def copyright_model_factory(*, validator=validators.is_copyright_model,
                            **kwargs):
    """Generate a Copyright model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
            argument is given.
    """
    # Copyrights are always typed 'Copyright'
    factory_kwargs = dict(kwargs, validator=validator, ld_type='Copyright')
    return _model_factory(**factory_kwargs)
@_raise_if_not_given_ld_type('RightsTransferAction',
                             for_model='RightsAssignment')
def rights_assignment_model_factory(**kwargs):
    """Generate a RightsAssignment model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'RightsTransferAction' ``ld_type``
            keyword argument is given.
    """
    # Rights assignments are always typed 'RightsTransferAction'
    factory_kwargs = dict(kwargs, ld_type='RightsTransferAction')
    return _model_factory(**factory_kwargs)
|
COALAIP/pycoalaip
|
coalaip/models.py
|
copyright_model_factory
|
python
|
def copyright_model_factory(*, validator=validators.is_copyright_model,
**kwargs):
kwargs['ld_type'] = 'Copyright'
return _model_factory(validator=validator, **kwargs)
|
Generate a Copyright model.
Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
as keyword arguments.
Raises:
:exc:`ModelError`: If a non-'Copyright' ``ld_type`` keyword
argument is given.
|
train
|
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/models.py#L276-L288
|
[
"def _model_factory(*, data=None, model_cls=Model, **kwargs):\n return model_cls(data=data, **kwargs)\n"
] |
"""Low level data models for COALA IP entities.
Encapsulates the data modelling of COALA IP entities. Supports
model validation and the loading of data from a backing persistence
layer.
.. note:: This module should not be used directly to generate models,
unless you are extending the built-ins for your own
extensions. Instead, use the models that are contained in the
entities (:mod:`.entities`) returned from the high-level
functions (:mod:`.coalaip`).
.. warning:: The immutability guarantees given in this module are
best-effort. There is no general way to achieve
immutability in Python, but we try our hardest to make it
so.
"""
import attr
import coalaip.model_validators as validators
from copy import copy
from functools import wraps
from types import MappingProxyType
from coalaip import context_urls
from coalaip.data_formats import _extract_ld_data, _make_context_immutable
from coalaip.exceptions import (
ModelError,
ModelDataError,
ModelNotYetLoadedError,
)
from coalaip.utils import PostInitImmutable
def get_default_ld_context():
    """Return the default JSON-LD context list: COALA IP, then schema.org."""
    default_context = [context_urls.COALAIP, context_urls.SCHEMA]
    return default_context
DEFAULT_DATA_VALIDATOR = attr.validators.instance_of(MappingProxyType)
@attr.s(frozen=True, repr=False)
class Model:
    """Basic data model class for COALA IP entities. Includes Linked
    Data (JSON-LD) specifics.
    **Immutable (see :class:`~.PostInitImmutable` and attributes)**.
    Initialization may throw if attribute validation fails.
    Attributes:
        data (dict): Model data. Uses :attr:`validator` for validation.
        ld_type (str): @type of the entity
        ld_id (str): @id of the entity
        ld_context (str or dict or [str|dict], keyword): "@context" for
            the entity as either a string URL or array of string URLs or
            dictionaries. See the `JSON-LD spec on contexts
            <https://www.w3.org/TR/json-ld/#the-context>`_ for more
            information.
        validator (callable): A validator complying to :mod:`attr`'s
            `validator API <https://attrs.readthedocs.io/en/stable/examples.html#validators>`_
            that will validate :attr:`data`
    """

    # Shallow-copy the caller's mapping, then wrap it in MappingProxyType so
    # later mutation of the original dict cannot leak into this model.
    # Validation delegates to whatever callable is stored in `validator`.
    data = attr.ib(convert=lambda data: MappingProxyType(copy(data)),
                   validator=validators.use_model_attr('validator'))
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    # `ld_id` defaults to the empty string, i.e. "no persisted @id yet"
    ld_id = attr.ib(default='', validator=attr.validators.instance_of(str))
    # Context defaults to [COALA IP, schema.org]; converted to an immutable form
    ld_context = attr.ib(default=attr.Factory(get_default_ld_context),
                         convert=_make_context_immutable)
    validator = attr.ib(default=DEFAULT_DATA_VALIDATOR,
                        validator=validators.is_callable)

    def __repr__(self):
        # Custom repr (repr=False above) that omits ld_id and validator
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.data,
        )
@attr.s(init=False, repr=False)
class LazyLoadableModel(PostInitImmutable):
    """Lazy loadable data model class for COALA IP entities.
    **Immutable (see :class:`.PostInitImmutable` and attributes)**.
    Similar to :class:`~.Model`, except it allows the model data to be
    lazily loaded afterwards from a backing persistence layer through a
    plugin.
    Attributes:
        loaded_model (:class:`~.Model`): Loaded model from a backing
            persistence layer. Initially ``None``.
            Not initable.
            Note that this attribute is only immutable after it's been
            set once after initialization (e.g. after :meth:`load`).
        ld_type: See :attr:`~.Model.ld_type`
        ld_context: See :attr:`~.Model.ld_context`
        validator: See :attr:`~.Model.validator`
    """

    # See __init__() for defaults
    ld_type = attr.ib(validator=attr.validators.instance_of(str))
    ld_context = attr.ib()
    validator = attr.ib(validator=validators.is_callable)
    loaded_model = attr.ib(init=False)

    def __init__(self, ld_type, ld_id=None, ld_context=None,
                 validator=DEFAULT_DATA_VALIDATOR, data=None):
        """Initialize a :class:`~.LazyLoadableModel` instance.
        If a :attr:`data` is provided, a :class:`Model` is generated
        as the instance's :attr:`~.LazyLoadableModel.loaded_model` using
        the given arguments.
        Ignores :attr:`ld_id`, see the :meth:`ld_id` property instead.
        """
        self.ld_type = ld_type
        self.ld_context = _make_context_immutable(ld_context or
                                                  get_default_ld_context())
        self.validator = validator
        self.loaded_model = None
        # attr.s(init=False) skips automatic validation; run it manually
        attr.validate(self)
        if data:
            # Eagerly create the backing Model when data is already available
            self.loaded_model = Model(data=data, ld_type=self.ld_type,
                                      ld_context=self.ld_context,
                                      validator=self.validator)

    def __repr__(self):
        return '{name}(type={type}, context={context}, data={data})'.format(
            name=self.__class__.__name__,
            type=self.ld_type,
            context=self.ld_context,
            data=self.loaded_model.data if self.loaded_model else 'Not loaded',
        )

    @property
    def data(self):
        """dict: Model data.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.data

    @property
    def ld_id(self):
        """str: @id of the entity.
        Raises :exc:`~.ModelNotYetLoadedError` if the data has not been
        loaded yet.
        """
        if self.loaded_model is None:
            raise ModelNotYetLoadedError()
        return self.loaded_model.ld_id

    def load(self, persist_id, *, plugin):
        """Load the :attr:`~.LazyLoadableModel.loaded_model` of this
        instance. Noop if model was already loaded.
        Args:
            persist_id (str): Id of this model on the persistence layer
            plugin (subclass of :class:`~.AbstractPlugin`): Persistence
                layer plugin to load from
        Raises:
            :exc:`~.ModelDataError`: If the loaded entity's data fails
                validation from :attr:`~.LazyLoadableEntity.validator`
                or its type or context differs from their expected
                values
            :exc:`~.EntityNotFoundError`: If the entity could not be
                found on the persistence layer
            :exc:`~.PersistenceError`: If any other unhandled error
                in the plugin occurred
        """
        if self.loaded_model:
            return
        persist_data = plugin.load(persist_id)
        # Split the raw persisted payload into plain data + JSON-LD keywords
        extracted_ld_result = _extract_ld_data(persist_data)
        loaded_data = extracted_ld_result.data
        loaded_type = extracted_ld_result.ld_type
        loaded_id = extracted_ld_result.ld_id
        loaded_context = extracted_ld_result.ld_context
        # Sanity check the loaded type and context
        if loaded_type and loaded_type != self.ld_type:
            raise ModelDataError(
                ("Loaded @type ('{loaded_type}') differs from entity's "
                 "@type ('{self_type})'").format(loaded_type=loaded_type,
                                                 self_type=self.ld_type)
            )
        # Compare as lists since the stored context may be an immutable tuple
        if loaded_context and list(loaded_context) != list(self.ld_context):
            raise ModelDataError(
                ("Loaded context ('{loaded_ctx}') differs from entity's "
                 "context ('{self_ctx}')").format(loaded_ctx=loaded_context,
                                                  self_ctx=self.ld_context)
            )
        kwargs = {
            'data': loaded_data,
            'validator': self.validator,
            'ld_type': self.ld_type,
            'ld_context': self.ld_context,
        }
        # Only pass ld_id through when the persistence layer provided one
        if loaded_id:
            kwargs['ld_id'] = loaded_id
        # Model construction validates `loaded_data` via self.validator
        self.loaded_model = Model(**kwargs)
def _model_factory(*, data=None, model_cls=Model, **kwargs):
    """Instantiate ``model_cls``, forwarding ``data`` plus any extra kwargs."""
    factory_kwargs = dict(kwargs, data=data)
    return model_cls(**factory_kwargs)
def _raise_if_not_given_ld_type(strict_ld_type, *, for_model):
def decorator(func):
@wraps(func)
def raise_if_not_given_type(*args, **kwargs):
ld_type = kwargs.get('ld_type')
if ld_type is not None and ld_type != strict_ld_type:
raise ModelError("{model_name} models must be of '@type' "
"'{strict_type}. Given '{given_type}'"
.format(model_name=for_model,
strict_type=strict_ld_type,
given_type=ld_type))
return func(*args, **kwargs)
return raise_if_not_given_type
return decorator
@_raise_if_not_given_ld_type('AbstractWork', for_model='Work')
def work_model_factory(*, validator=validators.is_work_model, **kwargs):
    """Generate a Work model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'AbstractWork' ``ld_type`` keyword
            argument is given.
    """
    # Works are always typed 'AbstractWork'
    factory_kwargs = dict(kwargs, validator=validator, ld_type='AbstractWork')
    return _model_factory(**factory_kwargs)
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
                                ld_type='CreativeWork', **kwargs):
    """Generate a Manifestation model.
    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    # Manifestations may use more specific schema.org types, so ld_type
    # is an overridable default rather than a fixed value
    factory_kwargs = dict(kwargs, validator=validator, ld_type=ld_type)
    return _model_factory(**factory_kwargs)
def right_model_factory(*, validator=validators.is_right_model,
                        ld_type='Right', **kwargs):
    """Generate a Right model.
    Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
    ``ld_context`` as keyword arguments.
    """
    # Rights may be subtyped, so ld_type is an overridable default
    factory_kwargs = dict(kwargs, validator=validator, ld_type=ld_type)
    return _model_factory(**factory_kwargs)
@_raise_if_not_given_ld_type('RightsTransferAction',
                             for_model='RightsAssignment')
def rights_assignment_model_factory(**kwargs):
    """Generate a RightsAssignment model.
    Expects ``data``, ``validator``, ``model_cls``, and ``ld_context``
    as keyword arguments.
    Raises:
        :exc:`ModelError`: If a non-'RightsTransferAction' ``ld_type``
            keyword argument is given.
    """
    # NOTE(review): a stray `@_raise_if_not_given_ld_type('Copyright', ...)`
    # decorator was stacked on this factory. With it, any explicit ``ld_type``
    # argument would always raise, since no value can equal both 'Copyright'
    # and 'RightsTransferAction'. Removed so only the correct check remains.
    kwargs['ld_type'] = 'RightsTransferAction'
    return _model_factory(**kwargs)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
color_gen
|
python
|
def color_gen(colormap='viridis', key=None, n=15):
    """Yield colors cyclically from a bokeh palette name or a sequence of hex values.

    Parameters
    ----------
    colormap: str, sequence
        A bokeh palette name (attribute of ``bokeh.palettes``) or an
        explicit list/tuple of color hex values
    key: optional
        Key selecting a member when the named palette is a dict of
        palettes (defaults to the first key)
    n: int
        Size passed to callable palette generators (e.g. ``viridis``)

    Yields
    ------
    The palette colors, repeating indefinitely.
    """
    if colormap in dir(bpal):
        palette = getattr(bpal, colormap)
        if isinstance(palette, dict):
            # Palette families are dicts keyed (presumably by size) — pick
            # the first entry when no key is given
            if key is None:
                key = list(palette.keys())[0]
            palette = palette[key]
        elif callable(palette):
            # Generator-style palettes produce n colors on demand
            palette = palette(n)
        else:
            raise TypeError("pallette must be a bokeh palette name or a sequence of color hex values.")
    elif isinstance(colormap, (list, tuple)):
        palette = colormap
    else:
        raise TypeError("pallette must be a bokeh palette name or a sequence of color hex values.")
    yield from itertools.cycle(palette)
|
Color generator for Bokeh plots
Parameters
----------
colormap: str, sequence
The name of the color map
Returns
-------
generator
A generator for the color palette
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L763-L796
| null |
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
A Python wrapper for the SVO Filter Profile Service
"""
from glob import glob
import inspect
import os
import pickle
from pkg_resources import resource_filename
import warnings
import itertools
import astropy.table as at
import astropy.io.votable as vo
import astropy.units as q
import astropy.constants as ac
from astropy.utils.exceptions import AstropyWarning
from bokeh.plotting import figure, show
import bokeh.palettes as bpal
import numpy as np
warnings.simplefilter('ignore', category=AstropyWarning)
# Extinction vector values R per filter, used as Filter.ext_vector
# (attributed in Filter.__init__ to Green et al. 2018); filters not
# listed here fall back to 0
EXTINCTION = {'PS1.g': 3.384, 'PS1.r': 2.483, 'PS1.i': 1.838, 'PS1.z': 1.414,
              'PS1.y': 1.126, 'SDSS.u': 4.0, 'SDSS.g': 3.384, 'SDSS.r': 2.483,
              'SDSS.i': 1.838, 'SDSS.z': 1.414, '2MASS.J': 0.650,
              '2MASS.H': 0.327, '2MASS.Ks': 0.161}
# Per-filter fractional systematic uncertainties; Filter.__init__ falls
# back to 0.02 (2 percent) for filters not listed here
SYSTEMATICS = {}
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
    def __init__(self, band, filter_directory=None,
                 wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
                 **kwargs):
        """
        Loads the bandpass data into the Filter object
        Parameters
        ----------
        band: str
            The bandpass filename (e.g. 2MASS.J), or 'tophat' for a
            synthetic top hat filter (then provide wave_min/wave_max)
        filter_directory: str
            The directory containing the filter files
        wave_units: str, astropy.units.core.PrefixUnit (optional)
            The wavelength units
        flux_units: str, astropy.units.core.PrefixUnit (optional)
            The zeropoint flux units
        """
        if filter_directory is None:
            filter_directory = resource_filename('svo_filters', 'data/filters/')
        # Check if TopHat
        if band.lower().replace('-', '').replace(' ', '') == 'tophat':
            # check kwargs for limits
            wave_min = kwargs.get('wave_min')
            wave_max = kwargs.get('wave_max')
            filepath = ''
            if wave_min is None or wave_max is None:
                raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
            else:
                # Load the filter
                n_pix = kwargs.get('n_pixels', 100)
                self.load_TopHat(wave_min, wave_max, n_pix)
        else:
            # Get list of filters; map extensionless names back to real paths
            files = glob(filter_directory+'*')
            no_ext = {f.replace('.txt', ''): f for f in files}
            bands = [os.path.basename(b) for b in no_ext]
            fp = os.path.join(filter_directory, band)
            filepath = no_ext.get(fp, fp)
            # If the filter is missing, ask what to do
            if band not in bands:
                err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
                raise IOError(err)
            # Get the first line to determine format
            with open(filepath) as f:
                top = f.readline()
            # Read in XML file
            if top.startswith('<?xml'):
                self.load_xml(filepath)
            # Read in txt file
            elif filepath.endswith('.txt'):
                self.load_txt(filepath)
            else:
                raise TypeError("File must be XML or ascii format.")
        # Set the wavelength and throughput
        # (loaders populate self.raw in Angstroms; start in AA then convert)
        self._wave_units = q.AA
        self._wave = np.array([self.raw[0]]) * self.wave_units
        self._throughput = np.array([self.raw[1]])
        # Set n_bins and pixels_per_bin
        self.n_bins = 1
        self.pixels_per_bin = self.raw.shape[-1]
        # Rename some values and apply units
        self.wave_min = self.WavelengthMin * self.wave_units
        self.wave_max = self.WavelengthMax * self.wave_units
        self.wave_eff = self.WavelengthEff * self.wave_units
        self.wave_center = self.WavelengthCen * self.wave_units
        self.wave_mean = self.WavelengthMean * self.wave_units
        self.wave_peak = self.WavelengthPeak * self.wave_units
        self.wave_phot = self.WavelengthPhot * self.wave_units
        self.wave_pivot = self.WavelengthPivot * self.wave_units
        self.width_eff = self.WidthEff * self.wave_units
        self.fwhm = self.FWHM * self.wave_units
        self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
        # Delete redundant attributes
        del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
        del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
        del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
        del self.ZeroPointUnit, self.ZeroPoint
        # WavelengthUnit is only set by some loaders (e.g. load_txt)
        try:
            del self.WavelengthUnit
        except AttributeError:
            pass
        # Set the wavelength units
        if wave_units is not None:
            self.wave_units = wave_units
        # Set zeropoint flux units
        if flux_units is not None:
            self._flux_units = self.zp.unit
            self.flux_units = flux_units
        # Get references
        self.refs = []
        try:
            if isinstance(self.CalibrationReference, str):
                self.refs = [self.CalibrationReference.split('=')[-1]]
        except:
            self.CalibrationReference = None
        # Set a base name
        self.name = self.filterID.split('/')[-1]
        # Try to get the extinction vector R from Green et al. (2018)
        self.ext_vector = EXTINCTION.get(self.name, 0)
        # Set the systematic uncertainty (default 2 percent)
        self.systematics = SYSTEMATICS.get(self.name, 0.02)
        # Bin: forward only the kwargs that bin() accepts
        if kwargs:
            bwargs = {k: v for k, v in kwargs.items() if k in
                      inspect.signature(self.bin).parameters.keys()}
            self.bin(**bwargs)
    def apply(self, spectrum, plot=False):
        """
        Apply the filter to the given [W, F], or [W, F, E] spectrum
        Parameters
        ----------
        spectrum: array-like
            The wavelength [um] and flux of the spectrum
            to apply the filter to
        plot: bool
            Plot the original and filtered spectrum
        Returns
        -------
        np.ndarray
            The filtered spectrum and error
        """
        # Convert to filter units if possible
        f_units = 1.
        if hasattr(spectrum[0], 'unit'):
            spectrum[0] = spectrum[0].to(self.wave_units)
        if hasattr(spectrum[1], 'unit'):
            spectrum[1] = spectrum[1].to(self.flux_units)
            f_units = self.flux_units
        if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
            spectrum[2] = spectrum[2].to(self.flux_units)
        # Make into iterable arrays
        wav, flx, *err = [np.asarray(i) for i in spectrum]
        # Check for error array; fill with NaN when none was given
        if len(err) == 0:
            err = np.ones_like(flx)*np.nan
            unc = False
        else:
            err = err[0]
            unc = True
        # Make flux 2D
        if len(flx.shape) == 1:
            flx = np.expand_dims(flx, axis=0)
            err = np.expand_dims(err, axis=0)
        # Make throughput 3D
        rsr = np.copy(self.rsr)
        # Make empty filtered arrays: (n_bins, n_spectra, pixels_per_bin)
        filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
        filtered_err = np.zeros_like(filtered_flx)
        # Rebin the input spectra to the filter wavelength array
        # and apply the RSR curve to the spectrum
        for i, bn in enumerate(rsr):
            for j, (f, e) in enumerate(zip(flx, err)):
                filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
                filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
        # Propagate the filter systematic uncertainties
        if unc:
            filtered_err += filtered_flx*self.systematics
        if plot:
            # Make the figure
            COLORS = color_gen('Category10')
            xlab = 'Wavelength [{}]'.format(self.wave_units)
            ylab = 'Flux Density [{}]'.format(self.flux_units)
            fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
            # Plot the unfiltered spectrum
            fig.line(wav, flx[0], legend='Input spectrum', color='black')
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(flx-err, (flx+err)[::-1])
                fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
            # Plot each spectrum bin
            # NOTE(review): this loop rebinds `wav`, shadowing the input
            # wavelength array used above — only safe because the input
            # array is not used after this point
            for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
                color = next(COLORS)
                fig.line(wav, bn[0], color=color)
                # Plot the uncertainties
                if unc:
                    band_x = np.append(wav, wav[::-1])
                    band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                    fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
            show(fig)
        return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
    def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
        """
        Break the filter up into bins and apply a throughput to each bin,
        useful for G141, G102, and other grisms
        Parameters
        ----------
        n_bins: int
            The number of bins to dice the throughput curve into
        pixels_per_bin: int (optional)
            The number of channels per bin, which will be used
            to calculate n_bins (takes precedence over ``n_bins``)
        wave_min: astropy.units.quantity (optional)
            The minimum wavelength to use
        wave_max: astropy.units.quantity (optional)
            The maximum wavelength to use
        """
        # Get wavelength limits
        if wave_min is not None:
            self.wave_min = wave_min
        if wave_max is not None:
            self.wave_max = wave_max
        # Trim the wavelength by the given min and max
        raw_wave = self.raw[0]
        whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
                             raw_wave * q.AA <= self.wave_max)
        self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
        self.throughput = self.raw[1][whr]
        print('Bandpass trimmed to',
              '{} - {}'.format(self.wave_min, self.wave_max))
        # Calculate the number of bins and channels
        pts = len(self.wave)
        if isinstance(pixels_per_bin, int):
            self.pixels_per_bin = pixels_per_bin
            self.n_bins = int(pts/self.pixels_per_bin)
        elif isinstance(n_bins, int):
            self.n_bins = n_bins
            self.pixels_per_bin = int(pts/self.n_bins)
        else:
            raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
        print('{} bins of {} pixels each.'.format(self.n_bins,
                                                  self.pixels_per_bin))
        # Trim throughput edges so that there are an integer number of bins,
        # dropping an equal number of pixels from each end
        new_len = self.n_bins * self.pixels_per_bin
        start = (pts - new_len) // 2
        self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
        self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
    @property
    def flux_units(self):
        """A getter for the flux units of the zeropoint flux density"""
        return self._flux_units
    @flux_units.setter
    def flux_units(self, units):
        """
        A setter for the flux units
        Parameters
        ----------
        units: str, astropy.units.core.PrefixUnit
            The desired units of the zeropoint flux density
        """
        # Check that the units are valid
        dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
        if not isinstance(units, dtypes):
            raise ValueError(units, "units not understood.")
        # Check that the units changed
        if units != self.flux_units:
            # Convert the zeropoint to the new units, using spectral_density
            # equivalencies anchored at the effective wavelength
            sfd = q.spectral_density(self.wave_eff)
            self.zp = self.zp.to(units, equivalencies=sfd)
            # Store new units
            self._flux_units = units
    def info(self, fetch=False):
        """
        Print a table of info about the current filter
        Parameters
        ----------
        fetch: bool
            Return the table instead of printing it
        Returns
        -------
        astropy.table.Table or None
            The info table when ``fetch`` is True
        """
        # Get the info from the class: only simple-typed public attributes
        tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
        info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
                and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
        # Make the table
        table = at.Table(np.asarray(info).reshape(len(info), 2),
                         names=['Attributes', 'Values'])
        # Sort and print
        table.sort('Attributes')
        if fetch:
            return table
        else:
            table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
    def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
        """
        Loads a top hat filter given wavelength min and max values
        Parameters
        ----------
        wave_min: astropy.units.quantity
            The minimum wavelength to use
        wave_max: astropy.units.quantity
            The maximum wavelength to use
        pixels_per_bin: int
            The number of pixels for the filter
        """
        # Get min, max, effective wavelengths and width
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = 1
        self._wave_units = q.AA
        wave_min = wave_min.to(self.wave_units)
        wave_max = wave_max.to(self.wave_units)
        # Create the RSR curve: uniform unit throughput across the band
        self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
        self._throughput = np.ones_like(self.wave)
        self.raw = np.array([self.wave.value, self.throughput])
        # Calculate the effective wavelength (band midpoint) and width
        wave_eff = ((wave_min + wave_max) / 2.).value
        width = (wave_max - wave_min).value
        # Add the attributes: placeholder SVO-style metadata so the object
        # exposes the same fields as an XML/txt-loaded filter
        self.path = ''
        self.refs = ''
        self.Band = 'Top Hat'
        self.CalibrationReference = ''
        self.FWHM = width
        self.Facility = '-'
        self.FilterProfileService = '-'
        self.MagSys = '-'
        self.PhotCalID = ''
        self.PhotSystem = ''
        self.ProfileReference = ''
        self.WavelengthMin = wave_min.value
        self.WavelengthMax = wave_max.value
        self.WavelengthCen = wave_eff
        self.WavelengthEff = wave_eff
        self.WavelengthMean = wave_eff
        self.WavelengthPeak = wave_eff
        self.WavelengthPhot = wave_eff
        self.WavelengthPivot = wave_eff
        self.WavelengthUCD = ''
        self.WidthEff = width
        self.ZeroPoint = 0
        self.ZeroPointType = ''
        self.ZeroPointUnit = 'Jy'
        self.filterID = 'Top Hat'
    def load_txt(self, filepath):
        """Load the filter from a txt file
        Parameters
        ----------
        filepath: str
            The filepath of the two-column (wavelength, throughput) file
        """
        self.raw = np.genfromtxt(filepath, unpack=True)
        # Convert to Angstroms if microns
        # (heuristic: a last wavelength < 100 is assumed to be in microns)
        if self.raw[0][-1] < 100:
            self.raw[0] = self.raw[0] * 10000
        self.WavelengthUnit = str(q.AA)
        self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
        x, f = self.raw
        # Get a spectrum of Vega to compute the zeropoint
        vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
        vega = np.genfromtxt(vega_file, unpack=True)[:2]
        vega[0] = vega[0] * 10000
        vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
        # Zeropoint = bandpass-weighted mean Vega flux density
        flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
        thru = np.trapz(f, x=x)
        self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
        # Calculate the filter's properties
        self.filterID = os.path.splitext(os.path.basename(filepath))[0]
        self.WavelengthPeak = np.max(self.raw[0])
        # Rising edge: truncate at the last strictly-increasing point, then
        # take wavelength at 1% of peak throughput as the minimum
        f0 = f[:np.where(np.diff(f) > 0)[0][-1]]
        x0 = x[:np.where(np.diff(f) > 0)[0][-1]]
        self.WavelengthMin = np.interp(max(f)/100., f0, x0)
        # Falling edge handled the same way on the reversed arrays
        f1 = f[::-1][:np.where(np.diff(f[::-1]) > 0)[0][-1]]
        x1 = x[::-1][:np.where(np.diff(f[::-1]) > 0)[0][-1]]
        self.WavelengthMax = np.interp(max(f)/100., f1, x1)
        # NOTE(review): `vega` below is the full [wave, flux] quantity array,
        # not just the flux row — verify the intended weighting
        self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
        self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
        self.WidthEff = np.trapz(f, x=x)/f.max()
        self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
        self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
        # Half max stuff: FWHM and center from the half-maximum crossings
        halfmax = f.max()/2.
        hm_x1 = x[f > halfmax][0]
        hm_x2 = x[f > halfmax][-1]
        self.FWHM = hm_x2 - hm_x1
        self.WavelengthCen = (hm_x1 + hm_x2)/2.
        # Add missing attributes
        self.path = ''
        self.pixels_per_bin = self.raw.shape[-1]
        self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
    """Test how this filter's non-zero throughput overlaps a spectrum.

    'full' means the filter's transmitting range lies entirely within
    the spectrum's wavelength range; 'none' means the two ranges are
    disjoint; anything else is 'partial'.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Wavelengths where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = active.min(), active.max()

    # Wavelength extent of the input spectrum
    spec_wave = spectrum[0]
    spec_lo, spec_hi = spec_wave.min(), spec_wave.max()

    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
    """Plot the filter throughput curve.

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure (only when draw is False)
    """
    palette = color_gen('Category10')

    # Build a fresh figure when none was supplied
    if fig is None:
        fig = figure(title=self.filterID,
                     x_axis_label='Wavelength [{}]'.format(self.wave_units),
                     y_axis_label='Throughput')

    # The raw (unbinned) curve as a faint, thick black line underneath
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')

    # Each bin in its own color, with the bin centers marked
    for bin_wave, bin_thru in self.rsr:
        fig.line(bin_wave, bin_thru, color=next(palette), line_width=2)
    fig.circle(*self.centers, size=8, color='black')

    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """A getter for the relative spectral response (rsr) curve"""
    # Stack (wave, throughput) and move the bin axis to the front
    return np.swapaxes(np.array([self.wave.value, self.throughput]), 0, 1)
@property
def throughput(self):
    """A getter for the throughput"""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """A setter for the throughput.

    Parameters
    ----------
    points: sequence
        The array of throughput points

    Raises
    ------
    ValueError
        When the throughput does not align with the wavelength grid
    """
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """A getter for the wavelength"""
    return self._wave

@wave.setter
def wave(self, new_wave):
    """A setter for the wavelength.

    Parameters
    ----------
    new_wave: astropy.units.quantity.Quantity
        The array with units

    Raises
    ------
    ValueError
        When the value carries no astropy units
    """
    # Only unit-bearing arrays are accepted
    if not isinstance(new_wave, q.quantity.Quantity):
        raise ValueError("Wavelength must be in length units.")

    # Store the array first: the wave_units setter below re-reads it
    self._wave = new_wave
    self.wave_units = new_wave.unit
@property
def wave_units(self):
    """A getter for the wavelength units"""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """
    A setter for the wavelength units.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units

    Raises
    ------
    ValueError
        When the units are not length units
    """
    # Reject anything that is not a length unit
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")

    # Update the units
    self._wave_units = units

    # Convert every stored wavelength quantity to the new units
    for attr in ('_wave', 'wave_min', 'wave_max', 'wave_eff',
                 'wave_center', 'wave_mean', 'wave_peak', 'wave_phot',
                 'wave_pivot', 'width_eff', 'fwhm'):
        setattr(self, attr, getattr(self, attr).to(self.wave_units).round(5))
def filters(filter_directory=None, update=False, fmt='table', **kwargs):
    """
    Get a table of metadata for the available filters.

    Parameters
    ----------
    filter_directory: str
        The directory containing the filter relative spectral response curves
    update: bool
        Check the filter directory for new filters and regenerate the
        pickled table
    fmt: str
        The format for the returned data, 'table' or 'dict'

    Returns
    -------
    astropy.table.Table, dict, or None
        The filter metadata (indexed by 'Band'), or None when the
        directory contains no filters
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')

    # Get the pickle path and make sure the file exists
    p_path = os.path.join(filter_directory, 'filter_list.p')
    if not os.path.isfile(p_path):
        os.system('touch {}'.format(p_path))

    if update:
        print('Loading filters into table...')

        # Get all the filters (except the pickle itself)
        files = glob(filter_directory + '*')
        files = [f for f in files if not f.endswith('.p')]
        bands = [os.path.basename(b) for b in files]

        tables = []
        for band in bands:

            # Load the filter
            band = band.replace('.txt', '')
            filt = Filter(band, **kwargs)
            filt.Band = band

            # Put metadata into a one-row table with correct dtypes
            info = filt.info(True)
            vals = [float(i) if i.replace('.', '').replace('-', '')
                    .replace('+', '').isnumeric() else i
                    for i in info['Values']]
            dtypes = np.array([type(i) for i in vals])
            table = at.Table(np.array([vals]), names=info['Attributes'],
                             dtype=dtypes)
            tables.append(table)

            del filt, info, table

        # Write to the pickle (BUGFIX: only when filters were found,
        # since at.vstack([]) raises on an empty list)
        if tables:
            with open(p_path, 'wb') as file:
                pickle.dump(at.vstack(tables), file)

    # Load the saved pickle (BUGFIX: guard against the freshly touched
    # empty file, on which pickle.load raises EOFError)
    data = {}
    if os.path.isfile(p_path) and os.path.getsize(p_path) > 0:
        with open(p_path, 'rb') as file:
            data = pickle.load(file)

    # Return the data
    if data:
        if fmt == 'dict':
            data = {r[0]: {k: r[k].value if hasattr(r[k], 'unit') else r[k]
                    for k in data.keys()[1:]} for r in data}
        else:
            # Add Band as index
            data.add_index('Band')

        return data

    # The pickle was missing or empty: try to generate it once.
    # BUGFIX: the original discarded the result of this recursive call
    # (and dropped filter_directory/fmt/kwargs), so a first call always
    # returned None even after a successful update.
    if not update:
        return filters(filter_directory=filter_directory, update=True,
                       fmt=fmt, **kwargs)

    print('No filters found in', filter_directory)
def rebin_spec(spec, wavnew, oversamp=100, plot=False):
    """
    Rebin a spectrum onto a new wavelength grid while preserving
    the total flux.

    Parameters
    ----------
    spec: array-like
        The wavelength and flux to be binned
    wavnew: array-like
        The new wavelength array
    oversamp: int
        The oversampling factor for the intermediate fine grid

    Returns
    -------
    np.ndarray
        The rebinned flux
    """
    wave, flux = spec
    npts = len(wave)

    # Oversample the spectrum onto a fine grid so flux can be
    # redistributed into the new bins by simple summation
    coarse_idx = np.arange(npts, dtype=float)
    fine_idx = np.arange((npts - 1.)*oversamp + 1., dtype=float)/oversamp
    fine_wave = np.interp(fine_idx, coarse_idx, wave)
    fine_flux = np.interp(fine_wave, wave, flux)/oversamp

    # Target bin edges: midpoints between the new wavelengths, padded
    # by the largest spacing on either end
    pad = np.diff(wavnew).max()
    edges = np.sort(np.concatenate(([wavnew[0] - pad],
                                    .5*(wavnew[1:] + wavnew[:-1]),
                                    [wavnew[-1] + pad])))

    # Sum the oversampled flux that falls inside each target bin
    lo = fine_wave.searchsorted(edges[:-1], side='left')
    hi = fine_wave.searchsorted(edges[1:], side='left')
    return np.array([fine_flux[a:b].sum() for a, b in zip(lo, hi)])
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
filters
|
python
|
def filters(filter_directory=None, update=False, fmt='table', **kwargs):
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Get the pickle path and make sure file exists
p_path = os.path.join(filter_directory, 'filter_list.p')
updated = False
if not os.path.isfile(p_path):
os.system('touch {}'.format(p_path))
if update:
print('Loading filters into table...')
# Get all the filters (except the pickle)
files = glob(filter_directory+'*')
files = [f for f in files if not f.endswith('.p')]
bands = [os.path.basename(b) for b in files]
tables = []
for band in bands:
# Load the filter
band = band.replace('.txt', '')
filt = Filter(band, **kwargs)
filt.Band = band
# Put metadata into table with correct dtypes
info = filt.info(True)
vals = [float(i) if i.replace('.', '').replace('-', '')
.replace('+', '').isnumeric() else i
for i in info['Values']]
dtypes = np.array([type(i) for i in vals])
table = at.Table(np.array([vals]), names=info['Attributes'],
dtype=dtypes)
tables.append(table)
del filt, info, table
# Write to the pickle
with open(p_path, 'wb') as file:
pickle.dump(at.vstack(tables), file)
# Load the saved pickle
data = {}
if os.path.isfile(p_path):
with open(p_path, 'rb') as file:
data = pickle.load(file)
# Return the data
if data:
if fmt == 'dict':
data = {r[0]: {k: r[k].value if hasattr(r[k], 'unit') else r[k]
for k in data.keys()[1:]} for r in data}
else:
# Add Band as index
data.add_index('Band')
return data
# Or try to generate it once
else:
if not updated:
updated = True
filters(update=True)
else:
print('No filters found in', filter_directory)
|
Get a list of the available filters
Parameters
----------
filter_directory: str
The directory containing the filter relative spectral response curves
update: bool
Check the filter directory for new filters and generate pickle of table
fmt: str
The format for the returned table
Returns
-------
astropy.table.Table or dict
The table of filter metadata, indexed by band name
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L799-L886
|
[
"def info(self, fetch=False):\n \"\"\"\n Print a table of info about the current filter\n \"\"\"\n # Get the info from the class\n tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)\n info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)\n and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]\n\n # Make the table\n table = at.Table(np.asarray(info).reshape(len(info), 2),\n names=['Attributes', 'Values'])\n\n # Sort and print\n table.sort('Attributes')\n\n if fetch:\n return table\n else:\n table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])\n"
] |
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
A Python wrapper for the SVO Filter Profile Service
"""
from glob import glob
import inspect
import os
import pickle
from pkg_resources import resource_filename
import warnings
import itertools
import astropy.table as at
import astropy.io.votable as vo
import astropy.units as q
import astropy.constants as ac
from astropy.utils.exceptions import AstropyWarning
from bokeh.plotting import figure, show
import bokeh.palettes as bpal
import numpy as np
warnings.simplefilter('ignore', category=AstropyWarning)
EXTINCTION = {'PS1.g': 3.384, 'PS1.r': 2.483, 'PS1.i': 1.838, 'PS1.z': 1.414,
'PS1.y': 1.126, 'SDSS.u': 4.0, 'SDSS.g': 3.384, 'SDSS.r': 2.483,
'SDSS.i': 1.838, 'SDSS.z': 1.414, '2MASS.J': 0.650,
'2MASS.H': 0.327, '2MASS.Ks': 0.161}
SYSTEMATICS = {}
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Load the bandpass data into the Filter object.

Parameters
----------
band: str
    The bandpass filename (e.g. 2MASS.J), or 'tophat' (any casing,
    with optional '-' or ' ') to build a synthetic top hat filter
    from the 'wave_min' and 'wave_max' kwargs
filter_directory: str
    The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
    The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
    The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter with the requested pixel count (default 100)
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters; map extension-stripped names to real paths
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput (2D: one row, all pixels)
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes (now exposed via the snake_case names)
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units (converts all wave_* attributes above)
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
# NOTE(review): bare except silently swallows all errors here — confirm
except:
self.CalibrationReference = None
# Set a base name (the part of the SVO filterID after the facility)
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin, passing along only the kwargs that bin() accepts
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F] or [W, F, E] spectrum.

Note: when the spectrum carries astropy units, its wavelength/flux
elements are converted (mutated) in place.

Parameters
----------
spectrum: array-like
    The wavelength [um] and flux of the spectrum
    to apply the filter to
plot: bool
    Plot the original and filtered spectrum

Returns
-------
np.ndarray
    The filtered flux and error, squeezed, in the filter's flux
    units when the input carried flux units (else unitless)
"""
# Convert to filter units if possible (mutates the input sequence)
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array; fill with NaN when none was supplied
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays: (n_filter_bins, n_spectra, n_pixels)
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum (NaN outside coverage)
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
# NOTE(review): this loop rebinds `wav` to the filter's bin
# wavelengths, shadowing the input wavelength array — confirm intended
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms.

Parameters
----------
n_bins: int
    The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
    The number of channels per bin; when given it takes precedence
    over ``n_bins``, which is then derived from it
wave_min: astropy.units.quantity (optional)
    The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
    The maximum wavelength to use

Raises
------
ValueError
    When neither ``n_bins`` nor ``pixels_per_bin`` is an integer
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
# NOTE(review): self.raw is assumed to be in Angstroms here — confirm
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins,
# splitting the excess pixels evenly between both ends
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
    """A getter for the wavelength bin centers and average throughputs"""
    # Average each bin along its pixel axis, ignoring NaNs
    return np.asarray([np.nanmean(self.wave.value, axis=1),
                       np.nanmean(self.throughput, axis=1)])

@property
def flux_units(self):
    """A getter for the flux units"""
    return self._flux_units

@flux_units.setter
def flux_units(self, units):
    """
    A setter for the flux units.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density

    Raises
    ------
    ValueError
        When the units are not a recognized astropy unit type
    """
    # Only accept recognized astropy unit types
    valid = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, valid):
        raise ValueError(units, "units not understood.")

    # Convert the zeropoint only when the units actually change,
    # using spectral density equivalency at the effective wavelength
    if units != self.flux_units:
        sfd = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=sfd)
        self._flux_units = units
def info(self, fetch=False):
    """
    Print (or return) a table of info about the current filter.

    Parameters
    ----------
    fetch: bool
        Return the table instead of printing it

    Returns
    -------
    astropy.table.Table
        The info table, only when ``fetch`` is True
    """
    # Collect printable attributes, skipping the bulky array
    # properties and anything private
    printable = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    rows = [[name, str(value)] for name, value in vars(self).items()
            if isinstance(value, printable)
            and name not in ['rsr', 'raw', 'centers']
            and not name.startswith('_')]

    # Build a two-column table, sorted by attribute name
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')

    if fetch:
        return table

    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Build a top hat filter between the given wavelength limits.

    Parameters
    ----------
    wave_min: astropy.units.quantity.Quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity.Quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Store the binning and default wavelength unit
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)

    # A flat RSR curve of ones across the requested range
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])

    # Every characteristic wavelength of a top hat is its midpoint;
    # every width measure is its span
    center = ((wave_min + wave_max) / 2.).value
    span = (wave_max - wave_min).value

    # Fill in the metadata an SVO XML file would otherwise provide
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = span
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    self.WavelengthCen = center
    self.WavelengthEff = center
    self.WavelengthMean = center
    self.WavelengthPeak = center
    self.WavelengthPhot = center
    self.WavelengthPivot = center
    self.WavelengthUCD = ''
    self.WidthEff = span
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from an ascii txt file of (wavelength, throughput)
columns and derive its properties using a Vega spectrum.

Parameters
----------
filepath: str
    The filepath for the filter
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
# NOTE(review): assumes any last wavelength < 100 means microns — confirm
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega, converted from microns to Angstroms
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
# NOTE(review): vega is 1D after rebinning, so vega[1] below selects a
# single flux element rather than the flux array — confirm intent
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
# NOTE(review): this is the maximum *wavelength*, not the wavelength of
# peak throughput — verify against the SVO definition of WavelengthPeak
self.WavelengthPeak = np.max(self.raw[0])
# Wavelength limits at 1% of peak throughput, interpolated on the
# monotonic blue and red edges of the curve
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
# Vega-weighted and unweighted wavelength statistics
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff: FWHM and center from the half-maximum crossings
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
def color_gen(colormap='viridis', key=None, n=15):
    """Color generator for Bokeh plots.

    Parameters
    ----------
    colormap: str, sequence
        The name of a bokeh palette, or a sequence of color hex values
    key: str (optional)
        The entry to select when the named palette is a dict of palettes
    n: int
        The size requested when the named palette is callable

    Returns
    -------
    generator
        A generator cycling endlessly through the palette colors
    """
    err = "pallette must be a bokeh palette name or a sequence of color hex values."

    if colormap in dir(bpal):
        palette = getattr(bpal, colormap)
        if isinstance(palette, dict):
            # Fall back to the first key when none was requested
            if key is None:
                key = list(palette.keys())[0]
            palette = palette[key]
        elif callable(palette):
            palette = palette(n)
        else:
            raise TypeError(err)
    elif isinstance(colormap, (list, tuple)):
        palette = colormap
    else:
        raise TypeError(err)

    yield from itertools.cycle(palette)
def rebin_spec(spec, wavnew, oversamp=100, plot=False):
"""
Rebin a spectrum to a new wavelength array while preserving
the total flux
Parameters
----------
spec: array-like
The wavelength and flux to be binned
wavenew: array-like
The new wavelength array
Returns
-------
np.ndarray
The rebinned flux
"""
wave, flux = spec
nlam = len(wave)
x0 = np.arange(nlam, dtype=float)
x0int = np.arange((nlam-1.) * oversamp + 1., dtype=float)/oversamp
w0int = np.interp(x0int, x0, wave)
spec0int = np.interp(w0int, wave, flux)/oversamp
# Set up the bin edges for down-binning
maxdiffw1 = np.diff(wavnew).max()
w1bins = np.concatenate(([wavnew[0]-maxdiffw1],
.5*(wavnew[1::]+wavnew[0: -1]),
[wavnew[-1]+maxdiffw1]))
# Bin down the interpolated spectrum:
w1bins = np.sort(w1bins)
nbins = len(w1bins)-1
specnew = np.zeros(nbins)
inds2 = [[w0int.searchsorted(w1bins[ii], side='left'),
w0int.searchsorted(w1bins[ii+1], side='left')]
for ii in range(nbins)]
for ii in range(nbins):
specnew[ii] = np.sum(spec0int[inds2[ii][0]: inds2[ii][1]])
return specnew
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
rebin_spec
|
python
|
def rebin_spec(spec, wavnew, oversamp=100, plot=False):
wave, flux = spec
nlam = len(wave)
x0 = np.arange(nlam, dtype=float)
x0int = np.arange((nlam-1.) * oversamp + 1., dtype=float)/oversamp
w0int = np.interp(x0int, x0, wave)
spec0int = np.interp(w0int, wave, flux)/oversamp
# Set up the bin edges for down-binning
maxdiffw1 = np.diff(wavnew).max()
w1bins = np.concatenate(([wavnew[0]-maxdiffw1],
.5*(wavnew[1::]+wavnew[0: -1]),
[wavnew[-1]+maxdiffw1]))
# Bin down the interpolated spectrum:
w1bins = np.sort(w1bins)
nbins = len(w1bins)-1
specnew = np.zeros(nbins)
inds2 = [[w0int.searchsorted(w1bins[ii], side='left'),
w0int.searchsorted(w1bins[ii+1], side='left')]
for ii in range(nbins)]
for ii in range(nbins):
specnew[ii] = np.sum(spec0int[inds2[ii][0]: inds2[ii][1]])
return specnew
|
Rebin a spectrum to a new wavelength array while preserving
the total flux
Parameters
----------
spec: array-like
The wavelength and flux to be binned
wavnew: array-like
The new wavelength array
Returns
-------
np.ndarray
The rebinned flux
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L889-L931
| null |
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
A Python wrapper for the SVO Filter Profile Service
"""
from glob import glob
import inspect
import os
import pickle
from pkg_resources import resource_filename
import warnings
import itertools
import astropy.table as at
import astropy.io.votable as vo
import astropy.units as q
import astropy.constants as ac
from astropy.utils.exceptions import AstropyWarning
from bokeh.plotting import figure, show
import bokeh.palettes as bpal
import numpy as np
warnings.simplefilter('ignore', category=AstropyWarning)
# Extinction vector R per filter, used as Filter.ext_vector; values taken
# from Green et al. (2018) per the note in Filter.__init__
EXTINCTION = {'PS1.g': 3.384, 'PS1.r': 2.483, 'PS1.i': 1.838, 'PS1.z': 1.414,
              'PS1.y': 1.126, 'SDSS.u': 4.0, 'SDSS.g': 3.384, 'SDSS.r': 2.483,
              'SDSS.i': 1.838, 'SDSS.z': 1.414, '2MASS.J': 0.650,
              '2MASS.H': 0.327, '2MASS.Ks': 0.161}
# Per-filter systematic uncertainties; an empty dict means every filter
# falls back to the 2% default applied in Filter.__init__
SYSTEMATICS = {}
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
    def apply(self, spectrum, plot=False):
        """
        Apply the filter to the given [W, F], or [W, F, E] spectrum

        Parameters
        ----------
        spectrum: array-like
            The wavelength [um] and flux of the spectrum
            to apply the filter to
        plot: bool
            Plot the original and filtered spectrum

        Returns
        -------
        np.ndarray
            The filtered spectrum and error
        """
        # Convert to filter units if possible
        f_units = 1.
        if hasattr(spectrum[0], 'unit'):
            spectrum[0] = spectrum[0].to(self.wave_units)
        if hasattr(spectrum[1], 'unit'):
            spectrum[1] = spectrum[1].to(self.flux_units)
            f_units = self.flux_units
        if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
            spectrum[2] = spectrum[2].to(self.flux_units)

        # Make into iterable arrays
        wav, flx, *err = [np.asarray(i) for i in spectrum]

        # Check for error array; fabricate a NaN one when absent so the
        # loops below can treat both cases uniformly
        if len(err) == 0:
            err = np.ones_like(flx)*np.nan
            unc = False
        else:
            err = err[0]
            unc = True

        # Make flux 2D so single spectra and stacks share one code path
        if len(flx.shape) == 1:
            flx = np.expand_dims(flx, axis=0)
            err = np.expand_dims(err, axis=0)

        # Make throughput 3D
        rsr = np.copy(self.rsr)

        # Make empty filtered arrays: (filter bins, spectra, bin pixels)
        filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
        filtered_err = np.zeros_like(filtered_flx)

        # Rebin the input spectra to the filter wavelength array
        # and apply the RSR curve to the spectrum; values outside the
        # input wavelength range become NaN
        for i, bn in enumerate(rsr):
            for j, (f, e) in enumerate(zip(flx, err)):
                filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
                filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]

        # Propagate the filter systematic uncertainties
        # NOTE(review): the systematic term is added linearly, not in
        # quadrature -- confirm this is the intended error model
        if unc:
            filtered_err += filtered_flx*self.systematics

        if plot:
            # Make the figure
            COLORS = color_gen('Category10')
            xlab = 'Wavelength [{}]'.format(self.wave_units)
            ylab = 'Flux Density [{}]'.format(self.flux_units)
            fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)

            # Plot the unfiltered spectrum
            fig.line(wav, flx[0], legend='Input spectrum', color='black')

            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(flx-err, (flx+err)[::-1])
                fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)

            # Plot each spectrum bin
            # NOTE(review): this loop rebinds 'wav', clobbering the input
            # wavelength array for the remainder of the method
            for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
                color = next(COLORS)
                fig.line(wav, bn[0], color=color)

                # Plot the uncertainties
                if unc:
                    band_x = np.append(wav, wav[::-1])
                    band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                    fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)

            show(fig)

        return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
    def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
        """
        Break the filter up into bins and apply a throughput to each bin,
        useful for G141, G102, and other grisms

        Parameters
        ----------
        n_bins: int
            The number of bins to dice the throughput curve into
        pixels_per_bin: int (optional)
            The number of channels per bin, which will be used
            to calculate n_bins (takes precedence over n_bins)
        wave_min: astropy.units.quantity (optional)
            The minimum wavelength to use
        wave_max: astropy.units.quantity (optional)
            The maximum wavelength to use

        Raises
        ------
        ValueError
            If neither n_bins nor pixels_per_bin is an integer
        """
        # Get wavelength limits
        if wave_min is not None:
            self.wave_min = wave_min
        if wave_max is not None:
            self.wave_max = wave_max

        # Trim the wavelength by the given min and max
        # (self.raw stores wavelengths in Angstroms)
        raw_wave = self.raw[0]
        whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
                             raw_wave * q.AA <= self.wave_max)
        self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
        self.throughput = self.raw[1][whr]
        print('Bandpass trimmed to',
              '{} - {}'.format(self.wave_min, self.wave_max))

        # Calculate the number of bins and channels
        pts = len(self.wave)
        if isinstance(pixels_per_bin, int):
            self.pixels_per_bin = pixels_per_bin
            self.n_bins = int(pts/self.pixels_per_bin)
        elif isinstance(n_bins, int):
            self.n_bins = n_bins
            self.pixels_per_bin = int(pts/self.n_bins)
        else:
            raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
        print('{} bins of {} pixels each.'.format(self.n_bins,
                                                  self.pixels_per_bin))

        # Trim throughput edges (centered) so that there are an integer
        # number of bins, then reshape to (n_bins, pixels_per_bin)
        new_len = self.n_bins * self.pixels_per_bin
        start = (pts - new_len) // 2
        self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
        self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
    @property
    def flux_units(self):
        """A getter for the flux units"""
        return self._flux_units

    @flux_units.setter
    def flux_units(self, units):
        """
        A setter for the flux units

        Parameters
        ----------
        units: str, astropy.units.core.PrefixUnit
            The desired units of the zeropoint flux density

        Raises
        ------
        ValueError
            If the given units are not an astropy unit or quantity
        """
        # Check that the units are valid
        dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
        if not isinstance(units, dtypes):
            raise ValueError(units, "units not understood.")

        # Check that the units changed
        if units != self.flux_units:

            # Convert the zeropoint, allowing flux-density conversions
            # evaluated at the effective wavelength
            sfd = q.spectral_density(self.wave_eff)
            self.zp = self.zp.to(units, equivalencies=sfd)

            # Store new units
            self._flux_units = units
    def info(self, fetch=False):
        """
        Print a table of info about the current filter

        Parameters
        ----------
        fetch: bool
            Return the table instead of printing it

        Returns
        -------
        astropy.table.Table or None
            The info table when fetch is True, otherwise None
        """
        # Get the info from the class: only plain-data attributes, skipping
        # the array-valued properties and private (underscore) names
        tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
        info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
                and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]

        # Make the table
        table = at.Table(np.asarray(info).reshape(len(info), 2),
                         names=['Attributes', 'Values'])

        # Sort and print
        table.sort('Attributes')
        if fetch:
            return table
        else:
            table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
    def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
        """
        Loads a top hat filter given wavelength min and max values

        Parameters
        ----------
        wave_min: astropy.units.quantity
            The minimum wavelength to use
        wave_max: astropy.units.quantity
            The maximum wavelength to use
        pixels_per_bin: int
            The number of pixels for the filter
        """
        # Get min, max, effective wavelengths and width
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = 1
        self._wave_units = q.AA
        wave_min = wave_min.to(self.wave_units)
        wave_max = wave_max.to(self.wave_units)

        # Create the RSR curve: uniform unit throughput across the band
        self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
        self._throughput = np.ones_like(self.wave)
        self.raw = np.array([self.wave.value, self.throughput])

        # Calculate the effective wavelength (band midpoint) and width
        wave_eff = ((wave_min + wave_max) / 2.).value
        width = (wave_max - wave_min).value

        # Add the attributes an SVO-loaded filter would have, with
        # placeholder values where a top hat has no meaningful analog
        self.path = ''
        self.refs = ''
        self.Band = 'Top Hat'
        self.CalibrationReference = ''
        self.FWHM = width
        self.Facility = '-'
        self.FilterProfileService = '-'
        self.MagSys = '-'
        self.PhotCalID = ''
        self.PhotSystem = ''
        self.ProfileReference = ''
        self.WavelengthMin = wave_min.value
        self.WavelengthMax = wave_max.value
        self.WavelengthCen = wave_eff
        self.WavelengthEff = wave_eff
        self.WavelengthMean = wave_eff
        self.WavelengthPeak = wave_eff
        self.WavelengthPhot = wave_eff
        self.WavelengthPivot = wave_eff
        self.WavelengthUCD = ''
        self.WidthEff = width
        self.ZeroPoint = 0
        self.ZeroPointType = ''
        self.ZeroPointUnit = 'Jy'
        self.filterID = 'Top Hat'
    def load_txt(self, filepath):
        """Load the filter from a two-column ASCII file and derive the
        standard SVO filter properties from the curve itself

        Parameters
        ----------
        filepath: str
            The filepath for the filter
        """
        self.raw = np.genfromtxt(filepath, unpack=True)

        # Convert to Angstroms if microns
        # NOTE(review): assumes any last wavelength < 100 means the file
        # is in microns -- confirm against the filter files in use
        if self.raw[0][-1] < 100:
            self.raw[0] = self.raw[0] * 10000
        self.WavelengthUnit = str(q.AA)
        self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
        x, f = self.raw

        # Get a spectrum of Vega (file is in microns; convert to Angstroms)
        vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
        vega = np.genfromtxt(vega_file, unpack=True)[: 2]
        vega[0] = vega[0] * 10000
        vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA

        # Zero point: Vega flux through the filter over the throughput integral
        # NOTE(review): rebin_spec returns a 1-D flux array, so vega[1] here
        # indexes a single element rather than a flux column -- verify
        flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
        thru = np.trapz(f, x=x)
        self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value

        # Calculate the filter's properties
        self.filterID = os.path.splitext(os.path.basename(filepath))[0]
        # NOTE(review): this is the maximum wavelength in the file, not the
        # wavelength of peak throughput -- confirm the intended definition
        self.WavelengthPeak = np.max(self.raw[0])

        # Wavelength min/max: where the rising/falling edge crosses 1% of peak
        f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
        x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
        self.WavelengthMin = np.interp(max(f)/100., f0, x0)
        f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
        x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
        self.WavelengthMax = np.interp(max(f)/100., f1, x1)

        # Standard moments of the response curve
        self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
        self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
        self.WidthEff = np.trapz(f, x=x)/f.max()
        self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
        self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)

        # Half max stuff: FWHM and center from the half-maximum crossings
        halfmax = f.max()/2.
        hm_x1 = x[f > halfmax][0]
        hm_x2 = x[f > halfmax][-1]
        self.FWHM = hm_x2 - hm_x1
        self.WavelengthCen = (hm_x1 + hm_x2)/2.

        # Add missing attributes
        self.path = ''
        self.pixels_per_bin = self.raw.shape[-1]
        self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
    def plot(self, fig=None, draw=True):
        """
        Plot the filter

        Parameters
        ----------
        fig: bokeh.plotting.figure (optional)
            A figure to plot on
        draw: bool
            Draw the figure, else return it

        Returns
        -------
        bokeh.plotting.figure
            The filter figure (only when draw is False)
        """
        COLORS = color_gen('Category10')

        # Make the figure
        if fig is None:
            xlab = 'Wavelength [{}]'.format(self.wave_units)
            ylab = 'Throughput'
            title = self.filterID
            fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)

        # Plot the raw (untrimmed, unbinned) curve faintly for reference
        fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
                 alpha=0.1, line_width=8, color='black')

        # Plot each bin, marking the bin centers
        for x, y in self.rsr:
            fig.line(x, y, color=next(COLORS), line_width=2)
        fig.circle(*self.centers, size=8, color='black')

        if draw:
            show(fig)
        else:
            return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
    @property
    def throughput(self):
        """A getter for the throughput"""
        return self._throughput

    @throughput.setter
    def throughput(self, points):
        """A setter for the throughput

        Parameters
        ----------
        points: sequence
            The array of throughput points; must match the shape of the
            current wavelength array

        Raises
        ------
        ValueError
            If the points do not match the wavelength array's shape
        """
        # Test shape
        if not points.shape == self.wave.shape:
            raise ValueError("Throughput and wavelength must be same shape.")

        self._throughput = points
    @property
    def wave(self):
        """A getter for the wavelength"""
        return self._wave

    @wave.setter
    def wave(self, wavelength):
        """A setter for the wavelength

        Parameters
        ----------
        wavelength: astropy.units.quantity.Quantity
            The array with units

        Raises
        ------
        ValueError
            If the wavelength is not an astropy Quantity
        """
        # Test units
        if not isinstance(wavelength, q.quantity.Quantity):
            raise ValueError("Wavelength must be in length units.")

        self._wave = wavelength
        # Keep the stored wavelength units in sync with the new array
        self.wave_units = wavelength.unit
    @property
    def wave_units(self):
        """A getter for the wavelength units"""
        return self._wave_units

    @wave_units.setter
    def wave_units(self, units):
        """
        A setter for the wavelength units

        Parameters
        ----------
        units: str, astropy.units.core.PrefixUnit
            The wavelength units

        Raises
        ------
        ValueError
            If the units are not length units
        """
        # Make sure it's length units
        if not units.is_equivalent(q.m):
            raise ValueError(units, ": New wavelength units must be a length.")

        # Update the units
        self._wave_units = units

        # Update all the wavelength-valued attributes in one pass,
        # rounding to 5 decimal places in the new units
        self._wave = self.wave.to(self.wave_units).round(5)
        self.wave_min = self.wave_min.to(self.wave_units).round(5)
        self.wave_max = self.wave_max.to(self.wave_units).round(5)
        self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
        self.wave_center = self.wave_center.to(self.wave_units).round(5)
        self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
        self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
        self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
        self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
        self.width_eff = self.width_eff.to(self.wave_units).round(5)
        self.fwhm = self.fwhm.to(self.wave_units).round(5)
def color_gen(colormap='viridis', key=None, n=15):
    """Color generator for Bokeh plots

    Parameters
    ----------
    colormap: str, sequence
        The name of a bokeh palette, or an explicit sequence of colors
    key: object (optional)
        The key selecting a palette when the named palette is a dict
    n: int
        The number of colors requested from a callable palette

    Returns
    -------
    generator
        A generator cycling endlessly over the color palette
    """
    if isinstance(colormap, (list, tuple)):
        # An explicit sequence of colors is used as-is
        palette = colormap
    elif colormap in dir(bpal):
        palette = getattr(bpal, colormap)
        if isinstance(palette, dict):
            # Dict-valued palettes are keyed by size; default to the first key
            palette = palette[key if key is not None else list(palette.keys())[0]]
        elif callable(palette):
            palette = palette(n)
        else:
            raise TypeError("pallette must be a bokeh palette name or a sequence of color hex values.")
    else:
        raise TypeError("pallette must be a bokeh palette name or a sequence of color hex values.")

    yield from itertools.cycle(palette)
def filters(filter_directory=None, update=False, fmt='table', **kwargs):
    """
    Get a table of the available filters

    Parameters
    ----------
    filter_directory: str
        The directory containing the filter relative spectral response curves
    update: bool
        Check the filter directory for new filters and generate pickle of table
    fmt: str
        The format for the returned data: 'table' or 'dict'

    Returns
    -------
    astropy.table.Table, dict, or None
        The filter metadata, or None when no filters are found
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')

    # Get the pickle path and make sure file exists
    p_path = os.path.join(filter_directory, 'filter_list.p')
    if not os.path.isfile(p_path):
        os.system('touch {}'.format(p_path))

    if update:
        print('Loading filters into table...')

        # Get all the filters (except the pickle)
        files = glob(filter_directory+'*')
        files = [f for f in files if not f.endswith('.p')]
        bands = [os.path.basename(b) for b in files]
        tables = []

        for band in bands:

            # Load the filter
            band = band.replace('.txt', '')
            filt = Filter(band, **kwargs)
            filt.Band = band

            # Put metadata into table with correct dtypes
            info = filt.info(True)
            vals = [float(i) if i.replace('.', '').replace('-', '')
                    .replace('+', '').isnumeric() else i
                    for i in info['Values']]
            dtypes = np.array([type(i) for i in vals])
            table = at.Table(np.array([vals]), names=info['Attributes'],
                             dtype=dtypes)
            tables.append(table)

            del filt, info, table

        # Write to the pickle
        with open(p_path, 'wb') as file:
            pickle.dump(at.vstack(tables), file)

    # Load the saved pickle
    data = {}
    if os.path.isfile(p_path):
        with open(p_path, 'rb') as file:
            data = pickle.load(file)

    # Return the data
    if data:
        if fmt == 'dict':
            data = {r[0]: {k: r[k].value if hasattr(r[k], 'unit') else r[k]
                           for k in data.keys()[1:]} for r in data}
        else:
            # Add Band as index
            data.add_index('Band')

        return data

    # Or try to generate the pickle once.
    # BUGFIX: the original discarded the recursive call's return value, so a
    # first call with an empty cache always returned None; it also relied on
    # a local 'updated' flag that could never guard the recursion. Recursing
    # only when update is False bounds the retry to a single attempt.
    elif not update:
        return filters(filter_directory=filter_directory, update=True,
                       fmt=fmt, **kwargs)

    else:
        print('No filters found in', filter_directory)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.apply
|
python
|
def apply(self, spectrum, plot=False):
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
|
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L232-L324
|
[
"def color_gen(colormap='viridis', key=None, n=15):\n \"\"\"Color generator for Bokeh plots\n\n Parameters\n ----------\n colormap: str, sequence\n The name of the color map\n\n Returns\n -------\n generator\n A generator for the color palette\n \"\"\"\n if colormap in dir(bpal):\n palette = getattr(bpal, colormap)\n\n if isinstance(palette, dict):\n if key is None:\n key = list(palette.keys())[0]\n palette = palette[key]\n\n elif callable(palette):\n palette = palette(n)\n\n else:\n raise TypeError(\"pallette must be a bokeh palette name or a sequence of color hex values.\")\n\n elif isinstance(colormap, (list, tuple)):\n palette = colormap\n\n else:\n raise TypeError(\"pallette must be a bokeh palette name or a sequence of color hex values.\")\n\n yield from itertools.cycle(palette)\n"
] |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
    """
    Break the filter up into bins and apply a throughput to each bin,
    useful for G141, G102, and other grisms

    Parameters
    ----------
    n_bins: int
        The number of bins to dice the throughput curve into
    pixels_per_bin: int (optional)
        The number of channels per bin, which will be used
        to calculate n_bins (takes precedence over n_bins when given)
    wave_min: astropy.units.quantity (optional)
        The minimum wavelength to use
    wave_max: astropy.units.quantity (optional)
        The maximum wavelength to use

    Raises
    ------
    ValueError
        If neither n_bins nor pixels_per_bin is an integer
    """
    # Get wavelength limits (only overwrite the stored limits when given)
    if wave_min is not None:
        self.wave_min = wave_min
    if wave_max is not None:
        self.wave_max = wave_max
    # Trim the wavelength by the given min and max
    # NOTE: the raw grid is stored in Angstroms, hence the q.AA factor
    raw_wave = self.raw[0]
    whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
                         raw_wave * q.AA <= self.wave_max)
    # Assigning self.wave triggers the wave/wave_units setters, which
    # also re-convert all derived wavelength attributes
    self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
    self.throughput = self.raw[1][whr]
    print('Bandpass trimmed to',
          '{} - {}'.format(self.wave_min, self.wave_max))
    # Calculate the number of bins and channels
    pts = len(self.wave)
    if isinstance(pixels_per_bin, int):
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = int(pts/self.pixels_per_bin)
    elif isinstance(n_bins, int):
        self.n_bins = n_bins
        self.pixels_per_bin = int(pts/self.n_bins)
    else:
        raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
    print('{} bins of {} pixels each.'.format(self.n_bins,
                                              self.pixels_per_bin))
    # Trim throughput edges so that there are an integer number of bins:
    # drop an equal number of leftover pixels from each end, then
    # reshape both arrays to (n_bins, pixels_per_bin)
    new_len = self.n_bins * self.pixels_per_bin
    start = (pts - new_len) // 2
    self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
    self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
    """Wavelength bin centers and mean throughput per bin.

    Returns
    -------
    np.ndarray
        A (2, n_bins) array: row 0 is the nanmean wavelength of each
        bin, row 1 is the nanmean throughput of each bin.
    """
    per_bin = [np.nanmean(self.wave.value, axis=1),
               np.nanmean(self.throughput, axis=1)]
    return np.asarray(per_bin)
@property
def flux_units(self):
    """The units of the zeropoint flux density."""
    return self._flux_units

@flux_units.setter
def flux_units(self, units):
    """Set new flux-density units, converting the zeropoint in place.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density

    Raises
    ------
    ValueError
        If the units are not an astropy unit-like object
    """
    valid = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, valid):
        raise ValueError(units, "units not understood.")
    # Only convert the zeropoint when the units actually change
    if units != self.flux_units:
        equiv = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=equiv)
    self._flux_units = units
def info(self, fetch=False):
    """
    Print a table of info about the current filter

    Parameters
    ----------
    fetch: bool
        Return the table instead of printing it
    """
    # Collect plain-data attributes, skipping arrays/privates that are
    # either huge or derived
    keep = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    skip = ('rsr', 'raw', 'centers')
    rows = [[name, str(value)] for name, value in vars(self).items()
            if isinstance(value, keep) and name not in skip
            and not name.startswith('_')]
    # Build, sort, and emit the two-column table
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')
    if fetch:
        return table
    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Loads a top hat filter given wavelength min and max values

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # A top hat is a single bin with unit throughput everywhere
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)
    # Build the flat RSR curve on a uniform wavelength grid
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])
    # For a top hat, the band center stands in for every "effective"
    # wavelength statistic
    wave_eff = ((wave_min + wave_max) / 2.).value
    width = (wave_max - wave_min).value
    # Fill in the SVO-style metadata attributes
    meta = {'path': '', 'refs': '', 'Band': 'Top Hat',
            'CalibrationReference': '', 'FWHM': width,
            'Facility': '-', 'FilterProfileService': '-', 'MagSys': '-',
            'PhotCalID': '', 'PhotSystem': '', 'ProfileReference': '',
            'WavelengthMin': wave_min.value,
            'WavelengthMax': wave_max.value,
            'WavelengthCen': wave_eff, 'WavelengthEff': wave_eff,
            'WavelengthMean': wave_eff, 'WavelengthPeak': wave_eff,
            'WavelengthPhot': wave_eff, 'WavelengthPivot': wave_eff,
            'WavelengthUCD': '', 'WidthEff': width,
            'ZeroPoint': 0, 'ZeroPointType': '', 'ZeroPointUnit': 'Jy',
            'filterID': 'Top Hat'}
    for name, value in meta.items():
        setattr(self, name, value)
def load_txt(self, filepath):
    """Load the filter from a txt file of wavelength and throughput.

    Parameters
    ----------
    filepath: str
        The filepath

    Notes
    -----
    Fixes a defect: the flux-weighted wavelength statistics previously
    integrated the full 2-row ``vega`` array ([wavelength, flux]) rather
    than just the flux row, which broadcast to 2-element results for
    WavelengthEff and WavelengthPhot.
    """
    self.raw = np.genfromtxt(filepath, unpack=True)
    # Convert to Angstroms if microns (heuristic: max wavelength < 100)
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    x, f = self.raw
    # Get a spectrum of Vega, rebinned to the filter grid, to compute
    # the zeropoint and Vega-weighted wavelengths
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[:2]
    vega[0] = vega[0] * 10000  # microns -> Angstroms
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
    # BUG FIX: weight by the Vega flux row only, not the 2-row array
    vega_flux = vega[1]
    flam = np.trapz((vega_flux*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    # NOTE(review): this is the maximum *wavelength*, not the wavelength
    # of peak throughput — kept as-is to preserve behavior
    self.WavelengthPeak = np.max(self.raw[0])
    # Min/max wavelengths at 1% of peak throughput, approached from
    # each end of the monotonically rising edges
    f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
    x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
    self.WavelengthMin = np.interp(max(f)/100., f0, x0)
    f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    self.WavelengthMax = np.interp(max(f)/100., f1, x1)
    # Vega-flux-weighted and throughput-weighted wavelength statistics
    self.WavelengthEff = np.trapz(f*x*vega_flux, x=x)/np.trapz(f*vega_flux, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega_flux*x**2, x=x)/np.trapz(f*vega_flux*x, x=x)
    # Half max stuff: FWHM and band center from the half-power points
    halfmax = f.max()/2.
    hm_x1 = x[f > halfmax][0]
    hm_x2 = x[f > halfmax][-1]
    self.FWHM = hm_x2 - hm_x1
    self.WavelengthCen = (hm_x1 + hm_x2)/2.
    # Add missing attributes
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO VOTable XML file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Do some formatting
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # BUG FIX: the original cleanup chain was garbled — 'b''
            # is an unbalanced string literal and replacing '&' with
            # '&' is a no-op. Strip the bytes-repr prefix b'...', stray
            # quotes, and the XML entities &apos;/&amp; instead.
            val = val.replace("b'", '')\
                     .replace("'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute (the long Description text is not useful
        # as an attribute)
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Tests for overlap of this filter with a spectrum.

    'full' means the filter's transmitting range lies entirely inside
    the spectrum's wavelength range; 'none' means the two ranges are
    disjoint; anything else is 'partial'.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Only consider wavelengths where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = active.min(), active.max()
    spec_wave = spectrum[0]
    spec_lo, spec_hi = spec_wave.min(), spec_wave.max()
    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
    """
    Plot the filter

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure (only when draw=False; when draw=True the
        figure is shown and None is returned)
    """
    COLORS = color_gen('Category10')
    # Make the figure if the caller did not supply one
    if fig is None:
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Throughput'
        title = self.filterID
        fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
    # Plot the raw curve as a faint thick backdrop
    # (the raw grid is stored in Angstroms, hence the q.AA factor)
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')
    # Plot each bin in its own color, with a marker at each bin center
    for x, y in self.rsr:
        fig.line(x, y, color=next(COLORS), line_width=2)
    fig.circle(*self.centers, size=8, color='black')
    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """The relative spectral response (RSR) curve.

    Returns an (n_bins, 2, pixels_per_bin) array pairing each bin's
    wavelength values (index 0 of axis 1) with its throughput values
    (index 1 of axis 1).
    """
    stacked = np.array([self.wave.value, self.throughput])
    return stacked.swapaxes(0, 1)
@property
def throughput(self):
    """The filter transmission values."""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """Set the throughput array; it must match the wavelength grid.

    Parameters
    ----------
    points: sequence
        The array of throughput points

    Raises
    ------
    ValueError
        If the shape differs from that of ``self.wave``.
    """
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """The wavelength array (an astropy Quantity)."""
    return self._wave

@wave.setter
def wave(self, wavelength):
    """Set the wavelength array and synchronize the wavelength units.

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The array with units

    Raises
    ------
    ValueError
        If ``wavelength`` carries no astropy units.
    """
    if not isinstance(wavelength, q.quantity.Quantity):
        raise ValueError("Wavelength must be in length units.")
    self._wave = wavelength
    # Assigning wave_units also re-converts all derived wavelength attrs
    self.wave_units = wavelength.unit
@property
def wave_units(self):
    """The wavelength units."""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """Set new wavelength units and convert every wavelength attribute.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units

    Raises
    ------
    ValueError
        If ``units`` is not a length unit.
    """
    # Make sure it's length units
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")
    # Update the units
    self._wave_units = units
    # Convert the wavelength grid and each derived wavelength quantity,
    # rounding to 5 decimal places as before
    self._wave = self.wave.to(self.wave_units).round(5)
    for attr in ('wave_min', 'wave_max', 'wave_eff', 'wave_center',
                 'wave_mean', 'wave_peak', 'wave_phot', 'wave_pivot',
                 'width_eff', 'fwhm'):
        setattr(self, attr, getattr(self, attr).to(self.wave_units).round(5))
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.bin
|
python
|
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
|
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L326-L376
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
             wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
             **kwargs):
    """
    Loads the bandpass data into the Filter object

    Parameters
    ----------
    band: str
        The bandpass filename (e.g. 2MASS.J), or 'tophat' for an ideal
        top hat filter (requires wave_min/wave_max kwargs)
    filter_directory: str
        The directory containing the filter files
    wave_units: str, astropy.units.core.PrefixUnit (optional)
        The wavelength units
    flux_units: str, astropy.units.core.PrefixUnit (optional)
        The zeropoint flux units

    Raises
    ------
    ValueError
        If band is 'tophat' but wave_min/wave_max are missing
    IOError
        If no filter file matches ``band``
    TypeError
        If the filter file is neither XML nor ascii
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')
    # Check if TopHat
    if band.lower().replace('-', '').replace(' ', '') == 'tophat':
        # check kwargs for limits
        wave_min = kwargs.get('wave_min')
        wave_max = kwargs.get('wave_max')
        filepath = ''
        if wave_min is None or wave_max is None:
            raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
        else:
            # Load the filter
            n_pix = kwargs.get('n_pixels', 100)
            self.load_TopHat(wave_min, wave_max, n_pix)
    else:
        # Get list of filters (filter names may or may not carry a
        # .txt extension on disk)
        files = glob(filter_directory+'*')
        no_ext = {f.replace('.txt', ''): f for f in files}
        bands = [os.path.basename(b) for b in no_ext]
        fp = os.path.join(filter_directory, band)
        filepath = no_ext.get(fp, fp)
        # If the filter is missing, ask what to do
        # BUG FIX: the URL in the message was broken ('http: //')
        if band not in bands:
            err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp://svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
            raise IOError(err)
        # Get the first line to determine format
        with open(filepath) as f:
            top = f.readline()
        # Read in XML file
        if top.startswith('<?xml'):
            self.load_xml(filepath)
        # Read in txt file
        elif filepath.endswith('.txt'):
            self.load_txt(filepath)
        else:
            raise TypeError("File must be XML or ascii format.")
        # Set the wavelength and throughput (TopHat sets its own)
        self._wave_units = q.AA
        self._wave = np.array([self.raw[0]]) * self.wave_units
        self._throughput = np.array([self.raw[1]])
        # Set n_bins and pixels_per_bin
        self.n_bins = 1
        self.pixels_per_bin = self.raw.shape[-1]
    # Rename some values and apply units
    self.wave_min = self.WavelengthMin * self.wave_units
    self.wave_max = self.WavelengthMax * self.wave_units
    self.wave_eff = self.WavelengthEff * self.wave_units
    self.wave_center = self.WavelengthCen * self.wave_units
    self.wave_mean = self.WavelengthMean * self.wave_units
    self.wave_peak = self.WavelengthPeak * self.wave_units
    self.wave_phot = self.WavelengthPhot * self.wave_units
    self.wave_pivot = self.WavelengthPivot * self.wave_units
    self.width_eff = self.WidthEff * self.wave_units
    self.fwhm = self.FWHM * self.wave_units
    self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
    # Delete redundant attributes
    del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
    del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
    del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
    del self.ZeroPointUnit, self.ZeroPoint
    try:
        del self.WavelengthUnit
    except AttributeError:
        pass
    # Set the wavelength units (converts all wavelength attrs above)
    if wave_units is not None:
        self.wave_units = wave_units
    # Set zeropoint flux units
    if flux_units is not None:
        self._flux_units = self.zp.unit
        self.flux_units = flux_units
    # Get references
    # BUG FIX: narrowed the bare 'except:' — only a missing
    # CalibrationReference attribute should be swallowed here
    self.refs = []
    try:
        if isinstance(self.CalibrationReference, str):
            self.refs = [self.CalibrationReference.split('=')[-1]]
    except AttributeError:
        self.CalibrationReference = None
    # Set a base name
    self.name = self.filterID.split('/')[-1]
    # Try to get the extinction vector R from Green et al. (2018)
    self.ext_vector = EXTINCTION.get(self.name, 0)
    # Set the systematic uncertainty (default 2 percent)
    self.systematics = SYSTEMATICS.get(self.name, 0.02)
    # Bin, passing through only kwargs that bin() accepts
    if kwargs:
        bwargs = {k: v for k, v in kwargs.items() if k in
                  inspect.signature(self.bin).parameters.keys()}
        self.bin(**bwargs)
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F], or [W, F, E] spectrum

    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to.
        NOTE(review): if the elements are astropy Quantities this list
        is converted (mutated) in place — callers see the change.
    plot: bool
        Plot the original and filtered spectrum

    Returns
    -------
    np.ndarray
        The filtered spectrum and error
    """
    # Convert to filter units if possible
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)
    # Make into iterable arrays (units are stripped here)
    wav, flx, *err = [np.asarray(i) for i in spectrum]
    # Check for error array; without one, propagate NaNs instead
    if len(err) == 0:
        err = np.ones_like(flx)*np.nan
        unc = False
    else:
        err = err[0]
        unc = True
    # Make flux 2D so single spectra and stacks share one code path
    if len(flx.shape) == 1:
        flx = np.expand_dims(flx, axis=0)
        err = np.expand_dims(err, axis=0)
    # Make throughput 3D (copy so the stored RSR is untouched)
    rsr = np.copy(self.rsr)
    # Make empty filtered arrays: (n_bins, n_spectra, pixels_per_bin)
    filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    # Rebin the input spectra to the filter wavelength array
    # and apply the RSR curve to the spectrum
    # (values outside the input wavelength range become NaN)
    for i, bn in enumerate(rsr):
        for j, (f, e) in enumerate(zip(flx, err)):
            filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
            filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
    # Propagate the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx*self.systematics
    if plot:
        # Make the figure
        COLORS = color_gen('Category10')
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Flux Density [{}]'.format(self.flux_units)
        fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
        # Plot the unfiltered spectrum
        fig.line(wav, flx[0], legend='Input spectrum', color='black')
        # Plot the uncertainties
        if unc:
            band_x = np.append(wav, wav[::-1])
            band_y = np.append(flx-err, (flx+err)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
        # Plot each spectrum bin
        # NOTE(review): this loop rebinds 'wav' to the filter's bin
        # wavelengths, shadowing the input wavelength array above
        for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(COLORS)
            fig.line(wav, bn[0], color=color)
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)
    return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO VOTable XML file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Do some formatting
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # BUG FIX: the original cleanup chain was garbled — 'b''
            # is an unbalanced string literal and replacing '&' with
            # '&' is a no-op. Strip the bytes-repr prefix b'...', stray
            # quotes, and the XML entities &apos;/&amp; instead.
            val = val.replace("b'", '')\
                     .replace("'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute (the long Description text is not useful
        # as an attribute)
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.centers
|
python
|
def centers(self):
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
|
A getter for the wavelength bin centers and average fluxes
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L379-L385
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter

    Populates ``self.raw`` with the transposed data table and sets one
    attribute per VOTable PARAM (except 'Description').
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T

    # Parse the filter metadata
    for p in [str(p).split() for p in vot.params]:

        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]

        # Do some formatting
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # Strip bytes-literal markers and decode common XML entities.
            # BUG FIX: these literals were mangled by entity decoding —
            # "'b''" was a broken quote and replace('&', '&') was a no-op
            # (originally the '&amp;' entity).
            val = val.replace("b'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')

        # Set the attribute
        if key != 'Description':
            setattr(self, key, val)

    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Report how this filter's nonzero bandpass overlaps a spectrum.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    str
        'full' if the bandpass lies entirely within the spectrum's
        wavelength range, 'none' if the two ranges are disjoint,
        otherwise 'partial'.
    """
    # Wavelength range where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    lo, hi = active.min(), active.max()

    # Wavelength range covered by the spectrum
    spec_wave = spectrum[0]
    w_lo, w_hi = spec_wave.min(), spec_wave.max()

    if lo >= w_lo and hi <= w_hi:
        return 'full'
    if hi < w_lo or w_hi < lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
    """Return the relative spectral response (RSR) curve.

    Stacks the wavelength values and throughput, then swaps the first
    two axes so each bin is a [wavelength, throughput] pair.
    """
    stacked = np.array([self.wave.value, self.throughput])
    return stacked.swapaxes(0, 1)
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
    """Set the throughput array.

    Parameters
    ----------
    points: sequence
        The array of throughput points; must match ``self.wave`` in shape.

    Raises
    ------
    ValueError
        If ``points`` and the wavelength array have different shapes.
    """
    # Throughput must line up element-for-element with the wavelengths
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.flux_units
|
python
|
def flux_units(self, units):
    """Set the zeropoint flux density units, converting ``zp`` in place.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density
    """
    # Reject anything that is not a recognized astropy unit type
    valid = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, valid):
        raise ValueError(units, "units not understood.")

    # Nothing to do if the units are unchanged
    if units == self.flux_units:
        return

    # Convert the zeropoint using spectral-density equivalencies at the
    # effective wavelength, then record the new units
    equiv = q.spectral_density(self.wave_eff)
    self.zp = self.zp.to(units, equivalencies=equiv)
    self._flux_units = units
|
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L393-L415
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter

    Populates ``self.raw`` with the transposed data table and sets one
    attribute per VOTable PARAM (except 'Description').
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T

    # Parse the filter metadata
    for p in [str(p).split() for p in vot.params]:

        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]

        # Do some formatting
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # Strip bytes-literal markers and decode common XML entities.
            # BUG FIX: these literals were mangled by entity decoding —
            # "'b''" was a broken quote and replace('&', '&') was a no-op
            # (originally the '&amp;' entity).
            val = val.replace("b'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')

        # Set the attribute
        if key != 'Description':
            setattr(self, key, val)

    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.info
|
python
|
def info(self, fetch=False):
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
|
Print a table of info about the current filter
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L417-L436
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.load_TopHat
|
python
|
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
|
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L438-L492
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
    """Dice the throughput curve into wavelength bins, grism-style.

    Useful for G141, G102, and other grisms. When both bin arguments are
    supplied, ``pixels_per_bin`` takes precedence.

    Parameters
    ----------
    n_bins: int
        The number of bins to dice the throughput curve into
    pixels_per_bin: int (optional)
        The number of channels per bin, used to derive ``n_bins``
    wave_min: astropy.units.quantity (optional)
        The minimum wavelength to use
    wave_max: astropy.units.quantity (optional)
        The maximum wavelength to use
    """
    # Adopt new wavelength limits when they are supplied
    if wave_min is not None:
        self.wave_min = wave_min
    if wave_max is not None:
        self.wave_max = wave_max

    # Keep only the part of the raw curve inside [wave_min, wave_max]
    raw_wave = self.raw[0]
    keep = np.logical_and(raw_wave * q.AA >= self.wave_min,
                          raw_wave * q.AA <= self.wave_max)
    self.wave = (raw_wave[keep] * q.AA).to(self.wave_units)
    self.throughput = self.raw[1][keep]
    print('Bandpass trimmed to',
          '{} - {}'.format(self.wave_min, self.wave_max))

    # Work out the bin geometry from whichever argument was given
    n_points = len(self.wave)
    if isinstance(pixels_per_bin, int):
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = int(n_points / self.pixels_per_bin)
    elif isinstance(n_bins, int):
        self.n_bins = n_bins
        self.pixels_per_bin = int(n_points / self.n_bins)
    else:
        raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
    print('{} bins of {} pixels each.'.format(self.n_bins,
                                              self.pixels_per_bin))

    # Trim equal amounts from both edges so the arrays reshape cleanly
    # into an integer (n_bins, pixels_per_bin) grid
    usable = self.n_bins * self.pixels_per_bin
    lo = (n_points - usable) // 2
    grid = (self.n_bins, self.pixels_per_bin)
    self.wave = self.wave[lo:lo + usable].reshape(grid)
    self.throughput = self.throughput[lo:lo + usable].reshape(grid)
@property
def centers(self):
    """A getter for the wavelength bin centers and average fluxes"""
    # Average each bin (row) of the wavelength and throughput grids,
    # ignoring NaNs, then stack as a (2, n_bins) array
    per_bin = [np.nanmean(grid, axis=1)
               for grid in (self.wave.value, self.throughput)]
    return np.asarray(per_bin)
@property
def flux_units(self):
    """A getter for the flux units"""
    return self._flux_units

@flux_units.setter
def flux_units(self, units):
    """
    A setter for the flux units

    Converts the stored zeropoint ``self.zp`` to the new units before
    recording them, so the zeropoint and its units stay consistent.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density

    Raises
    ------
    ValueError
        If ``units`` is not one of the accepted astropy unit types.
    """
    # Check that the units are valid
    # NOTE(review): despite the docstring, a plain str fails this
    # isinstance check — confirm whether str support is intended
    dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, dtypes):
        raise ValueError(units, "units not understood.")
    # Check that the units changed; identical units are a no-op
    if units != self.flux_units:
        # Convert to new units using spectral_density equivalencies at
        # the effective wavelength (needed for e.g. flam <-> fnu <-> Jy)
        sfd = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=sfd)
        # Store new units
        self._flux_units = units
def info(self, fetch=False):
    """Print (or return) a table of info about the current filter.

    Parameters
    ----------
    fetch: bool
        If True, return the table instead of printing it
    """
    # Collect the printable instance attributes, skipping private
    # names and the bulky array attributes
    shown_types = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    skip = ('rsr', 'raw', 'centers')
    rows = []
    for key, val in vars(self).items():
        if (isinstance(val, shown_types) and key not in skip
                and not key.startswith('_')):
            rows.append([key, str(val)])

    # Build a two-column table, sorted by attribute name
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')

    if fetch:
        return table
    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_txt(self, filepath):
    """Load the filter from a txt file and compute its properties.

    Reads a two-column (wavelength, throughput) ASCII file, computes a
    Vega-based zeropoint, and derives the standard SVO bandpass
    statistics (effective/mean/pivot wavelengths, FWHM, etc.).

    Parameters
    ----------
    filepath: str
        The filepath
    """
    self.raw = np.genfromtxt(filepath, unpack=True)
    # Convert to Angstroms if microns (heuristic: a max wavelength
    # below 100 is assumed to be in microns)
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    # Assumes exactly two columns in the file — TODO confirm
    x, f = self.raw
    # Get a spectrum of Vega bundled with the package
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[: 2]
    # Vega file is in microns; convert to Angstroms
    vega[0] = vega[0] * 10000
    # Rebin Vega onto the filter wavelength grid
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
    # NOTE(review): rebin_spec appears to return a 1D flux array, so
    # `vega[1]` here indexes a single element rather than the flux row;
    # the later statistics use the full `vega` array — confirm intent
    flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    # Zeropoint = throughput-weighted mean Vega flux density
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    # NOTE(review): this is the maximum *wavelength*, not the wavelength
    # of peak throughput — confirm intended semantics of WavelengthPeak
    self.WavelengthPeak = np.max(self.raw[0])
    # Min wavelength: interpolate to 1% of peak on the rising edge
    f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
    x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
    self.WavelengthMin = np.interp(max(f)/100., f0, x0)
    # Max wavelength: same, on the reversed (falling) edge
    f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    self.WavelengthMax = np.interp(max(f)/100., f1, x1)
    # Vega-weighted and throughput-weighted wavelength statistics
    self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
    # Half max stuff: FWHM and center from the half-maximum crossings
    halfmax = f.max()/2.
    hm_x1 = x[f > halfmax][0]
    hm_x2 = x[f > halfmax][-1]
    self.FWHM = hm_x2 - hm_x1
    self.WavelengthCen = (hm_x1 + hm_x2)/2.
    # Add missing attributes expected by __init__
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter data and metadata from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata from the VOTable PARAM elements
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Do some formatting: floats are cast, strings are cleaned
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # Strip the XML-escaped bytes-literal artifacts from string
            # values. FIX: the previous literals were unbalanced
            # (.replace('b'', '') is a syntax error) because the
            # &apos;/&amp; entities had been decoded in transit.
            val = val.replace('b&apos;', '')\
                     .replace('&apos;', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute; the free-text Description is not kept
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes expected by __init__
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Classify the wavelength overlap of this filter with a spectrum.

    'full' means the filter's non-zero throughput lies entirely within
    the spectrum's wavelength range::

        |---------- spectrum ----------|
            |------ self ------|

    'partial' means the two ranges intersect but the filter is not
    fully contained; 'none' means the ranges are disjoint.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Only the region where the filter actually transmits counts
    nonzero_wave = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = nonzero_wave.min(), nonzero_wave.max()
    spec_wave = spectrum[0]
    spec_lo, spec_hi = spec_wave.min(), spec_wave.max()

    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
    """
    Plot the filter

    Draws the raw throughput curve plus each binned RSR curve with its
    bin-center markers on a bokeh figure.

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on; a new one is created when omitted
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure (only when draw is False)
    """
    COLORS = color_gen('Category10')
    # Make the figure if the caller did not supply one
    if fig is None:
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Throughput'
        title = self.filterID
        fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
    # Plot the raw curve (faint, thick line behind the binned curves);
    # raw wavelengths are stored in Angstroms, so convert for display
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')
    # Plot each bin's curve with a distinct color, then the bin centers
    for x, y in self.rsr:
        fig.line(x, y, color=next(COLORS), line_width=2)
    fig.circle(*self.centers, size=8, color='black')
    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """A getter for the relative spectral response (rsr) curve"""
    # Pair each wavelength row with its throughput row, giving an
    # array of shape (n_bins, 2, pixels_per_bin)
    return np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
@property
def throughput(self):
    """A getter for the throughput"""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """A setter for the throughput

    Parameters
    ----------
    points: sequence
        The array of throughput points

    Raises
    ------
    ValueError
        If the throughput shape does not match the wavelength shape.
    """
    # The throughput grid must align element-for-element with the
    # wavelength grid, so reject any shape mismatch up front
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """A getter for the wavelength"""
    return self._wave

@wave.setter
def wave(self, wavelength):
    """A setter for the wavelength

    Also updates ``self.wave_units``, which triggers the unit setter's
    conversion of the other wavelength-valued attributes.

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The array with units

    Raises
    ------
    ValueError
        If ``wavelength`` is not an astropy Quantity.
    """
    # Test units
    # NOTE(review): this only checks for a Quantity, not that the unit
    # is a length as the message implies; non-length units are caught
    # later by the wave_units setter — confirm this is intended
    if not isinstance(wavelength, q.quantity.Quantity):
        raise ValueError("Wavelength must be in length units.")
    self._wave = wavelength
    # Propagate the unit through the wave_units setter (side effects)
    self.wave_units = wavelength.unit
@property
def wave_units(self):
    """A getter for the wavelength units"""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """
    A setter for the wavelength units

    Converts every wavelength-valued attribute of the filter to the new
    units, so all derived quantities stay mutually consistent.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units

    Raises
    ------
    ValueError
        If the units are not length-equivalent.
    """
    # Make sure it's length units
    # NOTE(review): a plain str has no .is_equivalent, so despite the
    # docstring a str argument would raise AttributeError — confirm
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")
    # Update the units
    self._wave_units = units
    # Update all the wavelength values (rounded to 5 decimal places).
    # Assumes wave_min/wave_max/... already exist; __init__ sets
    # self._wave_units directly (bypassing this setter) before they do.
    self._wave = self.wave.to(self.wave_units).round(5)
    self.wave_min = self.wave_min.to(self.wave_units).round(5)
    self.wave_max = self.wave_max.to(self.wave_units).round(5)
    self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
    self.wave_center = self.wave_center.to(self.wave_units).round(5)
    self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
    self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
    self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
    self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
    self.width_eff = self.width_eff.to(self.wave_units).round(5)
    self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.load_txt
|
python
|
def load_txt(self, filepath):
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
|
Load the filter from a txt file
Parameters
----------
file: str
The filepath
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L494-L546
|
[
"def rebin_spec(spec, wavnew, oversamp=100, plot=False):\n \"\"\"\n Rebin a spectrum to a new wavelength array while preserving\n the total flux\n\n Parameters\n ----------\n spec: array-like\n The wavelength and flux to be binned\n wavenew: array-like\n The new wavelength array\n\n Returns\n -------\n np.ndarray\n The rebinned flux\n\n \"\"\"\n wave, flux = spec\n nlam = len(wave)\n x0 = np.arange(nlam, dtype=float)\n x0int = np.arange((nlam-1.) * oversamp + 1., dtype=float)/oversamp\n w0int = np.interp(x0int, x0, wave)\n spec0int = np.interp(w0int, wave, flux)/oversamp\n\n # Set up the bin edges for down-binning\n maxdiffw1 = np.diff(wavnew).max()\n w1bins = np.concatenate(([wavnew[0]-maxdiffw1],\n .5*(wavnew[1::]+wavnew[0: -1]),\n [wavnew[-1]+maxdiffw1]))\n\n # Bin down the interpolated spectrum:\n w1bins = np.sort(w1bins)\n nbins = len(w1bins)-1\n specnew = np.zeros(nbins)\n inds2 = [[w0int.searchsorted(w1bins[ii], side='left'),\n w0int.searchsorted(w1bins[ii+1], side='left')]\n for ii in range(nbins)]\n\n for ii in range(nbins):\n specnew[ii] = np.sum(spec0int[inds2[ii][0]: inds2[ii][1]])\n\n return specnew\n"
] |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
             wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
             **kwargs):
    """
    Loads the bandpass data into the Filter object

    Parameters
    ----------
    band: str
        The bandpass filename (e.g. 2MASS.J), or 'tophat' for a
        synthetic top hat filter (requires wave_min/wave_max kwargs)
    filter_directory: str
        The directory containing the filter files
    wave_units: str, astropy.units.core.PrefixUnit (optional)
        The wavelength units
    flux_units: str, astropy.units.core.PrefixUnit (optional)
        The zeropoint flux units
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')
    # Check if TopHat (accepts 'tophat', 'top-hat', 'Top Hat', ...)
    if band.lower().replace('-', '').replace(' ', '') == 'tophat':
        # check kwargs for limits
        wave_min = kwargs.get('wave_min')
        wave_max = kwargs.get('wave_max')
        filepath = ''
        if wave_min is None or wave_max is None:
            raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
        else:
            # Load the filter
            n_pix = kwargs.get('n_pixels', 100)
            self.load_TopHat(wave_min, wave_max, n_pix)
    else:
        # Get list of filters; map extension-stripped names to files
        files = glob(filter_directory+'*')
        no_ext = {f.replace('.txt', ''): f for f in files}
        bands = [os.path.basename(b) for b in no_ext]
        fp = os.path.join(filter_directory, band)
        filepath = no_ext.get(fp, fp)
        # If the filter is missing, raise with the available choices
        # (FIX: restored the mangled 'http: //' URL in the message)
        if band not in bands:
            err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp://svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
            raise IOError(err)
        # Get the first line to determine format
        with open(filepath) as f:
            top = f.readline()
        # Read in XML file
        if top.startswith('<?xml'):
            self.load_xml(filepath)
        # Read in txt file
        elif filepath.endswith('.txt'):
            self.load_txt(filepath)
        else:
            raise TypeError("File must be XML or ascii format.")
    # Set the wavelength and throughput (raw data is in Angstroms)
    self._wave_units = q.AA
    self._wave = np.array([self.raw[0]]) * self.wave_units
    self._throughput = np.array([self.raw[1]])
    # Set n_bins and pixels_per_bin
    self.n_bins = 1
    self.pixels_per_bin = self.raw.shape[-1]
    # Rename the SVO-style attributes and apply units
    self.wave_min = self.WavelengthMin * self.wave_units
    self.wave_max = self.WavelengthMax * self.wave_units
    self.wave_eff = self.WavelengthEff * self.wave_units
    self.wave_center = self.WavelengthCen * self.wave_units
    self.wave_mean = self.WavelengthMean * self.wave_units
    self.wave_peak = self.WavelengthPeak * self.wave_units
    self.wave_phot = self.WavelengthPhot * self.wave_units
    self.wave_pivot = self.WavelengthPivot * self.wave_units
    self.width_eff = self.WidthEff * self.wave_units
    self.fwhm = self.FWHM * self.wave_units
    self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
    # Delete redundant attributes now that the unitful versions exist
    del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
    del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
    del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
    del self.ZeroPointUnit, self.ZeroPoint
    # load_TopHat does not set WavelengthUnit, so tolerate its absence
    try:
        del self.WavelengthUnit
    except AttributeError:
        pass
    # Set the wavelength units (converts all wavelength attributes)
    if wave_units is not None:
        self.wave_units = wave_units
    # Set zeropoint flux units (converts the zeropoint)
    if flux_units is not None:
        self._flux_units = self.zp.unit
        self.flux_units = flux_units
    # Get references
    self.refs = []
    try:
        if isinstance(self.CalibrationReference, str):
            self.refs = [self.CalibrationReference.split('=')[-1]]
    # FIX: was a bare `except:` which could mask unrelated errors; only
    # a missing CalibrationReference attribute is expected here (e.g.
    # filters loaded from plain text files never set it)
    except AttributeError:
        self.CalibrationReference = None
    # Set a base name (strip the facility prefix from the SVO ID)
    self.name = self.filterID.split('/')[-1]
    # Try to get the extinction vector R from Green et al. (2018)
    self.ext_vector = EXTINCTION.get(self.name, 0)
    # Set the systematic uncertainty (default 2 percent)
    self.systematics = SYSTEMATICS.get(self.name, 0.02)
    # Bin, forwarding only the kwargs that bin() accepts
    if kwargs:
        bwargs = {k: v for k, v in kwargs.items() if k in
                  inspect.signature(self.bin).parameters.keys()}
        self.bin(**bwargs)
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F], or [W, F, E] spectrum

    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to. NOTE: unitful elements are converted
        and written back into this sequence in place.
    plot: bool
        Plot the original and filtered spectrum

    Returns
    -------
    np.ndarray
        The filtered spectrum and error
    """
    # Convert to filter units if possible; f_units stays 1. (unitless)
    # when the input flux carries no units
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)
    # Make into iterable arrays (err is [] when no error column given)
    wav, flx, *err = [np.asarray(i) for i in spectrum]
    # Check for error array; substitute NaNs when absent
    if len(err) == 0:
        err = np.ones_like(flx)*np.nan
        unc = False
    else:
        err = err[0]
        unc = True
    # Make flux 2D so single spectra and stacks share one code path
    if len(flx.shape) == 1:
        flx = np.expand_dims(flx, axis=0)
        err = np.expand_dims(err, axis=0)
    # Make throughput 3D: rsr has shape (n_bins, 2, pixels_per_bin)
    rsr = np.copy(self.rsr)
    # Make empty filtered arrays, shape (n_bins, n_spectra, pixels_per_bin)
    filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    # Rebin the input spectra to the filter wavelength array
    # and apply the RSR curve to the spectrum; points outside the
    # spectrum's coverage become NaN (left/right=np.nan)
    for i, bn in enumerate(rsr):
        for j, (f, e) in enumerate(zip(flx, err)):
            filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
            filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
    # Propagate the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx*self.systematics
    if plot:
        # Make the figure
        COLORS = color_gen('Category10')
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Flux Density [{}]'.format(self.flux_units)
        fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
        # Plot the unfiltered spectrum
        fig.line(wav, flx[0], legend='Input spectrum', color='black')
        # Plot the uncertainties as a shaded band
        if unc:
            band_x = np.append(wav, wav[::-1])
            band_y = np.append(flx-err, (flx+err)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
        # Plot each spectrum bin
        # NOTE(review): this loop rebinds `wav` (the input wavelength
        # array) to each bin's wavelengths — intentional here since wav
        # is not used afterwards, but fragile if code is added below
        for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(COLORS)
            fig.line(wav, bn[0], color=color)
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)
    # squeeze() collapses the bin/spectrum axes when they are singletons
    return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Loads a top hat filter given wavelength min and max values

    Builds a unit-throughput rectangular bandpass and fills in the
    metadata attributes that __init__ expects from a real filter.

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Get min, max, effective wavelengths and width
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    # Internal wavelengths are stored in Angstroms
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)
    # Create the RSR curve: uniform grid, throughput of 1 everywhere
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    # NOTE(review): ones_like of a Quantity may return a Quantity
    # carrying Angstrom units rather than a dimensionless array —
    # confirm downstream code tolerates this
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])
    # Calculate the effective wavelength (midpoint, plain float)
    wave_eff = ((wave_min + wave_max) / 2.).value
    width = (wave_max - wave_min).value
    # Add the attributes normally provided by the SVO XML metadata;
    # placeholders where a top hat has no meaningful value
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = width
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    # All wavelength statistics collapse to the midpoint for a top hat
    self.WavelengthCen = wave_eff
    self.WavelengthEff = wave_eff
    self.WavelengthMean = wave_eff
    self.WavelengthPeak = wave_eff
    self.WavelengthPhot = wave_eff
    self.WavelengthPivot = wave_eff
    self.WavelengthUCD = ''
    self.WidthEff = width
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_xml(self, filepath):
    """Load the filter data and metadata from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata from the VOTable PARAM elements
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Do some formatting: floats are cast, strings are cleaned
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # Strip the XML-escaped bytes-literal artifacts from string
            # values. FIX: the previous literals were unbalanced
            # (.replace('b'', '') is a syntax error) because the
            # &apos;/&amp; entities had been decoded in transit.
            val = val.replace('b&apos;', '')\
                     .replace('&apos;', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute; the free-text Description is not kept
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes expected by __init__
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.