text stringlengths 81 112k |
|---|
Creates a markdown table. The first row will be headers.
Parameters
----------
table : list of lists of str
A list of rows containing strings. If any of these strings
consist of multiple lines, they will be converted to single line
because markdown tables do not support multiline cells.
Returns
-------
str
The markdown formatted string
Example
-------
>>> table_data = [
... ["Species", "Coolness"],
... ["Dog", "Awesome"],
... ["Cat", "Meh"],
... ]
>>> print(data2md(table_data))
| Species | Coolness |
|---------|----------|
| Dog | Awesome |
| Cat | Meh |
def data2md(table):
    """
    Creates a markdown table. The first row will be headers.

    Parameters
    ----------
    table : list of lists of str
        A list of rows containing strings. If any of these strings
        consist of multiple lines, they will be converted to single line
        because markdown tables do not support multiline cells.

    Returns
    -------
    str
        The markdown formatted string

    Example
    -------
    >>> table_data = [
    ...     ["Species", "Coolness"],
    ...     ["Dog", "Awesome"],
    ...     ["Cat", "Meh"],
    ... ]
    >>> print(data2md(table_data))
    | Species | Coolness |
    |---------|----------|
    |   Dog   | Awesome  |
    |   Cat   |   Meh    |
    """
    # work on a copy; normalization below mutates the rows
    table = copy.deepcopy(table)
    table = ensure_table_strings(table)
    table = multis_2_mono(table)
    table = add_cushions(table)

    column_count = len(table[0])
    widths = [get_column_width(col, table) for col in range(column_count)]

    def render_row(cells):
        # one markdown row: centered cells joined by pipes
        return '|' + '|'.join(cells) + '|'

    rows = [render_row([center_line(widths[i], table[0][i])
                        for i in range(column_count)])]
    # header/body divider made of dashes, one run per column
    rows.append(render_row([center_line(widths[i], '-' * widths[i])
                            for i in range(column_count)]))
    for body_row in table[1:]:
        rows.append(render_row([center_line(widths[i], body_row[i])
                                for i in range(len(body_row))]))
    return '\n'.join(rows)
Vertically center the text within the cell's grid.
Like this::
+--------+ +--------+
| foobar | | |
| | | |
| | --> | foobar |
| | | |
| | | |
+--------+ +--------+
Parameters
----------
cell : dashtable.data2rst.Cell
Returns
-------
cell : dashtable.data2rst.Cell
def v_center_cell_text(cell):
    """
    Vertically center the text within the cell's grid.

    Like this::

        +--------+      +--------+
        | foobar |      |        |
        |        |      |        |
        |        | -->  | foobar |
        |        |      |        |
        |        |      |        |
        +--------+      +--------+

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
        The same cell, with its ``text`` recentered in place.
    """
    lines = cell.text.split('\n')
    # interior width excludes the two '|' border characters
    width = len(lines[0]) - 2
    # interior rows, with the left/right border characters stripped
    interior = [line[1:-1] for line in lines[1:-1]]

    # count leading and trailing blank interior rows
    top_blanks = 0
    while top_blanks < len(interior) and not interior[top_blanks].rstrip():
        top_blanks += 1
    bottom_blanks = 0
    while (bottom_blanks < len(interior) - top_blanks and
            not interior[len(interior) - 1 - bottom_blanks].rstrip()):
        bottom_blanks += 1

    content = interior[top_blanks:len(interior) - bottom_blanks]
    padding = len(interior) - len(content)
    blank = ' ' * width
    # split the blank rows evenly, putting the extra one (if odd) below
    above = padding // 2
    below = padding - above
    centered = [blank] * above + content + [blank] * below

    # write the recentered rows back between the original borders
    for i, row_text in enumerate(centered):
        lines[i + 1] = lines[i + 1][0] + row_text + lines[i + 1][-1]
    cell.text = '\n'.join(lines)
    return cell
Convert a list of lists of str into a reStructuredText Grid Table
Parameters
----------
table : list of lists of str
spans : list of lists of lists of int, optional
These are [row, column] pairs of cells that are merged in the
table. Rows and columns start in the top left of the table. For
example::
+--------+--------+
| [0, 0] | [0, 1] |
+--------+--------+
| [1, 0] | [1, 1] |
+--------+--------+
use_headers : bool, optional
Whether or not the first row of table data will become headers.
center_cells : bool, optional
Whether or not cells will be centered
center_headers: bool, optional
Whether or not headers will be centered
Returns
-------
str
The grid table string
Example
-------
>>> spans = [
... [ [3, 1], [4, 1] ],
... [ [3, 2], [4, 2] ],
... [ [2, 1], [2, 2] ],
... ]
>>> table = [
... ["Header 1", "Header 2", "Header 3"],
... ["body row 1", "column 2", "column 3"],
... ["body row 2", "Cells may span columns", ""],
... ["body row 3", "Cells may span rows.", "- Cells\\n-contain\\n-blocks"],
... ["body row 4", "", ""],
... ]
>>> print(dashtable.data2rst(table, spans))
+------------+------------+-----------+
| Header 1 | Header 2 | Header 3 |
+============+============+===========+
| body row 1 | column 2 | column 3 |
+------------+------------+-----------+
| body row 2 | Cells may span columns.|
+------------+------------+-----------+
| body row 3 | Cells may | - Cells |
+------------+ span rows. | - contain |
| body row 4 | | - blocks. |
+------------+------------+-----------+
def data2rst(table, spans=None, use_headers=True,
             center_cells=False, center_headers=False):
    """
    Convert a list of lists of str into a reStructuredText Grid Table

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of lists of int, optional
        These are [row, column] pairs of cells that are merged in the
        table. Rows and columns start in the top left of the table. For
        example::

            +--------+--------+
            | [0, 0] | [0, 1] |
            +--------+--------+
            | [1, 0] | [1, 1] |
            +--------+--------+

        Defaults to a single-cell span at [0, 0] (i.e. no merging).
    use_headers : bool, optional
        Whether or not the first row of table data will become headers.
    center_cells : bool, optional
        Whether or not cells will be centered
    center_headers : bool, optional
        Whether or not headers will be centered

    Returns
    -------
    str
        The grid table string, or an "ERROR: ..." message if the table
        or spans fail validation.
    """
    # Use a None sentinel instead of a mutable default argument so the
    # default list can never be shared (and mutated) across calls.
    if spans is None:
        spans = [[[0, 0]]]

    table = copy.deepcopy(table)

    table_ok = check_table(table)
    if table_ok != "":
        return "ERROR: " + table_ok

    # only validate spans the caller actually supplied
    if spans != [[[0, 0]]]:
        for span in spans:
            span_ok = check_span(span, table)
            if span_ok != "":
                return "ERROR: " + span_ok

    table = ensure_table_strings(table)
    table = add_cushions(table)

    spans = table_cells_2_spans(table, spans)

    widths = get_output_column_widths(table, spans)
    heights = get_output_row_heights(table, spans)

    cells = []
    for span in spans:
        cell = make_cell(table, span, widths, heights, use_headers)
        cells.append(cell)
    cells = list(sorted(cells))

    if center_cells:
        for cell in cells:
            if not cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)

    if center_headers:
        for cell in cells:
            if cell.is_header:
                center_cell_text(cell)
                v_center_cell_text(cell)

    grid_table = merge_all_cells(cells)
    return grid_table
Calculate reasonable height and width for tree given N tips
def set_dims_from_tree_size(self):
    """Calculate reasonable height and width for tree given N tips"""
    ntips = len(self.treelist[0])
    scaled = 18 * ntips
    if self.style.orient in ("right", "left"):
        # tips run vertically, so height is the long tip-wise dimension
        self.style.height = self.style.height or max(275, min(1000, scaled))
        self.style.width = self.style.width or max(300, min(500, scaled))
    else:
        # tips run horizontally, so width is the long tip-wise dimension
        self.style.width = self.style.width or max(275, min(1000, scaled))
        self.style.height = self.style.height or max(225, min(500, scaled))
Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting.
def add_tip_labels_to_axes(self):
    """
    Add text offset from tips of tree with correction for orientation,
    and fixed_order which is usually used in multitree plotting.
    """
    # tip coordinates: tips are spread along one axis while the other
    # axis is pinned at zero, depending on orientation
    if self.style.orient in ("up", "down"):
        xpos = np.arange(self.ntips)
        ypos = np.zeros(self.ntips)
    if self.style.orient in ("right", "left"):
        xpos = np.zeros(self.ntips)
        ypos = np.arange(self.ntips)

    # a per-tip color list overrides any single 'fill' in the style dict
    if self.style.tip_labels_colors:
        self.style.tip_labels_style.pop("fill")

    # fill anchor shift if None
    # (Toytrees fill this at draw() normally when tip_labels != None)
    if self.style.tip_labels_style["-toyplot-anchor-shift"] is None:
        self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px"

    # draw tip names at the coordinates calculated above; labels are
    # rotated when the tree points up or down
    label_angle = 0 if self.style.orient in ("right", "left") else -90
    self.axes.text(
        xpos,
        ypos,
        self.tip_labels,
        angle=label_angle,
        style=self.style.tip_labels_style,
        color=self.style.tip_labels_colors,
    )

    # aligned tip-label lines inherit stroke-width from edge_style
    # unless the user already set one
    if not self.style.edge_align_style.get("stroke-width"):
        self.style.edge_align_style['stroke-width'] = (
            self.style.edge_style['stroke-width'])
Modifies display range to ensure tip labels fit. This is a bit hackish
still. The problem is that the 'extents' range of the rendered text
is totally correct. So we add a little buffer here. Should add for
user to be able to modify this if needed. If not using edge lengths
then need to use unit length for treeheight.
def fit_tip_labels(self):
    """
    Modifies display range to ensure tip labels fit. This is a bit hackish
    still. The problem is that the 'extents' range of the rendered text
    is totally correct. So we add a little buffer here. Should add for
    user to be able to modify this if needed. If not using edge lengths
    then need to use unit length for treeheight.
    """
    # nothing to fit without labels
    if not self.tip_labels:
        return

    # longest name (this will include html hacks)
    longest = max(len(name) for name in self.tip_labels)
    multiplier = 0.85 if longest > 10 else 0.25

    tree = self.treelist[0]
    if self.style.use_edge_lengths:
        height = tree.treenode.height
        addon = height + height * multiplier
    else:
        addon = tree.treenode.get_farthest_leaf(True)[1]

    # widen the domain on the side where labels hang off the tips
    if self.style.orient == "right":
        self.axes.x.domain.max = addon
    elif self.style.orient == "down":
        self.axes.y.domain.min = -1 * addon
Adds 2 newlines to the end of text
def convert_p(element, text):
"""
Adds 2 newlines to the end of text
"""
depth = -1
while element:
if (not element.name == '[document]' and
not element.parent.get('id') == '__RESTRUCTIFY_WRAPPER__'):
depth += 1
element = element.parent
if text:
text = ' ' * depth + text
return text |
Convert a simple table to data (the kind used by DashTable)
Parameters
----------
text : str
A valid simple rst table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a [row, column] pair that defines a group of merged
cells in the table. In a simple rst table, spans can only be
colspans.
use_headers : bool
Whether or not this table uses headers
headers_row : int
The row where headers are located
Notes
-----
This function requires docutils_.
.. _docutils: http://docutils.sourceforge.net/
Example
-------
>>> html_text = '''
... ====== ===== ======
... Inputs Output
... ------------- ------
... A B A or B
... ====== ===== ======
... False False False
... True False True
... False True True
... True True True
... ====== ===== ======
... '''
>>> from dashtable import simple2data
>>> table, spans, use_headers, headers_row = simple2data(html_text)
>>> from pprint import pprint
>>> pprint(table)
[['Inputs', 'Output', ''],
['A', 'B', 'A or B'],
['False', 'False', 'False'],
['True', 'False', 'True'],
['False', 'True', 'True'],
['True', 'True', 'True']]
>>> print(spans)
[[[0, 0], [0, 1]]]
>>> print(use_headers)
True
>>> print(headers_row)
1
def simple2data(text):
    """
    Convert a simple table to data (the kind used by DashTable)

    Parameters
    ----------
    text : str
        A valid simple rst table

    Returns
    -------
    table : list of lists of str
    spans : list of lists of lists of int
        A span is a [row, column] pair that defines a group of merged
        cells in the table. In a simple rst table, spans can only be
        colspans.
    use_headers : bool
        Whether or not this table uses headers
    headers_row : int
        The row where headers are located

    Notes
    -----
    This function requires docutils_.

    .. _docutils: http://docutils.sourceforge.net/

    Example
    -------
    >>> html_text = '''
    ... ====== ===== ======
    ... Inputs Output
    ... ------------- ------
    ... A B A or B
    ... ====== ===== ======
    ... False False False
    ... True False True
    ... False True True
    ... True True True
    ... ====== ===== ======
    ... '''
    >>> from dashtable import simple2data
    >>> table, spans, use_headers, headers_row = simple2data(html_text)
    >>> from pprint import pprint
    >>> pprint(table)
    [['Inputs', 'Output', ''],
    ['A', 'B', 'A or B'],
    ['False', 'False', 'False'],
    ['True', 'False', 'True'],
    ['False', 'True', 'True'],
    ['True', 'True', 'True']]
    >>> print(spans)
    [[[0, 0], [0, 1]]]
    >>> print(use_headers)
    True
    >>> print(headers_row)
    1
    """
    # docutils is an optional dependency; bail out gracefully if missing
    try:
        import docutils.statemachine
        import docutils.parsers.rst.tableparser
    except ImportError:
        print("ERROR: You must install the docutils library to use simple2data")
        return

    lines = text.split('\n')
    lines = truncate_empty_lines(lines)

    # strip the common leading indentation so docutils sees a
    # left-aligned table
    leading_space = lines[0].replace(lines[0].lstrip(), '')
    for i in range(len(lines)):
        lines[i] = lines[i][len(leading_space)::]

    parser = docutils.parsers.rst.tableparser.SimpleTableParser()
    block = docutils.statemachine.StringList(list(lines))
    # parse() yields (column_widths, header_rows, body_rows)
    simple_data = list(parser.parse(block))
    column_widths = simple_data.pop(0)
    column_count = len(column_widths)
    headers_row = 0
    if len(simple_data[0]) > 0:
        # header rows present: record their count, then prepend them to
        # the body rows so the table can be built from one row sequence
        use_headers = True
        headers_row = len(simple_data[0]) - 1
        headers = simple_data[0][0]
        row_count = len(simple_data[1]) + len(simple_data[0])
        while len(simple_data[0]) > 0:
            simple_data[1].insert(0, simple_data[0][-1])
            simple_data[0].pop(-1)
        simple_data.pop(0)
    else:
        use_headers = False
        simple_data.pop(0)
        row_count = len(simple_data[0])
    simple_data = simple_data[0]
    table = make_empty_table(row_count, column_count)
    spans = []
    for row in range(len(simple_data)):
        for column in range(len(simple_data[row])):
            try:
                # each docutils cell is (morerows, morecols, offset, lines)
                text = '\n'.join(simple_data[row][column][3]).rstrip()
                table[row][column] = text
                extra_rows = simple_data[row][column][0]
                extra_columns = simple_data[row][column][1]
                span = make_span(row, column, extra_rows, extra_columns)
                span = sorted(span)
                # drop consecutive duplicate [row, column] pairs
                span = list(span for span,_ in itertools.groupby(span))
                # single-cell "spans" are not real spans
                if not len(span) == 1:
                    spans.append(span)
            except TypeError:
                # placeholder cells covered by a span are None; skip them
                pass
    spans = sorted(spans)
    return table, spans, use_headers, headers_row
Gets the widths of the columns of the output table
Parameters
----------
table : list of lists of str
The table of rows of text
spans : list of lists of int
The [row, column] pairs of combined cells
Returns
-------
widths : list of int
The widths of each column in the output table
def get_output_column_widths(table, spans):
    """
    Gets the widths of the columns of the output table

    Parameters
    ----------
    table : list of lists of str
        The table of rows of text
    spans : list of lists of int
        The [row, column] pairs of combined cells

    Returns
    -------
    widths : list of int
        The widths of each column in the output table, each at least 3
    """
    # every column starts at the minimum width of 3
    widths = [3 for _ in table[0]]

    # first pass: size each column to its longest single-column cell
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count == 1:
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                length = get_longest_line_length(text)
                if length > widths[column]:
                    widths[column] = length

    # second pass: widen the columns under each multi-column span until
    # its text fits. A span of N columns also covers N - 1 border
    # characters, hence the extra allowance. (The redundant break that
    # re-tested the while condition has been removed.)
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            column_count = get_span_column_count(span)
            if column_count > 1:
                text_row = span[0][0]
                text_column = span[0][1]
                text = table[text_row][text_column]
                end_column = text_column + column_count
                length = get_longest_line_length(text)
                available_space = (
                    sum(widths[text_column:end_column]) + column_count - 1)
                while length > available_space:
                    # grow every spanned column by one, then re-measure
                    for i in range(text_column, end_column):
                        widths[i] += 1
                    available_space = (
                        sum(widths[text_column:end_column]) +
                        column_count - 1)
    return widths
Make an empty table
Parameters
----------
row_count : int
The number of rows in the new table
column_count : int
The number of columns in the new table
Returns
-------
table : list of lists of str
Each cell will be an empty str ('')
def make_empty_table(row_count, column_count):
    """
    Make an empty table

    Parameters
    ----------
    row_count : int
        The number of rows in the new table
    column_count : int
        The number of columns in the new table

    Returns
    -------
    table : list of lists of str
        Each cell will be an empty str ('')
    """
    # build independent row lists so mutating one cell never aliases
    # another row
    return [['' for _ in range(column_count)] for _ in range(row_count)]
\
The linear estimation of the parameter vector :math:`\beta` given by
.. math::
\beta = (X^T X)^-1 X^T y
def beta(self):
    r'''
    The linear estimation of the parameter vector :math:`\beta` given by

    .. math::
        \beta = (X^T X)^{-1} X^T y
    '''
    # normal equations: solve (X'X) beta = X'y instead of inverting
    transposed = self.X.transpose()
    gram = dot(transposed, self.X)
    moment = dot(transposed, self.y)
    return linalg.solve(gram, moment)
Return a generator of formatters codes of type typ
def oftype(self, typ):
    '''Return a generator of formatters codes of type typ'''
    # lazily filter the mapping by each formatter's declared type
    for code, formatter in self.items():
        if formatter.type == typ:
            yield code
List of names for series in dataset.
It will always return a list or names with length given by
:class:`~.DynData.count`.
def names(self, with_namespace=False):
    '''List of names for series in dataset.

    It will always return a list or names with length given by
    :class:`~.DynData.count`.
    '''
    count = self.count()
    result = self.name.split(settings.splittingnames)[:count]
    # pad with 'unnamed1', 'unnamed2', ... until we have one name
    # per series
    filler = 0
    while len(result) < count:
        filler += 1
        result.append('unnamed%s' % filler)
    if with_namespace and self.namespace:
        prefix = self.namespace + settings.field_separator
        return [prefix + name for name in result]
    return result
Dump the timeseries using a specific ``format``.
def dump(self, format=None, **kwargs):
    """Dump the timeseries using a specific ``format``.

    Without a ``format`` the default ``display()`` output is returned;
    an unknown format raises ``FormattingException``.
    """
    # no format requested: fall back to the default display
    if not format:
        return self.display()
    formatter = Formatters.get(format, None)
    if not formatter:
        raise FormattingException('Formatter %s not available' % format)
    return formatter(self, **kwargs)
expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression
| expression EQUAL expression
| expression CONCAT expression
| expression SPLIT expression
def p_expression_binop(p):
    # NOTE: the string below is a PLY grammar rule -- PLY reads it at
    # runtime to build the parser, so it must not be edited as if it
    # were documentation.
    '''expression : expression PLUS expression
    | expression MINUS expression
    | expression TIMES expression
    | expression DIVIDE expression
    | expression EQUAL expression
    | expression CONCAT expression
    | expression SPLIT expression'''
    # p[1] and p[3] are the operand expressions; p[2] is the operator
    # token matched by the grammar rule above
    v = p[2]
    if v == '+':
        p[0] = PlusOp(p[1], p[3])
    elif v == '-':
        p[0] = MinusOp(p[1], p[3])
    elif v == '*':
        p[0] = MultiplyOp(p[1], p[3])
    elif v == '/':
        p[0] = DivideOp(p[1], p[3])
    elif v == '=':
        p[0] = EqualOp(p[1], p[3])
    elif v == settings.concat_operator:
        p[0] = ConcatenationOp(p[1], p[3])
    elif v == settings.separator_operator:
        p[0] = SplittingOp(p[1], p[3])
    elif v == settings.field_operator:
        # NOTE(review): no FIELD alternative appears in the grammar rule
        # above -- confirm whether this branch is reachable
        p[0] = Symbol(p[1], field=p[3])
def p_expression_group(p):
    # NOTE: the string below is a PLY grammar rule -- PLY reads it at
    # runtime to build the parser, so it must not be edited as if it
    # were documentation.
    '''expression : LPAREN expression RPAREN
    | LSQUARE expression RSQUARE'''
    # p[1] is the opening bracket token; parentheses wrap function
    # arguments, square brackets wrap a timeseries entry
    v = p[1]
    if v == '(':
        p[0] = functionarguments(p[2])
    elif v == '[':
        p[0] = tsentry(p[2])
Combine the side of cell1's grid text with cell2's text.
For example::
cell1 cell2 merge "RIGHT"
+-----+ +------+ +-----+------+
| foo | | dog | | foo | dog |
| | +------+ | +------+
| | | cat | | | cat |
| | +------+ | +------+
| | | bird | | | bird |
+-----+ +------+ +-----+------+
Parameters
----------
cell1 : dashtable.data2rst.Cell
cell2 : dashtable.data2rst.Cell
def merge_cells(cell1, cell2, direction):
    """
    Combine the side of cell1's grid text with cell2's text.

    For example::

        cell1    cell2      merge "RIGHT"

        +-----+  +------+   +-----+------+
        | foo |  | dog  |   | foo | dog  |
        |     |  +------+   |     +------+
        |     |  | cat  |   |     | cat  |
        |     |  +------+   |     +------+
        |     |  | bird |   |     | bird |
        +-----+  +------+   +-----+------+

    ``cell1`` is modified in place; ``direction`` is one of "RIGHT",
    "TOP", "BOTTOM" or "LEFT" and names the side of cell1 being merged.

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell
    """
    lines1 = cell1.text.split("\n")
    lines2 = cell2.text.split("\n")

    if direction == "RIGHT":
        # glue cell2 on, dropping its left border column
        merged = [lines1[i] + lines2[i][1:] for i in range(len(lines1))]
        cell1.text = "\n".join(merged)
        cell1.column_count += cell2.column_count
    elif direction == "TOP":
        # drop whichever duplicate border row has fewer '+' junctions
        if lines1[0].count('+') > lines2[-1].count('+'):
            lines2 = lines2[:-1]
        else:
            lines1 = lines1[1:]
        cell1.text = "\n".join(lines2 + lines1)
        cell1.row_count += cell2.row_count
        # cell1 now starts where cell2 did
        cell1.row = cell2.row
        cell1.column = cell2.column
    elif direction == "BOTTOM":
        if (lines1[-1].count('+') > lines2[0].count('+') or
                cell1.is_header):
            lines2 = lines2[1:]
        else:
            lines1 = lines1[:-1]
        cell1.text = "\n".join(lines1 + lines2)
        cell1.row_count += cell2.row_count
    elif direction == "LEFT":
        # glue cell2 on, dropping its right border column
        merged = [lines2[i][:-1] + lines1[i] for i in range(len(lines1))]
        cell1.text = "\n".join(merged)
        cell1.column_count += cell2.column_count
        cell1.row = cell2.row
        cell1.column = cell2.column
Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
(str, obj) tuples: the class-level attributes.
def iterclass(cls):
    """Iterates over (valid) attributes of a class.

    Args:
        cls (object): the class to iterate over

    Yields:
        (str, obj) tuples: the class-level attributes.
    """
    for attr_name in dir(cls):
        # dir() can list names whose access raises (e.g. unset slots);
        # hasattr filters those out before we fetch the value
        if hasattr(cls, attr_name):
            yield attr_name, getattr(cls, attr_name)
Returns a tcp socket to (host/port). Retries forever if connection fails
def _mksocket(host, port, q, done, stop):
    """Return a TCP socket connected to (host, port).

    Retries (with capped exponential backoff: 1, 2, 4, ... 30, 30s)
    until the connection succeeds or *stop* is set; returns None when
    stopped before connecting. *q* and *done* are unused but kept for
    interface compatibility with callers.
    """
    attempt = 0
    while not stop.is_set():
        # A socket whose connect() failed cannot be reliably reused, so
        # build a fresh one for every attempt (the old code reused one
        # socket object, which raises EISCONN/EINVAL on many platforms).
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(2)
        try:
            s.connect((host, port))
            return s
        except Exception:
            s.close()
            # Simple exponential backoff: sleep for 1,2,4,8,16,30,30...
            time.sleep(min(30, 2 ** attempt))
            attempt += 1
    return None
Worker thread. Connect to host/port, pull data from q until done is set
def _push(host, port, q, done, mps, stop, test_mode):
    """Worker thread. Connect to host/port, pull data from q until done is set"""
    sock = None
    retry_line = None
    # Run until stopped, or until the producer signalled done AND both
    # the queue and the pending retry line have drained.
    while not ( stop.is_set() or ( done.is_set() and retry_line == None and q.empty()) ):
        stime = time.time()
        # (re)connect lazily; in test_mode no socket is used at all
        if sock == None and not test_mode:
            sock = _mksocket(host, port, q, done, stop)
            if sock == None:
                break
        if retry_line:
            # resend the line whose send failed on a previous iteration
            line = retry_line
            retry_line = None
        else:
            try:
                line = q.get(True, 1) # blocking, with 1 second timeout
            except:
                if done.is_set(): # no items in queue, and parent finished
                    break
                else: # no items in queue, but parent might send more
                    continue
        if not test_mode:
            try:
                sock.sendall(line.encode('utf-8'))
            except:
                sock = None # notify that we need to make a new socket at start of loop
                retry_line = line # can't really put back in q, so remember to retry this line
                continue
        etime = time.time() - stime #time that actually elapsed
        #Expected value of wait_time is 1/MPS_LIMIT, ie. MPS_LIMIT per second.
        if mps > 0:
            wait_time = (2.0 * random.random()) / (mps)
            if wait_time > etime: #if we should wait
                time.sleep(wait_time - etime) #then wait
    if sock:
        sock.close()
Log metric name with value val. You must include at least one tag as a kwarg
def log(self, name, val, **tags):
    """Log metric name with value val. You must include at least one tag as a kwarg"""
    global _last_timestamp, _last_metrics
    # do not allow .log after closing
    assert not self.done.is_set(), "worker thread has been closed"
    # check if valid metric name
    assert all(c in _valid_metric_chars for c in name), "invalid metric name " + name
    val = float(val) #Duck type to float/int, if possible.
    # collapse whole-number floats to ints so they serialize without '.0'
    if int(val) == val:
        val = int(val)
    if self.host_tag and 'host' not in tags:
        tags['host'] = self.host_tag
    # get timestamp from system time, unless it's supplied as a tag
    timestamp = int(tags.pop('timestamp', time.time()))
    assert not self.done.is_set(), "tsdb object has been closed"
    assert tags != {}, "Need at least one tag"
    tagvals = ' '.join(['%s=%s' % (k, v) for k, v in tags.items()])
    # OpenTSDB has major problems if you insert a data point with the same
    # metric, timestamp and tags. So we keep a temporary set of what points
    # we have sent for the last timestamp value. If we encounter a duplicate,
    # it is dropped.
    unique_str = "%s, %s, %s, %s, %s" % (name, timestamp, tagvals, self.host, self.port)
    if timestamp == _last_timestamp or _last_timestamp == None:
        if unique_str in _last_metrics:
            return # discard duplicate metrics
        else:
            _last_metrics.add(unique_str)
    else:
        # new timestamp: restart duplicate tracking for this second
        _last_timestamp = timestamp
        _last_metrics.clear()
    line = "put %s %d %s %s\n" % (name, timestamp, val, tagvals)
    try:
        self.q.put(line, False)
        self.queued += 1
    except queue.Full:
        print("potsdb - Warning: dropping oldest metric because Queue is full. Size: %s" % self.q.qsize(), file=sys.stderr)
        self.q.get() #Drop the oldest metric to make room
        self.q.put(line, False)
    return line
Scans COM1 through COM255 for available serial ports
returns a list of available ports
def available_ports():
    """
    Scans COM1 through COM255 for available serial ports

    Returns
    -------
    list
        The (closed) Serial objects for each port that could be opened.
    """
    ports = []
    # COM port numbering starts at 1; the old range(256) also probed
    # the invalid COM0
    for i in range(1, 256):
        try:
            p = Serial('COM%d' % i)
            p.close()
            ports.append(p)
        except SerialException:
            pass
    return ports
reads the current response data from the object and returns
it in a dict.
Currently 'time' is reported as 0 until clock drift issues are
resolved.
def get_current_response(self):
    """
    reads the current response data from the object and returns
    it in a dict.

    Currently 'time' is reported as 0 until clock drift issues are
    resolved.
    """
    # default payload returned when no response is queued
    response = {'port': 0,
                'pressed': False,
                'key': 0,
                'time': 0}
    if len(self.__response_structs_queue) > 0:
        # pop the oldest entry and hand back a copy, so internal
        # bookkeeping elsewhere in XidConnection is not affected by
        # whatever the caller does with the dict
        response = self.__response_structs_queue.pop(0).copy()
    return response
For all of the com ports connected to the computer, send an
XID command '_c1'. If the device response with '_xid', it is
an xid device.
def detect_xid_devices(self):
    """
    For all of the com ports connected to the computer, send an
    XID command '_c1'. If the device response with '_xid', it is
    an xid device.
    """
    self.__xid_cons = []
    for c in self.__com_ports:
        device_found = False
        # try each supported baud rate until the device answers
        for b in [115200, 19200, 9600, 57600, 38400]:
            con = XidConnection(c, b)
            try:
                con.open()
            except SerialException:
                # port unusable at this baud rate; try the next one
                continue
            con.flush_input()
            con.flush_output()
            returnval = con.send_xid_command("_c1", 5).decode('ASCII')
            if returnval.startswith('_xid'):
                device_found = True
                self.__xid_cons.append(con)
                if(returnval != '_xid0'):
                    # set the device into XID mode
                    con.send_xid_command('c10')
                    con.flush_input()
                    con.flush_output()
                # be sure to reset the timer to avoid the 4.66 hours
                # problem. (refer to XidConnection.xid_input_found to
                # read about the 4.66 hours)
                con.send_xid_command('e1')
                con.send_xid_command('e5')
            con.close()
            if device_found:
                break
Returns the device at the specified index
def device_at_index(self, index):
    """
    Returns the device at the specified index

    Raises
    ------
    ValueError
        If ``index`` is out of range for the detected devices.
    """
    connections = self.__xid_cons
    # guard against indexes past the end of the detected-device list
    if index >= len(connections):
        raise ValueError("Invalid device index")
    return connections[index]
gets the value from the device's base timer
def query_base_timer(self):
    """
    gets the value from the device's base timer
    """
    reply = self.con.send_xid_command("e3", 6)
    # 6-byte reply: two echoed command characters followed by a
    # little-endian unsigned 32-bit timer value
    _, _, timer_value = unpack('<ccI', reply)
    return timer_value
Polls the device for user input
If there is a keymapping for the device, the key map is applied
to the key reported from the device.
If a response is waiting to be processed, the response is appended
to the internal response_queue
def poll_for_response(self):
    """
    Polls the device for user input

    If there is a keymapping for the device, the key map is applied
    to the key reported from the device.

    If a response is waiting to be processed, the response is appended
    to the internal response_queue
    """
    key_state = self.con.check_for_keypress()
    # nothing pressed: nothing to queue
    if key_state == NO_KEY_DETECTED:
        return
    response = self.con.get_current_response()
    if self.keymap is not None:
        response['key'] = self.keymap[response['key']]
    else:
        # without a keymap, normalize the device's 1-based key number
        response['key'] -= 1
    self.response_queue.append(response)
Sets the pulse duration for events in miliseconds when activate_line
is called
def set_pulse_duration(self, duration):
    """
    Sets the pulse duration for events in miliseconds when activate_line
    is called

    Parameters
    ----------
    duration : int
        Pulse duration in milliseconds; must fit in an unsigned 32-bit
        integer (0 <= duration <= 4294967295).

    Raises
    ------
    ValueError
        If duration exceeds 4294967295.
    """
    if duration > 4294967295:
        raise ValueError('Duration is too long. Please choose a value '
                         'less than 4294967296.')

    # The device expects 'mp' followed by the duration as a
    # little-endian unsigned 32-bit value; int.to_bytes replaces the
    # old hand-rolled hex/byte-swapping and produces identical output.
    payload = duration.to_bytes(4, byteorder='little')
    command = 'mp' + ''.join(chr(byte) for byte in payload)
    self.con.send_xid_command(command, 0)
Triggers an output line on StimTracker.
There are 8 output lines on StimTracker that can be raised in any
combination. To raise lines 1 and 7, for example, you pass in
the list: activate_line(lines=[1, 7]).
To raise a single line, pass in just an integer, or a list with a
single element to the lines keyword argument:
activate_line(lines=3)
or
activate_line(lines=[3])
The `lines` argument must either be an Integer, list of Integers, or
None.
If you'd rather specify a bitmask for setting the lines, you can use
the bitmask keyword argument. Bitmask must be a Integer value between
0 and 255 where 0 specifies no lines, and 255 is all lines. For a
mapping between lines and their bit values, see the `_lines` class
variable.
To use this, call the function as so to activate lines 1 and 6:
activate_line(bitmask=33)
leave_remaining_lines tells the function to only operate on the lines
specified. For example, if lines 1 and 8 are active, and you make
the following function call:
activate_line(lines=4, leave_remaining_lines=True)
This will result in lines 1, 4 and 8 being active.
If you call activate_line(lines=4) with leave_remaining_lines=False
(the default), if lines 1 and 8 were previously active, only line 4
will be active after the call.
def activate_line(self, lines=None, bitmask=None,
                  leave_remaining_lines=False):
    """
    Triggers an output line on StimTracker.

    There are 8 output lines on StimTracker that can be raised in any
    combination. To raise lines 1 and 7, for example, you pass in
    the list: activate_line(lines=[1, 7]).

    To raise a single line, pass in just an integer, or a list with a
    single element to the lines keyword argument:

        activate_line(lines=3)   or   activate_line(lines=[3])

    The `lines` argument must either be an Integer, list of Integers, or
    None. Exactly one of `lines` and `bitmask` must be given.

    Alternatively, `bitmask` specifies the lines directly as an Integer
    between 0 and 255, where 0 is no lines and 255 is all lines; see
    the `_lines` class variable for the line/bit mapping. For example,
    activate_line(bitmask=33) activates lines 1 and 6.

    leave_remaining_lines tells the function to only operate on the
    lines specified, keeping any currently-active lines active;
    with the default False, previously active lines are cleared.
    """
    if lines is None and bitmask is None:
        raise ValueError('Must set one of lines or bitmask')
    if lines is not None and bitmask is not None:
        raise ValueError('Can only set one of lines or bitmask')

    if bitmask is not None and bitmask not in range(0, 256):
        raise ValueError('bitmask must be an integer between '
                         '0 and 255')

    if lines is not None:
        # normalize a bare integer into a one-element list
        if not isinstance(lines, list):
            lines = [lines]
        # translate line numbers into the device's bitmask encoding
        bitmask = 0
        for line_number in lines:
            if not 1 <= line_number <= 8:
                raise ValueError('Line numbers must be between 1 and 8 '
                                 '(inclusive)')
            bitmask |= self._lines[line_number]

    self.con.set_digital_output_lines(bitmask, leave_remaining_lines)
The inverse of activate_line. If a line is active, it deactivates it.
This has the same parameters as activate_line()
def clear_line(self, lines=None, bitmask=None,
leave_remaining_lines=False):
"""
The inverse of activate_line. If a line is active, it deactivates it.
This has the same parameters as activate_line()
"""
if lines is None and bitmask is None:
raise ValueError('Must set one of lines or bitmask')
if lines is not None and bitmask is not None:
raise ValueError('Can only set one of lines or bitmask')
if bitmask is not None:
if bitmask not in range(0, 256):
raise ValueError('bitmask must be an integer between '
'0 and 255')
if lines is not None:
if not isinstance(lines, list):
lines = [lines]
bitmask = 0
for l in lines:
if l < 1 or l > 8:
raise ValueError('Line numbers must be between 1 and 8 '
'(inclusive)')
bitmask |= self._lines[l]
self.con.clear_digital_output_lines(bitmask, leave_remaining_lines) |
Initializes the device with the proper keymaps and name
    def init_device(self):
        """
        Initializes the device with the proper keymaps and name.

        Queries the XID product id (command ``_d2``) and, for response
        pads, the model id (command ``_d3``), then stores the matching
        ``ResponseDevice``/``StimTracker`` implementation in ``self._impl``.

        Raises XidError for an unknown RB model or an invalid XID device.
        """
        # The command response may not be numeric (e.g. b'S' for
        # StimTracker); fall back to the raw value when int() fails.
        try:
            product_id = int(self._send_command('_d2', 1))
        except ValueError:
            product_id = self._send_command('_d2', 1)
        if product_id == 0:
            self._impl = ResponseDevice(
                self.con,
                'Cedrus Lumina LP-400 Response Pad System',
                lumina_keymap)
        elif product_id == 1:
            self._impl = ResponseDevice(
                self.con,
                'Cedrus SV-1 Voice Key',
                None,
                'Voice Response')
        elif product_id == 2:
            # Response-pad family: the model id selects the RB-x30 keymap.
            model_id = int(self._send_command('_d3', 1))
            if model_id == 1:
                self._impl = ResponseDevice(
                    self.con,
                    'Cedrus RB-530',
                    rb_530_keymap)
            elif model_id == 2:
                self._impl = ResponseDevice(
                    self.con,
                    'Cedrus RB-730',
                    rb_730_keymap)
            elif model_id == 3:
                self._impl = ResponseDevice(
                    self.con,
                    'Cedrus RB-830',
                    rb_830_keymap)
            elif model_id == 4:
                self._impl = ResponseDevice(
                    self.con,
                    'Cedrus RB-834',
                    rb_834_keymap)
            else:
                raise XidError('Unknown RB Device')
        elif product_id == 4:
            self._impl = StimTracker(
                self.con,
                'Cedrus C-POD')
        elif product_id == b'S':
            # NOTE(review): comparing against bytes relies on the ValueError
            # fallback above returning raw bytes -- confirm with the
            # connection implementation.
            self._impl = StimTracker(
                self.con,
                'Cedrus StimTracker')
        elif product_id == -99:
            raise XidError('Invalid XID device')
        # NOTE(review): if product_id matches none of the branches,
        # self._impl is silently left unset.
Send an XID command to the device
def _send_command(self, command, expected_bytes):
"""
Send an XID command to the device
"""
response = self.con.send_xid_command(command, expected_bytes)
return response |
Returns a list of all Xid devices connected to your computer.
def get_xid_devices():
    """
    Returns a list of all Xid devices connected to your computer.
    """
    scanner = XidScanner()
    devices = []
    for index in range(scanner.device_count()):
        # Open the serial connection before wrapping it in an XidDevice.
        connection = scanner.device_at_index(index)
        connection.open()
        devices.append(XidDevice(connection))
    return devices
returns device at a given index.
Raises ValueError if the device at the passed in index doesn't
exist.
def get_xid_device(device_number):
    """
    returns device at a given index.

    Raises ValueError if the device at the passed in index doesn't
    exist.
    """
    connection = XidScanner().device_at_index(device_number)
    connection.open()
    return XidDevice(connection)
Append receiver.
def connect(self, receiver):
"""Append receiver."""
if not callable(receiver):
raise ValueError('Invalid receiver: %s' % receiver)
self.receivers.append(receiver) |
Remove receiver.
def disconnect(self, receiver):
"""Remove receiver."""
try:
self.receivers.remove(receiver)
except ValueError:
raise ValueError('Unknown receiver: %s' % receiver) |
Send signal.
def send(self, instance, *args, **kwargs):
"""Send signal."""
for receiver in self.receivers:
receiver(instance, *args, **kwargs) |
Support read slaves.
def select(cls, *args, **kwargs):
"""Support read slaves."""
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query |
Send signals.
def save(self, force_insert=False, **kwargs):
"""Send signals."""
created = force_insert or not bool(self.pk)
self.pre_save.send(self, created=created)
super(Model, self).save(force_insert=force_insert, **kwargs)
self.post_save.send(self, created=created) |
Send signals.
    def delete_instance(self, *args, **kwargs):
        """Send signals.

        Emits ``pre_delete`` before and ``post_delete`` after delegating
        the actual row deletion to peewee's ``delete_instance``.
        """
        self.pre_delete.send(self)
        super(Model, self).delete_instance(*args, **kwargs)
        self.post_delete.send(self)
Get database from given URI/Object.
def get_database(obj, **params):
    """Get database from given URI/Object.

    Strings are treated as connection URIs; anything else is assumed to
    already be a database object and is returned unchanged.
    """
    if not isinstance(obj, string_types):
        return obj
    return connect(obj, **params)
Initialize application.
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close) |
Close connection to database.
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response |
Bind model to self database.
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta}) |
Return self.application models.
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore] |
Create a new migration.
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto) |
Run migrations.
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations)) |
Rollback migrations.
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name) |
List migrations.
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff)) |
Merge migrations.
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge() |
Integrate a Flask-Script.
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager |
DOES NOT WORK WELL WITH MOPIDY
Hack from
https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program
to support updating the settings, since mopidy is not able to do that yet
Restarts the current program
Note: this function does not return. Any cleanup action (like
saving data) must be done before calling this function
def restart_program():
    """
    DOES NOT WORK WELL WITH MOPIDY

    Hack from
    https://www.daniweb.com/software-development/python/code/260268/restart-your-python-program
    to support updating the settings, since mopidy is not able to do that yet

    Restarts the current program.

    Note: this function does not return. Any cleanup action (like
    saving data) must be done before calling this function.
    """
    # Replace the current process image with a fresh interpreter running
    # the same script with the same arguments.
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
load liac-arff to pandas DataFrame
:param dict arff:arff dict created liac-arff
:rtype: DataFrame
:return: pandas DataFrame
def __load(arff):
    """
    load liac-arff to pandas DataFrame

    :param dict arff: arff dict created by liac-arff
    :rtype: DataFrame
    :return: pandas DataFrame
    """
    # Encode each attribute as "name@type"; nominal attributes keep their
    # allowed values as "name@{v1,v2,...}" so __dump() can reconstruct them.
    columns = [
        "%s@{%s}" % (attr[0], ','.join(attr[1])) if isinstance(attr[1], list)
        else "%s@%s" % (attr[0], attr[1])
        for attr in arff['attributes']
    ]
    return pd.DataFrame(data=arff['data'], columns=columns)
dump DataFrame to liac-arff
:param DataFrame df:
:param str relation:
:param str description:
:rtype: dict
:return: liac-arff dict
def __dump(df, relation='data', description=''):
    """
    dump DataFrame to liac-arff

    :param DataFrame df:
    :param str relation:
    :param str description:
    :rtype: dict
    :return: liac-arff dict
    """
    attrs = []
    for col in df.columns:
        # Columns are encoded as "name@type" (see __load); nominal types
        # carry their values inside braces.
        parts = col.split('@')
        name, kind = parts[0], parts[1]
        if '{' in kind and '}' in kind:
            values = kind.replace('{', '').replace('}', '').split(',')
            attrs.append((name, values))
        else:
            attrs.append((name, kind))
    return {
        'attributes': attrs,
        'data': list(df.values),
        'description': description,
        'relation': relation
    }
dump DataFrame to file
:param DataFrame df:
:param file fp:
def dump(df, fp):
    """
    dump DataFrame to file

    :param DataFrame df: frame to serialise
    :param file fp: writable file-like object
    """
    liacarff.dump(__dump(df), fp)
Insert `marker` at `offset` into `text`, and return the marked
line.
.. code-block:: python
>>> markup_line('0\\n1234\\n56', 3)
1>>!<<234
def markup_line(text, offset, marker='>>!<<'):
    """Insert `marker` at `offset` into `text`, and return the marked
    line.

    .. code-block:: python

       >>> markup_line('0\\n1234\\n56', 3)
       1>>!<<234

    """
    # Line start: one past the previous newline (rfind returns -1 when
    # there is none, so +1 lands on index 0).
    start = text.rfind('\n', 0, offset) + 1
    stop = text.find('\n', offset)
    if stop < 0:
        stop = len(text)
    return ''.join((text[start:offset], marker, text[offset:stop]))
Initialize a tokenizer. Should only be called by the
:func:`~textparser.Parser.tokenize` method in the parser.
def tokenize_init(spec):
    """Initialize a tokenizer. Should only be called by the
    :func:`~textparser.Parser.tokenize` method in the parser.
    """
    # Combine every token spec into a single alternation of named groups.
    pattern = '|'.join('(?P<{}>{})'.format(name, regex)
                       for name, regex in spec)
    return [Token('__SOF__', '__SOF__', 0)], pattern
Tokenize given string `text`, and return a list of tokens. Raises
:class:`~textparser.TokenizeError` on failure.
This method should only be called by
:func:`~textparser.Parser.parse()`, but may very well be
overridden if the default implementation does not match the
parser needs.
def tokenize(self, text):
"""Tokenize given string `text`, and return a list of tokens. Raises
:class:`~textparser.TokenizeError` on failure.
This method should only be called by
:func:`~textparser.Parser.parse()`, but may very well be
overridden if the default implementation does not match the
parser needs.
"""
names, specs = self._unpack_token_specs()
keywords = self.keywords()
tokens, re_token = tokenize_init(specs)
for mo in re.finditer(re_token, text, re.DOTALL):
kind = mo.lastgroup
if kind == 'SKIP':
pass
elif kind != 'MISMATCH':
value = mo.group(kind)
if value in keywords:
kind = value
if kind in names:
kind = names[kind]
tokens.append(Token(kind, value, mo.start()))
else:
raise TokenizeError(text, mo.start())
return tokens |
Parse given string `text` and return the parse tree. Raises
:class:`~textparser.ParseError` on failure.
Returns a parse tree of tokens if `token_tree` is ``True``.
.. code-block:: python
>>> MyParser().parse('Hello, World!')
['Hello', ',', 'World', '!']
>>> tree = MyParser().parse('Hello, World!', token_tree=True)
>>> from pprint import pprint
>>> pprint(tree)
[Token(kind='WORD', value='Hello', offset=0),
Token(kind=',', value=',', offset=5),
Token(kind='WORD', value='World', offset=7),
Token(kind='!', value='!', offset=12)]
def parse(self, text, token_tree=False, match_sof=False):
"""Parse given string `text` and return the parse tree. Raises
:class:`~textparser.ParseError` on failure.
Returns a parse tree of tokens if `token_tree` is ``True``.
.. code-block:: python
>>> MyParser().parse('Hello, World!')
['Hello', ',', 'World', '!']
>>> tree = MyParser().parse('Hello, World!', token_tree=True)
>>> from pprint import pprint
>>> pprint(tree)
[Token(kind='WORD', value='Hello', offset=0),
Token(kind=',', value=',', offset=5),
Token(kind='WORD', value='World', offset=7),
Token(kind='!', value='!', offset=12)]
"""
try:
tokens = self.tokenize(text)
if len(tokens) == 0 or tokens[-1].kind != '__EOF__':
tokens.append(Token('__EOF__', '__EOF__', len(text)))
if not match_sof:
if len(tokens) > 0 and tokens[0].kind == '__SOF__':
del tokens[0]
return Grammar(self.grammar()).parse(tokens, token_tree)
except (TokenizeError, GrammarError) as e:
raise ParseError(text, e.offset) |
Calculates inverse profile - for given y returns x such that f(x) = y
If given y is not found in the self.y, then interpolation is used.
By default returns first result looking from left,
if reverse argument set to True,
looks from right. If y is outside range of self.y
then np.nan is returned.
Use inverse lookup to get x-coordinate of first point:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(5.))
0.0
Use inverse lookup to get x-coordinate of second point,
looking from left:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10.))
0.1
Use inverse lookup to get x-coordinate of fourth point,
looking from right:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10., reverse=True))
0.3
Use interpolation between first two points:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(7.5))
0.05
Looking for y below self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(2.0))
nan
Looking for y above self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(22.0))
nan
:param y: reference value
:param reverse: boolean value - direction of lookup
:return: x value corresponding to given y or NaN if not found
def x_at_y(self, y, reverse=False):
"""
Calculates inverse profile - for given y returns x such that f(x) = y
If given y is not found in the self.y, then interpolation is used.
By default returns first result looking from left,
if reverse argument set to True,
looks from right. If y is outside range of self.y
then np.nan is returned.
Use inverse lookup to get x-coordinate of first point:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(5.))
0.0
Use inverse lookup to get x-coordinate of second point,
looking from left:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10.))
0.1
Use inverse lookup to get x-coordinate of fourth point,
looking from right:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(10., reverse=True))
0.3
Use interpolation between first two points:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(7.5))
0.05
Looking for y below self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(2.0))
nan
Looking for y above self.y range:
>>> float(Profile([[0.0, 5.0], [0.1, 10.0], [0.2, 20.0], [0.3, 10.0]])\
.x_at_y(22.0))
nan
:param y: reference value
:param reverse: boolean value - direction of lookup
:return: x value corresponding to given y or NaN if not found
"""
logger.info('Running %(name)s.y_at_x(y=%(y)s, reverse=%(rev)s)',
{"name": self.__class__, "y": y, "rev": reverse})
# positive or negative direction handles
x_handle, y_handle = self.x, self.y
if reverse:
x_handle, y_handle = self.x[::-1], self.y[::-1]
# find the index of first value in self.y greater or equal than y
cond = y_handle >= y
ind = np.argmax(cond)
# two boundary conditions where x cannot be found:
# A) y > max(self.y)
# B) y < min(self.y)
# A) if y > max(self.y) then condition self.y >= y
# will never be satisfied
# np.argmax( cond ) will be equal 0 and cond[ind] will be False
if not cond[ind]:
return np.nan
# B) if y < min(self.y) then condition self.y >= y
# will be satisfied on first item
# np.argmax(cond) will be equal 0,
# to exclude situation that y_handle[0] = y
# we also check if y < y_handle[0]
if ind == 0 and y < y_handle[0]:
return np.nan
# use lookup if y in self.y:
if cond[ind] and y_handle[ind] == y:
return x_handle[ind]
# alternatively - pure python implementation
# return x_handle[ind] - \
# ((x_handle[ind] - x_handle[ind - 1]) / \
# (y_handle[ind] - y_handle[ind - 1])) * \
# (y_handle[ind] - y)
# use interpolation
sl = slice(ind - 1, ind + 1)
return np.interp(y, y_handle[sl], x_handle[sl]) |
Width at given level
:param level:
:return:
def width(self, level):
"""
Width at given level
:param level:
:return:
"""
return self.x_at_y(level, reverse=True) - self.x_at_y(level) |
Normalize to 1 over [-dt, +dt] area, if allow_cast is set
to True, division not in place and casting may occur.
If division in place is not possible and allow_cast is False
an exception is raised.
>>> a = Profile([[0, 0], [1, 5], [2, 10], [3, 5], [4, 0]])
>>> a.normalize(1, allow_cast=True)
>>> print(a.y)
[0. 2. 4. 2. 0.]
:param dt:
:param allow_cast:
    def normalize(self, dt, allow_cast=True):
        """
        Normalize to 1 over [-dt, +dt] area, if allow_cast is set
        to True, division not in place and casting may occur.
        If division in place is not possible and allow_cast is False
        an exception is raised.

        >>> a = Profile([[0, 0], [1, 5], [2, 10], [3, 5], [4, 0]])
        >>> a.normalize(1, allow_cast=True)
        >>> print(a.y)
        [0. 2. 4. 2. 0.]

        :param dt: half-width of the central window used to compute the
            normalization factor; must be positive
        :param allow_cast: allow an out-of-place division (with dtype
            casting) when in-place division fails
        """
        if dt <= 0:
            raise ValueError("Expected positive input")
        logger.info('Running %(name)s.normalize(dt=%(dt)s)', {"name": self.__class__, "dt": dt})
        # Normalization factor: mean of y over the window |x| <= dt.
        try:
            ave = np.average(self.y[np.fabs(self.x) <= dt])
        except RuntimeWarning as e:
            # NOTE(review): np.average emits (not raises) RuntimeWarning
            # unless warnings are configured as errors -- confirm this
            # path is reachable in this project's setup.
            logger.error('in normalize(). self class is %(name)s, dt=%(dt)s', {"name": self.__class__, "dt": dt})
            raise Exception("Scaling factor error: {0}".format(e))
        # Prefer in-place division (keeps array identity and dtype); fall
        # back to a casting division only when explicitly allowed.
        try:
            self.y /= ave
        except TypeError as e:
            logger.warning("Division in place is impossible: %s", e)
            if allow_cast:
                self.y = self.y / ave
            else:
                logger.error("Division in place impossible - allow_cast flag set to True should help")
                raise
Rescales self.y by given factor, if allow_cast is set to True
and division in place is impossible - casting and not in place
division may occur. If in place is impossible and allow_cast
is set to False - an exception is raised.
Check simple rescaling by 2 with no casting
>>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.float)
>>> c.rescale(2, allow_cast=False)
>>> print(c.y)
[0. 2.5 5. ]
Check rescaling with floor division
>>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int)
>>> c.rescale(1.5, allow_cast=True)
>>> print(c.y)
[0 3 6]
>>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int)
>>> c.rescale(-1, allow_cast=True)
>>> print(c.y)
[ 0 -5 -10]
:param factor: rescaling factor, should be a number
:param allow_cast: bool - allow division not in place
    def rescale(self, factor=1.0, allow_cast=True):
        """
        Rescales self.y by given factor (note: the values are *divided*
        by ``factor``, as the doctests below show); if allow_cast is set
        to True and division in place is impossible - casting and not in
        place division may occur. If in place is impossible and
        allow_cast is set to False - an exception is raised.

        Check simple rescaling by 2 with no casting

        >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.float)
        >>> c.rescale(2, allow_cast=False)
        >>> print(c.y)
        [0. 2.5 5. ]

        Check rescaling with floor division

        >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int)
        >>> c.rescale(1.5, allow_cast=True)
        >>> print(c.y)
        [0 3 6]

        >>> c = Curve([[0, 0], [5, 5], [10, 10]], dtype=np.int)
        >>> c.rescale(-1, allow_cast=True)
        >>> print(c.y)
        [ 0 -5 -10]

        :param factor: rescaling factor, should be a number
        :param allow_cast: bool - allow division not in place
        """
        # In-place division keeps the original array and dtype (note the
        # floor-division effect for integer arrays in the doctest above).
        try:
            self.y /= factor
        except TypeError as e:
            logger.warning("Division in place is impossible: %s", e)
            if allow_cast:
                self.y = self.y / factor
            else:
                logger.error("allow_cast flag set to True should help")
                raise
Creating new Curve object in memory with domain passed as a parameter.
New domain must include in the original domain.
Copies values from original curve and uses interpolation to calculate
values for new points in domain.
Calculate y - values of example curve with changed domain:
>>> print(Curve([[0,0], [5, 5], [10, 0]])\
.change_domain([1, 2, 8, 9]).y)
[1. 2. 2. 1.]
:param domain: set of points representing new domain.
Might be a list or np.array.
:return: new Curve object with domain set by 'domain' parameter
def change_domain(self, domain):
"""
Creating new Curve object in memory with domain passed as a parameter.
New domain must include in the original domain.
Copies values from original curve and uses interpolation to calculate
values for new points in domain.
Calculate y - values of example curve with changed domain:
>>> print(Curve([[0,0], [5, 5], [10, 0]])\
.change_domain([1, 2, 8, 9]).y)
[1. 2. 2. 1.]
:param domain: set of points representing new domain.
Might be a list or np.array.
:return: new Curve object with domain set by 'domain' parameter
"""
logger.info('Running %(name)s.change_domain() with new domain range:[%(ymin)s, %(ymax)s]',
{"name": self.__class__, "ymin": np.min(domain), "ymax": np.max(domain)})
# check if new domain includes in the original domain
if np.max(domain) > np.max(self.x) or np.min(domain) < np.min(self.x):
logger.error('Old domain range: [%(xmin)s, %(xmax)s] does not include new domain range:'
'[%(ymin)s, %(ymax)s]', {"xmin": np.min(self.x), "xmax": np.max(self.x),
"ymin": np.min(domain), "ymax": np.max(domain)})
raise ValueError('in change_domain():' 'the old domain does not include the new one')
y = np.interp(domain, self.x, self.y)
# We need to join together domain and values (y) because we are recreating Curve object
# (we pass it as argument to self.__class__)
# np.dstack((arrays), axis=1) joins given arrays like np.dstack() but it also nests the result
# in additional list and this is the reason why we use [0] to remove this extra layer of list like this:
# np.dstack([[0, 5, 10], [0, 0, 0]]) gives [[[ 0, 0], [ 5, 0], [10, 0]]] so use dtack()[0]
# to get this: [[0,0], [5, 5], [10, 0]]
# which is a 2 dimensional array and can be used to create a new Curve object
obj = self.__class__(np.dstack((domain, y))[0], **self.__dict__['metadata'])
return obj |
Provides effective way to compute new domain basing on
step and fixp parameters. Then using change_domain() method
to create new object with calculated domain and returns it.
fixp doesn't have to be inside original domain.
Return domain of a new curve specified by
fixp=0 and step=1 and another Curve object:
>>> print(Curve([[0,0], [5, 5], [10, 0]]).rebinned(1, 0).x)
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.]
:param step: step size of new domain
:param fixp: fixed point one of the points in new domain
:return: new Curve object with domain specified by
step and fixp parameters
    def rebinned(self, step=0.1, fixp=0):
        """
        Provides effective way to compute new domain basing on
        step and fixp parameters. Then using change_domain() method
        to create new object with calculated domain and returns it.

        fixp doesn't have to be inside original domain.

        Return domain of a new curve specified by
        fixp=0 and step=1 and another Curve object:

        >>> print(Curve([[0,0], [5, 5], [10, 0]]).rebinned(1, 0).x)
        [ 0.  1.  2.  3.  4.  5.  6.  7.  8.  9. 10.]

        :param step: step size of new domain
        :param fixp: fixed point one of the points in new domain
        :return: new Curve object with domain specified by
            step and fixp parameters
        """
        logger.info('Running %(name)s.rebinned(step=%(st)s, fixp=%(fx)s)',
                    {"name": self.__class__, "st": step, "fx": fixp})
        a, b = (np.min(self.x), np.max(self.x))
        # Number of whole steps from fixp to each end of the original domain.
        count_start = abs(fixp - a) / step
        count_stop = abs(fixp - b) / step

        # depending on position of fixp with respect to the original domain
        # 3 cases may occur:
        if fixp < a:
            # fixp left of the domain: round both counts inward so every
            # generated point stays inside [a, b]
            count_start = math.ceil(count_start)
            count_stop = math.floor(count_stop)
        elif fixp > b:
            # fixp right of the domain: counts become negative offsets,
            # again rounded inward
            count_start = -math.floor(count_start)
            count_stop = -math.ceil(count_stop)
        else:
            # fixp inside the domain: walk left (negative) and right
            # (positive); int() truncation below keeps endpoints inside
            count_start = -count_start
            count_stop = count_stop
        domain = [fixp + n * step for n in range(int(count_start), int(count_stop) + 1)]
        return self.change_domain(domain)
Returns Y value at arg of self. Arg can be a scalar,
but also might be np.array or other iterable
(like list). If domain of self is not wide enough to
interpolate the value of Y, method will return
def_val for those arguments instead.
Check the interpolation when arg in domain of self:
>>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x([1, 2 ,3])
array([1., 2., 3.])
Check if behavior of the method is correct when arg
is partly outside the domain:
>>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x(\
[-1, 1, 2 ,3, 5], 100)
array([100., 1., 2., 3., 100.])
:param arg: x-value to calculate Y (may be an array or list as well)
:param def_val: default value to return if can't interpolate at arg
:return: np.array of Y-values at arg. If arg is a scalar,
will return scalar as well
def evaluate_at_x(self, arg, def_val=0):
"""
Returns Y value at arg of self. Arg can be a scalar,
but also might be np.array or other iterable
(like list). If domain of self is not wide enough to
interpolate the value of Y, method will return
def_val for those arguments instead.
Check the interpolation when arg in domain of self:
>>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x([1, 2 ,3])
array([1., 2., 3.])
Check if behavior of the method is correct when arg
id partly outside the domain:
>>> Curve([[0, 0], [2, 2], [4, 4]]).evaluate_at_x(\
[-1, 1, 2 ,3, 5], 100)
array([100., 1., 2., 3., 100.])
:param arg: x-value to calculate Y (may be an array or list as well)
:param def_val: default value to return if can't interpolate at arg
:return: np.array of Y-values at arg. If arg is a scalar,
will return scalar as well
"""
y = np.interp(arg, self.x, self.y, left=def_val, right=def_val)
return y |
Method that calculates difference between 2 curves
(or subclasses of curves). Domain of self must be in
domain of curve2 what means min(self.x) >= min(curve2.x)
and max(self.x) <= max(curve2.x).
Might modify self, and can return the result or None
Use subtract as -= operator, check whether returned value is None:
>>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
Curve([[-1, 1], [5, 1]])) is None
True
Use subtract again but return a new object this time.
>>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
Curve([[-1, 1], [5, 1]]), new_obj=True).y
DataSet([-1., 0., 1., 0.])
Try using wrong inputs to create a new object,
and check whether it throws an exception:
>>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
Curve([[1, -1], [2, -1]]), new_obj=True) is None
Traceback (most recent call last):
...
Exception: curve2 does not include self domain
:param curve2: second object to calculate difference
:param new_obj: if True, method is creating new object
instead of modifying self
:return: None if new_obj is False (but will modify self)
or type(self) object containing the result
    def subtract(self, curve2, new_obj=False):
        """
        Method that calculates difference between 2 curves
        (or subclasses of curves). Domain of self must be in
        domain of curve2, which means min(self.x) >= min(curve2.x)
        and max(self.x) <= max(curve2.x).
        Might modify self, and can return the result or None

        Use subtract as -= operator, check whether returned value is None:
        >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
        Curve([[-1, 1], [5, 1]])) is None
        True

        Use subtract again but return a new object this time.
        >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
        Curve([[-1, 1], [5, 1]]), new_obj=True).y
        DataSet([-1.,  0.,  1.,  0.])

        Try using wrong inputs to create a new object,
        and check whether it throws an exception:
        >>> Curve([[0, 0], [1, 1], [2, 2], [3, 1]]).subtract(\
        Curve([[1, -1], [2, -1]]), new_obj=True) is None
        Traceback (most recent call last):
        ...
        Exception: curve2 does not include self domain

        :param curve2: second object to calculate difference
        :param new_obj: if True, method is creating new object
            instead of modifying self
        :return: None if new_obj is False (but will modify self)
            or type(self) object containing the result
        """
        # domain1 = [a1, b1]
        # domain2 = [a2, b2]
        a1, b1 = np.min(self.x), np.max(self.x)
        a2, b2 = np.min(curve2.x), np.max(curve2.x)

        # check whether domain condition is satisfied
        # (self's domain must be fully contained in curve2's)
        if a2 > a1 or b2 < b1:
            logger.error("Domain of self must be in domain of given curve")
            raise Exception("curve2 does not include self domain")

        # if we want to create and return a new object
        # rather then modify existing one
        if new_obj:
            return functions.subtract(self, curve2.change_domain(self.x))

        # in-place path: sample curve2 on self's domain and subtract
        # element-wise
        values = curve2.evaluate_at_x(self.x)
        self.y = self.y - values
        return None
Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.
def alert(text='', title='', button='OK'):
    """Displays a simple message box with text and a single OK button. Returns the text of the button clicked on."""
    style_flags = MB_OK | MB_SETFOREGROUND | MB_TOPMOST
    messageBoxFunc(0, text, title, style_flags)
    return button
Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.
def confirm(text='', title='', buttons=['OK', 'Cancel']):
    """Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.

    :param text: message body
    :param title: window title
    :param buttons: button labels; index 0 maps to OK, index 1 to Cancel
    :return: label of the button the user clicked
    """
    retVal = messageBoxFunc(0, text, title, MB_OKCANCEL | MB_ICONQUESTION | MB_SETFOREGROUND | MB_TOPMOST)
    if retVal == 1 or len(buttons) == 1:
        return buttons[0]
    elif retVal == 2:
        return buttons[1]
    # BUGFIX: was `assert False, ...`, which is stripped under `python -O`
    # and would silently fall through returning None; raise explicitly
    # (same exception type as before).
    raise AssertionError('Unexpected return value from MessageBox: %s' % (retVal))
Function calculates difference between curve1 and curve2
and returns new object which domain is an union
of curve1 and curve2 domains
Returned object is of type type(curve1)
and has same metadata as curve1 object
:param curve1: first curve to calculate the difference
:param curve2: second curve to calculate the difference
:param def_val: default value for points that cannot be interpolated
:return: new object of type type(curve1) with element-wise difference
(using interpolation if necessary)
def subtract(curve1, curve2, def_val=0):
    """
    Compute the element-wise difference ``curve1 - curve2``.

    The result is defined on the union of both curves' domains; points
    where a curve cannot be interpolated evaluate to ``def_val``.  The
    returned object has the same type and metadata as ``curve1``.

    :param curve1: first curve (minuend)
    :param curve2: second curve (subtrahend)
    :param def_val: default value for points that cannot be interpolated
    :return: new object of type ``type(curve1)`` with the element-wise
        difference (using interpolation if necessary)
    """
    domain = np.union1d(curve1.x, curve2.x)
    difference = curve1.evaluate_at_x(domain, def_val) - curve2.evaluate_at_x(domain, def_val)
    # The np.dstack construction is explained at the end of
    # curve.Curve.change_domain()
    return curve1.__class__(np.dstack((domain, difference))[0], **curve1.__dict__['metadata'])
Apply a window-length median filter to a 1D array vector.
Should get rid of 'spike' value 15.
>>> print(medfilt(np.array([1., 15., 1., 1., 1.]), 3))
[1. 1. 1. 1. 1.]
The 'edge' case is a bit tricky...
>>> print(medfilt(np.array([15., 1., 1., 1., 1.]), 3))
[15. 1. 1. 1. 1.]
Inspired by: https://gist.github.com/bhawkins/3535131
def medfilt(vector, window):
    """
    Apply a median filter of length ``window`` to the 1-D array ``vector``.

    Edge positions repeat the first/last sample so the output has the same
    length as the input.

    Should get rid of 'spike' value 15.
    >>> print(medfilt(np.array([1., 15., 1., 1., 1.]), 3))
    [1. 1. 1. 1. 1.]
    The 'edge' case is a bit tricky...
    >>> print(medfilt(np.array([15., 1., 1., 1., 1.]), 3))
    [15. 1. 1. 1. 1.]
    Inspired by: https://gist.github.com/bhawkins/3535131
    """
    if window % 2 != 1:
        raise ValueError("Median filter length must be odd.")
    if vector.ndim != 1:
        raise ValueError("Input must be one-dimensional.")
    half = (window - 1) // 2
    n = len(vector)
    positions = np.arange(n)
    # One column per window offset; clamping the indices replicates the
    # edge samples beyond the array boundaries.
    stacked = np.empty((n, window), dtype=vector.dtype)
    for offset in range(-half, half + 1):
        stacked[:, offset + half] = vector[np.clip(positions + offset, 0, n - 1)]
    return np.median(stacked, axis=1)
Interpolation on N-D.
ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)
where the arrays x, y, z, ... define a rectangular grid
and a.shape == (len(x), len(y), len(z), ...) are the values
interpolate at xi, yi, zi, ...
def interpn(*args, **kw):
    """Interpolation on an N-D rectangular grid.

    ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)

    The arrays x, y, z, ... define a rectangular grid and
    ``a.shape == (len(x), len(y), len(z), ...)`` holds the values to be
    interpolated at xi, yi, zi, ...

    :keyword method: interpolation kind passed to scipy's ``interp1d``
        (default ``'cubic'``)
    :raises ValueError: on unknown keyword arguments or a wrong number of
        positional arguments
    """
    method = kw.pop('method', 'cubic')
    if kw:
        # BUG FIX: the format string previously had no %s placeholder, so
        # this line raised TypeError instead of the intended ValueError.
        raise ValueError("Unknown arguments: %s" % list(kw.keys()))
    nd = (len(args)-1)//2
    if len(args) != 2*nd+1:
        raise ValueError("Wrong number of arguments")
    q = args[:nd]        # grid axes
    qi = args[nd+1:]     # query coordinates, one per axis
    a = args[nd]         # gridded values
    # Interpolate one axis at a time, reducing along each grid dimension.
    for j in range(nd):
        a = interp1d(q[j], a, axis=j, kind=method)(qi[j])
    return a
Interpolation on N-D.
ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)
where the arrays x, y, z, ... define a rectangular grid
and a.shape == (len(x), len(y), len(z), ...) are the values
interpolate at xi, yi, zi, ...
def npinterpn(*args, **kw):
    """Interpolation on an N-D rectangular grid.

    ai = npinterpn(x, y, z, ..., a, xi, yi, zi, ...)

    The arrays x, y, z, ... define a rectangular grid and
    ``a.shape == (len(x), len(y), len(z), ...)`` holds the values to be
    interpolated at xi, yi, zi, ...

    :keyword method: interpolation kind forwarded to ``interp``
        (default ``'cubic'``)
    :raises ValueError: on unknown keyword arguments or a wrong number of
        positional arguments
    """
    method = kw.pop('method', 'cubic')
    if kw:
        # BUG FIX: the format string previously had no %s placeholder, so
        # this line raised TypeError instead of the intended ValueError.
        raise ValueError("Unknown arguments: %s" % list(kw.keys()))
    nd = (len(args)-1)//2
    if len(args) != 2*nd+1:
        raise ValueError("Wrong number of arguments")
    q = args[:nd]        # grid axes
    qi = args[nd+1:]     # query coordinates, one per axis
    a = args[nd]         # gridded values
    for j in range(nd):
        # NOTE(review): `interp` is defined elsewhere in this project; it is
        # invoked with the same signature as scipy's interp1d here.
        a = interp(q[j], a, axis=j, kind=method)(qi[j])
    return a
Generate a dictionary of fields for a given Peewee model.
See `model_form` docstring for description of parameters.
def model_fields(model, allow_pk=False, only=None, exclude=None,
                 field_args=None, converter=None):
    """
    Build a dictionary mapping field names to wtforms fields for a given
    Peewee model.

    See `model_form` docstring for description of parameters.
    """
    converter = converter or ModelConverter()
    field_args = field_args or {}
    fields = list(model._meta.sorted_fields)
    if not allow_pk:
        # The primary key is the first entry in sorted_fields.
        fields = fields[1:]
    if only:
        fields = [f for f in fields if f.name in only]
    elif exclude:
        fields = [f for f in fields if f.name not in exclude]
    field_dict = {}
    for f in fields:
        name, form_field = converter.convert(model, f, field_args.get(f.name))
        field_dict[name] = form_field
    return field_dict
Create a wtforms Form for a given Peewee model class::
from wtfpeewee.orm import model_form
from myproject.myapp.models import User
UserForm = model_form(User)
:param model:
A Peewee model class
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
def model_form(model, base_class=Form, allow_pk=False, only=None, exclude=None,
               field_args=None, converter=None):
    """
    Create a wtforms Form for a given Peewee model class::

        from wtfpeewee.orm import model_form
        from myproject.myapp.models import User
        UserForm = model_form(User)

    :param model:
        A Peewee model class
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param allow_pk:
        Whether the model's primary key field gets a form field.
    :param only:
        An optional iterable with the property names that should be included
        in the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments
        used to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties.
        If not set, ``ModelConverter`` is used.
    """
    form_fields = model_fields(model, allow_pk, only, exclude, field_args, converter)
    # The form class is synthesized dynamically, named "<Model>Form".
    return type(model.__name__ + 'Form', (base_class,), form_fields)
Displays a simple message box with text and a single OK button. Returns the text of the button clicked on.
def alert(text='', title='', button=OK_TEXT, root=None, timeout=None):
    """Show a message box with *text* and a single button.

    Returns the text of the button clicked on.
    """
    assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
    choices = [str(button)]
    return _buttonbox(msg=text, title=title, choices=choices, root=root, timeout=timeout)
Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.
def confirm(text='', title='', buttons=(OK_TEXT, CANCEL_TEXT), root=None, timeout=None):
    """Show a message box with OK and Cancel buttons.

    Number and text of buttons can be customized via *buttons*.
    Returns the text of the button clicked on.

    :param text: message body shown in the dialog
    :param title: window title
    :param buttons: sequence of button labels to display
    :param root: optional pre-existing Tk root to attach the dialog to
    :param timeout: optional timeout in milliseconds
    """
    # FIX: default is now an immutable tuple; a mutable list default is
    # shared across calls (classic Python pitfall).
    assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
    return _buttonbox(msg=text, title=title, choices=[str(b) for b in buttons], root=root, timeout=timeout)
Displays a message box with text input, and OK & Cancel buttons. Returns the text entered, or None if Cancel was clicked.
def prompt(text='', title='', default='', root=None, timeout=None):
    """Show a message box with a text-entry field plus OK & Cancel buttons.

    Returns the text entered, or None if Cancel was clicked.
    """
    assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
    return __fillablebox(text, title, default=default, mask=None, root=root, timeout=timeout)
Display a msg, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
@arg msg: the msg to be displayed.
@arg title: the window title
@arg choices: a list or tuple of the choices to be displayed
def _buttonbox(msg, title, choices, root=None, timeout=None):
    """
    Display a msg, a title, and a set of buttons.
    The buttons are defined by the members of the choices list.
    Return the text of the button that the user selected.

    @arg msg: the msg to be displayed.
    @arg title: the window title
    @arg choices: a list or tuple of the choices to be displayed
    @arg root: optional pre-existing Tk root; the dialog becomes a Toplevel of it
    @arg timeout: optional timeout in milliseconds after which the box closes
    """
    global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
    # Initialize __replyButtonText to the first choice.
    # This is what will be used if the window is closed by the close button.
    __replyButtonText = choices[0]
    if root:
        # Reuse the caller's Tk instance: hide it and attach a Toplevel.
        root.withdraw()
        boxRoot = tk.Toplevel(master=root)
        boxRoot.withdraw()
    else:
        # No root supplied: create (and initially hide) our own Tk instance.
        boxRoot = tk.Tk()
        boxRoot.withdraw()
    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    boxRoot.geometry(rootWindowPosition)
    boxRoot.minsize(400, 100)
    # ------------- define the messageFrame ---------------------------------
    messageFrame = tk.Frame(master=boxRoot)
    messageFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- define the buttonsFrame ---------------------------------
    buttonsFrame = tk.Frame(master=boxRoot)
    buttonsFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # -------------------- place the widgets in the frames -----------------------
    messageWidget = tk.Message(messageFrame, text=msg, width=400)
    messageWidget.configure(font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=tk.TOP, expand=tk.YES, fill=tk.X, padx='3m', pady='3m')
    __put_buttons_in_buttonframe(choices)
    # -------------- the action begins -----------
    # put the focus on the first button
    __firstWidget.focus_force()
    boxRoot.deiconify()
    if timeout is not None:
        # Auto-dismiss the dialog after `timeout` ms via timeoutBoxRoot.
        boxRoot.after(timeout, timeoutBoxRoot)
    boxRoot.mainloop()
    try:
        boxRoot.destroy()
    except tk.TclError:
        # Window was already destroyed (e.g. by a button handler).  Unless the
        # box timed out, treat this as "closed without a selection".
        if __replyButtonText != TIMEOUT_TEXT:
            __replyButtonText = None
    if root: root.deiconify()
    return __replyButtonText
Put the buttons in the buttons frame
def __put_buttons_in_buttonframe(choices):
    """Create one button per entry of *choices* inside the buttons frame."""
    global __widgetTexts, __firstWidget, buttonsFrame
    __firstWidget = None
    __widgetTexts = {}
    for position, label in enumerate(choices):
        widget = tk.Button(buttonsFrame, takefocus=1, text=label)
        _bindArrows(widget)
        widget.pack(expand=tk.YES, side=tk.LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
        # Remember the text associated with this widget.
        __widgetTexts[widget] = label
        # The first button gets the initial keyboard focus.
        if position == 0:
            __firstWidget = widget
        # Bind every standard activation event to the shared button handler.
        for selectionEvent in STANDARD_SELECTION_EVENTS:
            widget.bind('<%s>' % selectionEvent, __buttonEvent)
    if CANCEL_TEXT in choices:
        # Escape triggers cancel; bound on the last button created, as before.
        widget.bind('<Escape>', __cancelButtonEvent)
Show a box in which a user can enter some text.
You may optionally specify some default text, which will appear in the
enterbox when it is displayed.
Returns the text that the user entered, or None if he cancels the operation.
def __fillablebox(msg, title='', default='', mask=None, root=None, timeout=None):
    """
    Show a box in which a user can enter some text.
    You may optionally specify some default text, which will appear in the
    enterbox when it is displayed.
    Returns the text that the user entered, or None if he cancels the operation.

    @arg msg: the message displayed next to the entry widget
    @arg title: the window title (None is normalized to '')
    @arg default: text pre-filled (and pre-selected) in the entry widget
    @arg mask: optional character used to mask input (e.g. '*' for passwords)
    @arg root: optional pre-existing Tk root; the dialog becomes a Toplevel of it
    @arg timeout: optional timeout in milliseconds after which the box closes
    """
    global boxRoot, __enterboxText, __enterboxDefaultText
    global cancelButton, entryWidget, okButton
    if title is None:
        # BUG FIX: this previously read `title == ''` (a comparison whose
        # result was discarded), so a None title leaked through to Tk.
        title = ''
    if default is None:
        default = ''
    __enterboxDefaultText = default
    __enterboxText = __enterboxDefaultText
    if root:
        root.withdraw()
        boxRoot = tk.Toplevel(master=root)
        boxRoot.withdraw()
    else:
        boxRoot = tk.Tk()
        boxRoot.withdraw()
    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    boxRoot.geometry(rootWindowPosition)
    boxRoot.bind('<Escape>', __enterboxCancel)
    # ------------- define the messageFrame ---------------------------------
    messageFrame = tk.Frame(master=boxRoot)
    messageFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- define the buttonsFrame ---------------------------------
    # NOTE(review): this frame is created and packed but immediately rebound
    # below; it is kept to preserve the historical widget/layout order.
    buttonsFrame = tk.Frame(master=boxRoot)
    buttonsFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- define the entryFrame ---------------------------------
    entryFrame = tk.Frame(master=boxRoot)
    entryFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- define the buttonsFrame ---------------------------------
    buttonsFrame = tk.Frame(master=boxRoot)
    buttonsFrame.pack(side=tk.TOP, fill=tk.BOTH)
    #-------------------- the msg widget ----------------------------
    messageWidget = tk.Message(messageFrame, width='4.5i', text=msg)
    messageWidget.configure(font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=tk.RIGHT, expand=1, fill=tk.BOTH, padx='3m', pady='3m')
    # --------- entryWidget ----------------------------------------------
    entryWidget = tk.Entry(entryFrame, width=40)
    _bindArrows(entryWidget, skipArrowKeys=True)
    entryWidget.configure(font=(PROPORTIONAL_FONT_FAMILY, TEXT_ENTRY_FONT_SIZE))
    if mask:
        entryWidget.configure(show=mask)
    entryWidget.pack(side=tk.LEFT, padx='3m')
    entryWidget.bind('<Return>', __enterboxGetText)
    entryWidget.bind('<Escape>', __enterboxCancel)
    # put text into the entryWidget and have it pre-highlighted
    if __enterboxDefaultText != '':
        entryWidget.insert(0, __enterboxDefaultText)
        entryWidget.select_range(0, tk.END)
    # ------------------ ok button -------------------------------
    okButton = tk.Button(buttonsFrame, takefocus=1, text=OK_TEXT)
    _bindArrows(okButton)
    okButton.pack(expand=1, side=tk.LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
    # for the commandButton, bind activation events to the activation event handler
    commandButton = okButton
    handler = __enterboxGetText
    for selectionEvent in STANDARD_SELECTION_EVENTS:
        commandButton.bind('<%s>' % selectionEvent, handler)
    # ------------------ cancel button -------------------------------
    cancelButton = tk.Button(buttonsFrame, takefocus=1, text=CANCEL_TEXT)
    _bindArrows(cancelButton)
    cancelButton.pack(expand=1, side=tk.RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m')
    # for the commandButton, bind activation events to the activation event handler
    commandButton = cancelButton
    handler = __enterboxCancel
    for selectionEvent in STANDARD_SELECTION_EVENTS:
        commandButton.bind('<%s>' % selectionEvent, handler)
    # ------------------- time for action! -----------------
    entryWidget.focus_force()  # put the focus on the entryWidget
    boxRoot.deiconify()
    if timeout is not None:
        boxRoot.after(timeout, timeoutBoxRoot)
    boxRoot.mainloop()  # run it!
    # -------- after the run has completed ----------------------------------
    if root: root.deiconify()
    try:
        boxRoot.destroy()  # button_click didn't destroy boxRoot, so we do it now
    except tk.TclError:
        if __enterboxText != TIMEOUT_TEXT:
            return None
    return __enterboxText
Parse the login xml response
:param response: the login response from the RETS server
:return: None
def parse(self, response):
"""
Parse the login xml response
:param response: the login response from the RETS server
:return: None
"""
self.headers = response.headers
if 'xml' in self.headers.get('Content-Type'):
# Got an XML response, likely an error code.
xml = xmltodict.parse(response.text)
self.analyze_reply_code(xml_response_dict=xml)
lines = response.text.split('\r\n')
if len(lines) < 3:
lines = response.text.split('\n')
for line in lines:
line = line.strip()
name, value = self.read_line(line)
if name:
if name in self.valid_transactions or re.match(pattern='/^X\-/', string=name):
self.capabilities[name] = value
else:
self.details[name] = value |
Reads lines of XML and delimits, strips, and returns.
def read_line(line):
"""Reads lines of XML and delimits, strips, and returns."""
name, value = '', ''
if '=' in line:
name, value = line.split('=', 1)
return [name.strip(), value.strip()] |
Takes a response socket connection and iteratively parses and yields the results as python dictionaries.
:param response: a Requests response object with stream=True
:return:
    def generator(self, response):
        """
        Parse a streamed search response into a list of row dictionaries.

        NOTE(review): despite the historical name/wording ("iteratively
        parses and yields"), this method returns a fully built list rather
        than yielding rows lazily.

        :param response: a Requests response object with stream=True
        :return: list of dicts, one per DATA row
        :raises RETSException: on a non-zero, non-20201 reply code
        :raises MaxrowException: when the server truncates the result set
            (the rows collected so far travel inside the exception)
        """
        delim = '\t'  # Default to tab delimited
        columns = []
        response.raw.decode_content = True
        events = ET.iterparse(BytesIO(response.content))
        results = []
        for event, elem in events:
            # Analyze search record data
            if "DATA" == elem.tag:
                # Pair each column with its value; empty column names (from
                # leading/trailing delimiters) are dropped.
                data_dict = {column: data for column, data in zip(columns, elem.text.split(delim)) if column != ''}
                self.parsed_rows += 1  # Rows parsed with all requests
                results.append(data_dict)
            # Handle reply code
            elif "RETS" == elem.tag:
                reply_code = elem.get('ReplyCode')
                reply_text = elem.get('ReplyText')
                if reply_code == '20201':
                    # RETS Response 20201 - No Records Found
                    # Generator should continue and return nothing
                    continue
                elif reply_code != '0':
                    msg = "RETS Error {0!s}: {1!s}".format(reply_code, reply_text)
                    raise RETSException(msg)
            # Analyze delimiter
            elif "DELIMITER" == elem.tag:
                # The delimiter is transmitted as a numeric character code.
                val = elem.get("value")
                delim = chr(int(val))
            # Analyze columns
            elif "COLUMNS" == elem.tag:
                columns = elem.text.split(delim)
            # handle max rows
            elif "MAXROWS" == elem.tag:
                logger.debug("MAXROWS Tag reached in XML")
                logger.debug("Received {0!s} results from this search".format(self.parsed_rows))
                # Hand the rows collected so far to the caller via the exception.
                raise MaxrowException(results)
            else:
                # This is a tag we don't process (like COUNT)
                continue
            elem.clear()
        return results
Takes a urlinfo object and returns a flat dictionary.
def flatten_urlinfo(urlinfo, shorter_keys=True):
    """ Takes a urlinfo object and returns a flat dictionary.

    Keys are dotted paths into the parsed XML response (shortened to their
    last component for list-valued nodes when ``shorter_keys`` is True).
    An ``OutputTimestamp`` entry (UTC, ``%Y%m%dT%H%M%SZ``) is added to the
    result.
    """
    def flatten(value, prefix=""):
        # Recursively walk the parsed structure, writing every leaf into the
        # enclosing `_result` dict.  `_result` is bound after this def but
        # before the first call, so the closure resolves it correctly.
        if is_string(value):
            _result[prefix[1:]] = value  # [1:] strips the leading "."
            return
        try:
            len(value)
        except (AttributeError, TypeError):  # a leaf
            _result[prefix[1:]] = value
            return
        try:
            items = value.items()
        except AttributeError:  # an iterable, but not a dict
            last_prefix = prefix.split(".")[-1]
            if shorter_keys:
                prefix = "." + last_prefix
            if last_prefix == "Country":
                # Key each per-country record by its @Code attribute.
                for v in value:
                    country = v.pop("@Code")
                    flatten(v, ".".join([prefix, country]))
            elif last_prefix in ["RelatedLink", "CategoryData"]:
                # No natural key for these lists; use the list position.
                for i, v in enumerate(value):
                    flatten(v, ".".join([prefix, str(i)]))
            elif value[0].get("TimeRange"):
                # Key each record by its TimeRange (and DataUrl, if present).
                for v in value:
                    time_range = ".".join(tuple(v.pop("TimeRange").items())[0])
                    # python 3 odict_items don't support indexing
                    if v.get("DataUrl"):
                        time_range = ".".join([v.pop("DataUrl"), time_range])
                    flatten(v, ".".join([prefix, time_range]))
            else:
                msg = prefix + " contains a list we don't know how to flatten."
                raise NotImplementedError(msg)
        else:  # a dict, go one level deeper
            for k, v in items:
                flatten(v, ".".join([prefix, k]))
    _result = {}
    info = xmltodict.parse(str(urlinfo))
    flatten(info["aws:UrlInfoResponse"]["Response"]["UrlInfoResult"]["Alexa"])
    _result["OutputTimestamp"] = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
    return _result
Create URI and signature headers based on AWS V4 signing process.
Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.
:param request_params: dictionary of request parameters
:return: URL and header to be passed to requests.get
    def create_v4_signature(self, request_params):
        '''
        Create URI and signature headers based on AWS V4 signing process.
        Refer to https://docs.aws.amazon.com/AlexaWebInfoService/latest/ApiReferenceArticle.html for request params.

        :param request_params: dictionary of request parameters
        :return: URL and header to be passed to requests.get
        '''
        method = 'GET'
        service = 'awis'
        host = 'awis.us-west-1.amazonaws.com'
        region = 'us-west-1'
        # NOTE(review): the request URL is built from `endpoint` while the
        # canonical request is signed with `host` above — confirm both point
        # at the same service, otherwise the signature will not validate.
        endpoint = 'https://awis.amazonaws.com/api'
        # SigV4 requires the querystring to be sorted by parameter name.
        request_parameters = urlencode([(key, request_params[key]) for key in sorted(request_params.keys())])
        # Key derivation functions. See:
        # http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
        def sign(key, msg):
            return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
        def getSignatureKey(key, dateStamp, regionName, serviceName):
            # Chain HMACs over date, region, service and the terminal string.
            kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
            kRegion = sign(kDate, regionName)
            kService = sign(kRegion, serviceName)
            kSigning = sign(kService, 'aws4_request')
            return kSigning
        # Create a date for headers and the credential string
        t = datetime.datetime.utcnow()
        amzdate = t.strftime('%Y%m%dT%H%M%SZ')
        datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
        # Create canonical request
        canonical_uri = '/api'
        canonical_querystring = request_parameters
        canonical_headers = 'host:' + host + '\n' + 'x-amz-date:' + amzdate + '\n'
        signed_headers = 'host;x-amz-date'
        # GET requests sign the SHA-256 of an empty payload.
        payload_hash = hashlib.sha256(''.encode('utf8')).hexdigest()
        canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
        # Create string to sign
        algorithm = 'AWS4-HMAC-SHA256'
        credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
        string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request.encode('utf8')).hexdigest()
        # Calculate signature
        signing_key = getSignatureKey(self.secret_access_key, datestamp, region, service)
        # Sign the string_to_sign using the signing_key
        signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
        # Add signing information to the request
        authorization_header = algorithm + ' ' + 'Credential=' + self.access_id + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
        headers = {'X-Amz-Date':amzdate, 'Authorization':authorization_header, 'Content-Type': 'application/xml', 'Accept': 'application/xml'}
        # Create request url
        request_url = endpoint + '?' + canonical_querystring
        return request_url, headers
Provide information about supplied domain as specified by the response group
:param domain: Any valid URL
:param response_group: Any valid urlinfo response group
:return: Traffic and/or content data of the domain in XML format
def urlinfo(self, domain, response_group = URLINFO_RESPONSE_GROUPS):
'''
Provide information about supplied domain as specified by the response group
:param domain: Any valid URL
:param response_group: Any valid urlinfo response group
:return: Traffic and/or content data of the domain in XML format
'''
params = {
'Action': "UrlInfo",
'Url': domain,
'ResponseGroup': response_group
}
url, headers = self.create_v4_signature(params)
return self.return_output(url, headers) |
Provide traffic history of supplied domain
:param domain: Any valid URL
:param response_group: Any valid traffic history response group
:return: Traffic and/or content data of the domain in XML format
def traffichistory(self, domain, response_group=TRAFFICINFO_RESPONSE_GROUPS, myrange=31, start=20070801):
'''
Provide traffic history of supplied domain
:param domain: Any valid URL
:param response_group: Any valid traffic history response group
:return: Traffic and/or content data of the domain in XML format
'''
params = {
'Action': "TrafficHistory",
'Url': domain,
'ResponseGroup': response_group,
'Range': myrange,
'Start': start,
}
url, headers = self.create_v4_signature(params)
return self.return_output(url, headers) |
Provide category browse information of specified domain
:param domain: Any valid URL
:param path: Valid category path
:param response_group: Any valid traffic history response group
:return: Traffic and/or content data of the domain in XML format
def cat_browse(self, domain, path, response_group=CATEGORYBROWSE_RESPONSE_GROUPS, descriptions='True'):
'''
Provide category browse information of specified domain
:param domain: Any valid URL
:param path: Valid category path
:param response_group: Any valid traffic history response group
:return: Traffic and/or content data of the domain in XML format
'''
params = {
'Action': "CategoryListings",
'ResponseGroup': 'Listings',
'Path': quote(path),
'Descriptions': descriptions
}
url, headers = self.create_v4_signature(params)
return self.return_output(url, headers) |
Add a capability of the RETS board
:param name: The name of the capability
:param uri: The capability URI given by the RETS board
:return: None
def add_capability(self, name, uri):
"""
Add a capability of the RETS board
:param name: The name of the capability
:param uri: The capability URI given by the RETS board
:return: None
"""
parse_results = urlparse(uri)
if parse_results.hostname is None:
# relative URL given, so build this into an absolute URL
login_url = self.capabilities.get('Login')
if not login_url:
logger.error("There is no login URL stored, so additional capabilities cannot be added.")
raise ValueError("Cannot automatically determine absolute path for {0!s} given.".format(uri))
parts = urlparse(login_url)
port = ':{}'.format(parts.port) if parts.port else ''
uri = parts.scheme + '://' + parts.hostname + port + '/' + uri.lstrip('/')
self.capabilities[name] = uri |
Login to the RETS board and return an instance of Bulletin
:return: Bulletin instance
    def login(self):
        """
        Log in to the RETS board.

        Performs the Login transaction, stores the session id cookie, the
        server's RETS-Version header and every advertised capability URL,
        and fires the optional Action transaction when one is advertised.

        :return: True on success.  NOTE(review): an earlier docstring said
            a Bulletin instance is returned, but the code returns True.
        """
        response = self._request('Login')
        parser = OneXLogin()
        parser.parse(response)
        # Session id arrives as a cookie; default to '' when absent.
        self.session_id = response.cookies.get(self.session_id_cookie_name, '')
        if parser.headers.get('RETS-Version') is not None:
            # Echo the server's RETS version on all subsequent requests.
            self.version = str(parser.headers.get('RETS-Version'))
            self.client.headers['RETS-Version'] = self.version
        for k, v in parser.capabilities.items():
            self.add_capability(k, v)
        if self.capabilities.get('Action'):
            self._request('Action')
        return True
Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list
def get_resource_metadata(self, resource=None):
"""
Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list
"""
result = self._make_metadata_request(meta_id=0, metadata_type='METADATA-RESOURCE')
if resource:
result = next((item for item in result if item['ResourceID'] == resource), None)
return result |
Get metadata for a given resource: class
:param resource: The name of the resource
:param resource_class: The name of the class to get metadata from
:return: list
def get_table_metadata(self, resource, resource_class):
"""
Get metadata for a given resource: class
:param resource: The name of the resource
:param resource_class: The name of the class to get metadata from
:return: list
"""
return self._make_metadata_request(meta_id=resource + ':' + resource_class, metadata_type='METADATA-TABLE') |
Get possible lookup values for a given field
:param resource: The name of the resource
:param lookup_name: The name of the field to get lookup values for
:return: list
def get_lookup_values(self, resource, lookup_name):
"""
Get possible lookup values for a given field
:param resource: The name of the resource
:param lookup_name: The name of the the field to get lookup values for
:return: list
"""
return self._make_metadata_request(meta_id=resource + ':' + lookup_name, metadata_type='METADATA-LOOKUP_TYPE') |
Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
then we change to the 'STANDARD-XML' format and try again.
:param meta_id: The name of the resource, class, or lookup to get metadata for
:param metadata_type: The RETS metadata type
:return: list
def _make_metadata_request(self, meta_id, metadata_type=None):
"""
Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
then we change to the 'STANDARD-XML' format and try again.
:param meta_id: The name of the resource, class, or lookup to get metadata for
:param metadata_type: The RETS metadata type
:return: list
"""
# If this metadata _request has already happened, returned the saved result.
key = '{0!s}:{1!s}'.format(metadata_type, meta_id)
if key in self.metadata_responses and self.cache_metadata:
response = self.metadata_responses[key]
else:
response = self._request(
capability='GetMetadata',
options={
'query': {
'Type': metadata_type,
'ID': meta_id,
'Format': self.metadata_format
}
}
)
self.metadata_responses[key] = response
if self.metadata_format == 'COMPACT-DECODED':
parser = CompactMetadata()
else:
parser = StandardXMLetadata()
try:
return parser.parse(response=response, metadata_type=metadata_type)
except RETSException as e:
# Remove response from cache
self.metadata_responses.pop(key, None)
# If the server responds with an invalid parameter for COMPACT-DECODED, try STANDARD-XML
if self.metadata_format != 'STANDARD-XML' and e.reply_code in ['20513', '20514']:
self.metadata_responses.pop(key, None)
self.metadata_format = 'STANDARD-XML'
return self._make_metadata_request(meta_id=meta_id, metadata_type=metadata_type)
raise RETSException(e.reply_text, e.reply_code) |
Get the first object from a Resource
:param resource: The name of the resource
:param object_type: The type of object to fetch
:param content_id: The unique id of the item to get objects for
:param location: The path to get Objects from
:return: Object
def get_preferred_object(self, resource, object_type, content_id, location=0):
"""
Get the first object from a Resource
:param resource: The name of the resource
:param object_type: The type of object to fetch
:param content_id: The unique id of the item to get objects for
:param location: The path to get Objects from
:return: Object
"""
collection = self.get_object(resource=resource, object_type=object_type,
content_ids=content_id, object_ids='0', location=location)
return collection[0] |
Get a list of Objects from a resource
:param resource: The resource to get objects from
:param object_type: The type of object to fetch
:param content_ids: The unique id of the item to get objects for
:param object_ids: ids of the objects to download
:param location: The path to get Objects from
:return: list
def get_object(self, resource, object_type, content_ids, object_ids='*', location=0):
"""
Get a list of Objects from a resource
:param resource: The resource to get objects from
:param object_type: The type of object to fetch
:param content_ids: The unique id of the item to get objects for
:param object_ids: ids of the objects to download
:param location: The path to get Objects from
:return: list
"""
object_helper = GetObject()
request_ids = object_helper.ids(content_ids=content_ids, object_ids=object_ids)
response = self._request(
capability='GetObject',
options={
'query':
{
"Resource": resource,
"Type": object_type,
"ID": ','.join(request_ids),
"Location": location
}
}
)
if 'multipart' in response.headers.get('Content-Type'):
parser = MultipleObjectParser()
collection = parser.parse_image_response(response)
else:
parser = SingleObjectParser()
collection = [parser.parse_image_response(response)]
return collection |
Perform a search on the RETS board
:param resource: The resource that contains the class to search
:param resource_class: The class to search
:param search_filter: The query as a dict
:param dmql_query: The query in dmql format
:param limit: Limit search values count
:param offset: Offset for RETS request. Useful when RETS limits number of results or transactions
:param optional_parameters: Values for optional parameters
:param auto_offset: Should the search be allowed to trigger subsequent searches.
:param query_type: DMQL or DMQL2 depending on the rets server.
:param standard_names: 1 to use standard names, 0 to use system names
:param response_format: COMPACT-DECODED, COMPACT, or STANDARD-XML
:return: dict
def search(self, resource, resource_class, search_filter=None, dmql_query=None, limit=9999999, offset=0,
           optional_parameters=None, auto_offset=True, query_type='DMQL2', standard_names=0,
           response_format='COMPACT-DECODED'):
    """
    Perform a search on the RETS board.

    Exactly one of ``search_filter`` or ``dmql_query`` must be supplied.

    :param resource: The resource that contains the class to search
    :param resource_class: The class to search
    :param search_filter: The query as a dict
    :param dmql_query: The query in dmql format
    :param limit: Limit search values count
    :param offset: Offset for RETS request. Useful when RETS limits number of results or transactions
    :param optional_parameters: Values for optional parameters
    :param auto_offset: Should the search be allowed to trigger subsequent searches.
    :param query_type: DMQL or DMQL2 depending on the rets server.
    :param standard_names: 1 to use standard names, 0 to use system names
    :param response_format: COMPACT-DECODED, COMPACT, or STANDARD-XML
    :raises ValueError: if both or neither of search_filter/dmql_query are given
    :return: dict
    """
    if (search_filter and dmql_query) or (not search_filter and not dmql_query):
        raise ValueError("You must specify exactly one of search_filter or dmql_query.")
    search_helper = DMQLHelper()
    if dmql_query:
        dmql_query = search_helper.dmql(query=dmql_query)
    else:
        dmql_query = search_helper.filter_to_dmql(filter_dict=search_filter)
    parameters = {
        'SearchType': resource,
        'Class': resource_class,
        'Query': dmql_query,
        'QueryType': query_type,
        'Count': 1,
        'Format': response_format,
        'StandardNames': standard_names,
    }
    if not optional_parameters:
        optional_parameters = {}
    parameters.update(optional_parameters)
    # The RETS Select parameter is a comma-separated string; accept a list for convenience.
    if 'Select' in parameters and isinstance(parameters.get('Select'), list):
        parameters['Select'] = ','.join(parameters['Select'])
    if limit:
        parameters['Limit'] = limit
    if offset:
        parameters['Offset'] = offset
    search_cursor = OneXSearchCursor()
    response = self._request(
        capability='Search',
        options={
            'query': parameters,
        },
        stream=True
    )
    try:
        return search_cursor.generator(response=response)
    except MaxrowException as max_exception:
        rows_so_far = max_exception.rows_returned
        # Recursive searching if automatically performing offsets for the client
        if auto_offset and limit > len(rows_so_far):
            new_limit = limit - len(rows_so_far)   # have not returned results to the desired limit
            new_offset = offset + len(rows_so_far)  # adjust offset
            # BUG FIX: forward query_type, standard_names, and response_format so
            # continuation requests use the same options as the original request
            # (previously they silently reverted to the defaults).
            results = self.search(resource=resource, resource_class=resource_class, search_filter=None,
                                  dmql_query=dmql_query, offset=new_offset, limit=new_limit,
                                  optional_parameters=optional_parameters, auto_offset=auto_offset,
                                  query_type=query_type, standard_names=standard_names,
                                  response_format=response_format)
            return rows_so_far + results
        return rows_so_far
Make a _request to the RETS server
:param capability: The name of the capability to use to get the URI
:param options: Options to put into the _request
:return: Response
def _request(self, capability, options=None, stream=False):
    """
    Make a request to the RETS server.

    :param capability: The name of the capability whose login-advertised URI is used
    :param options: Options to put into the request; may contain a 'query' dict
    :param stream: Whether to stream the HTTP response body
    :raises NotLoggedIn: if the capability URL is unknown, or the server replies 400/401
    :raises HTTPException: if a POST request returns 404
    :return: Response
    """
    # BUG FIX: copy instead of mutating the caller's dict — the original
    # wrote the headers back into the argument as a side effect.
    options = dict(options) if options else {}
    options['headers'] = self.client.headers.copy()
    url = self.capabilities.get(capability)
    if not url:
        msg = "{0!s} tried but no valid endpoints was found. Did you forget to Login?".format(capability)
        raise NotLoggedIn(msg)
    if self.user_agent_password:
        ua_digest = self._user_agent_digest_hash()
        options['headers']['RETS-UA-Authorization'] = 'Digest {0!s}'.format(ua_digest)
    if self.use_post_method and capability != 'Action':  # Action requests should always be GET
        response = self.client.post(url, data=options.get('query'), headers=options['headers'], stream=stream)
    else:
        if 'query' in options:
            url += '?' + '&'.join('{0!s}={1!s}'.format(k, quote(str(v))) for k, v in options['query'].items())
        response = self.client.get(url, headers=options['headers'], stream=stream)
    if response.status_code in (400, 401):
        if capability == 'Login':
            m = "Could not log into the RETS server with the provided credentials."
        else:
            # BUG FIX: report the actual status code instead of always claiming 401.
            m = ("The RETS server returned a {0} status code. "
                 "You must be logged in to make this request.".format(response.status_code))
        raise NotLoggedIn(m)
    elif response.status_code == 404 and self.use_post_method:
        raise HTTPException("Got a 404 when making a POST request. Try setting use_post_method=False when "
                            "initializing the Session.")
    return response
Hash the user agent and user agent password
Section 3.10 of https://www.nar.realtor/retsorg.nsf/retsproto1.7d6.pdf
:return: md5
def _user_agent_digest_hash(self):
"""
Hash the user agent and user agent password
Section 3.10 of https://www.nar.realtor/retsorg.nsf/retsproto1.7d6.pdf
:return: md5
"""
if not self.version:
raise MissingVersion("A version is required for user agent auth. The RETS server should set this"
"automatically but it has not. Please instantiate the session with a version argument"
"to provide the version.")
version_number = self.version.strip('RETS/')
user_str = '{0!s}:{1!s}'.format(self.user_agent, self.user_agent_password).encode('utf-8')
a1 = hashlib.md5(user_str).hexdigest()
session_id = self.session_id if self.session_id is not None else ''
digest_str = '{0!s}::{1!s}:{2!s}'.format(a1, session_id, version_number).encode('utf-8')
digest = hashlib.md5(digest_str).hexdigest()
return digest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.