For each edge, link it to the vertex types that it connects.
|
def _link_vertex_and_edge_types(self):
"""For each edge, link it to the vertex types it connects to each other."""
for edge_class_name in self._edge_class_names:
edge_element = self._elements[edge_class_name]
if (EDGE_SOURCE_PROPERTY_NAME not in edge_element.properties or
EDGE_DESTINATION_PROPERTY_NAME not in edge_element.properties):
if edge_element.abstract:
continue
else:
raise AssertionError(u'Found a non-abstract edge class with undefined '
u'endpoint types: {}'.format(edge_element))
from_class_name = edge_element.properties[EDGE_SOURCE_PROPERTY_NAME].qualifier
to_class_name = edge_element.properties[EDGE_DESTINATION_PROPERTY_NAME].qualifier
edge_schema_element = self._elements[edge_class_name]
# Link from_class_name with edge_class_name
for from_class in self._subclass_sets[from_class_name]:
from_schema_element = self._elements[from_class]
from_schema_element.out_connections.add(edge_class_name)
edge_schema_element.in_connections.add(from_class)
# Link edge_class_name with to_class_name
for to_class in self._subclass_sets[to_class_name]:
to_schema_element = self._elements[to_class]
edge_schema_element.out_connections.add(to_class)
to_schema_element.in_connections.add(edge_class_name)
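To make the linking concrete, here is a hedged walk-through with made-up class names; the property qualifiers and subclass sets below are illustrative stand-ins for what the schema parser would actually produce:
# Suppose the edge class 'Animal_LivesIn' has its source property qualified
# as 'Animal' and its destination property qualified as 'Location', and:
subclass_sets = {
    'Animal': {'Animal', 'Dog'},   # 'Dog' is a subclass of 'Animal'
    'Location': {'Location'},
}
# After linking, each subclass of the source type gains an out-connection to
# the edge, and the edge records each of them as an in-connection:
#   'Animal'.out_connections and 'Dog'.out_connections both contain 'Animal_LivesIn'
#   'Animal_LivesIn'.in_connections contains {'Animal', 'Dog'}
# and symmetrically for the destination type:
#   'Animal_LivesIn'.out_connections contains {'Location'}
#   'Location'.in_connections contains 'Animal_LivesIn'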
|
Return True if the Filter block references no non-local fields, and False otherwise.
|
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
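The dict above is the standard Python 2 substitute for `nonlocal`: a closure may mutate objects from the enclosing scope, but cannot rebind its variables. A minimal side-by-side sketch of the two approaches:
def py2_compatible():
    state = {'seen': False}      # mutable cell shared with the closure

    def visitor(_):
        state['seen'] = True     # mutating the dict is allowed; rebinding is not
    visitor(None)
    return state['seen']

def py3_only():
    seen = False

    def visitor(_):
        nonlocal seen            # rebinds the enclosing variable directly
        seen = True
    visitor(None)
    return seen

assert py2_compatible() and py3_only()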
|
Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause containing a non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
|
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
    an optional traversal, and locations with an associated "where:" clause containing a non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
|
Return the GraphQL type bound at the given step, or None if no bound is given.
|
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
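`get_only_element_from_collection` is a helper from the compiler's utility module; a plausible minimal implementation (an assumption on our part, not the verified source) looks like this:
def get_only_element_from_collection(collection):
    """Return the only element of a collection, raising if there is not exactly one."""
    # Assumed behavior: enforce that the collection holds a single element.
    if len(collection) != 1:
        raise AssertionError(u'Expected a collection of exactly one element, got: '
                             u'{}'.format(collection))
    return list(collection)[0]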
|
Ensure that the two bounds either are an exact match, or one of them is None.
|
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
|
Return a MATCH query where only preferred locations are valid as query start locations.
|
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure that
                # none of the MATCH steps with this location have type bounds, and therefore
                # do not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
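Both of the rewriting passes in this module rely on the `namedtuple._replace` idiom: it returns a modified copy and leaves the original step untouched, which is what allows rebuilding the traversals without mutating the input query. A minimal demonstration with a stand-in step type:
from collections import namedtuple

# Stand-in for the compiler's MATCH step tuple; the real type has more fields.
MatchStep = namedtuple('MatchStep', ['root_block', 'coerce_type_block', 'where_block'])

step = MatchStep(root_block='QueryRoot', coerce_type_block='CoerceType', where_block=None)
new_step = step._replace(coerce_type_block=None)   # copy with one field changed

assert step.coerce_type_block == 'CoerceType'      # the original is unmodified
assert new_step.coerce_type_block is None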
|
Return a MATCH query where all eligible locations are valid as query start locations.
|
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
|
Ensure that OrientDB only considers desirable query start points in query planning.
|
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
|
Return an Expression that is the `&&` of all the expressions in the given list.
|
def _expression_list_to_conjunction(expression_list):
"""Return an Expression that is the `&&` of all the expressions in the given list."""
if not isinstance(expression_list, list):
raise AssertionError(u'Expected list. Received {}: '
u'{}'.format(type(expression_list).__name__, expression_list))
if len(expression_list) == 0:
raise AssertionError(u'Received empty expression_list '
u'(function should never be called with empty list): '
u'{}'.format(expression_list))
elif len(expression_list) == 1:
return expression_list[0]
else:
remaining_conjunction = _expression_list_to_conjunction(expression_list[1:])
return BinaryComposition(u'&&', expression_list[0], remaining_conjunction)
|
Return a generator for expressions that are connected by `&&`s in the given expression.
|
def _extract_conjunction_elements_from_expression(expression):
    """Return a generator for expressions that are connected by `&&`s in the given expression."""
    if isinstance(expression, BinaryComposition) and expression.operator == u'&&':
        for element in _extract_conjunction_elements_from_expression(expression.left):
            yield element
        for element in _extract_conjunction_elements_from_expression(expression.right):
            yield element
else:
yield expression
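These two helpers are inverses of each other: one folds a list into a right-nested `&&` tree, the other flattens such a tree back into its leaves. A standalone miniature illustrating the round trip, with a stand-in `BinaryComposition` since the real expression class lives elsewhere in the compiler:
from collections import namedtuple

# Stand-in for the compiler's BinaryComposition expression class.
BinaryComposition = namedtuple('BinaryComposition', ['operator', 'left', 'right'])

def to_conjunction(expressions):
    """Fold a non-empty list of expressions into a right-nested `&&` tree."""
    if len(expressions) == 1:
        return expressions[0]
    return BinaryComposition(u'&&', expressions[0], to_conjunction(expressions[1:]))

def from_conjunction(expression):
    """Flatten a `&&` tree back into its leaf expressions, left to right."""
    if isinstance(expression, BinaryComposition) and expression.operator == u'&&':
        for side in (expression.left, expression.right):
            for leaf in from_conjunction(side):
                yield leaf
    else:
        yield expression

tree = to_conjunction(['a', 'b', 'c'])
# tree == BinaryComposition('&&', 'a', BinaryComposition('&&', 'b', 'c'))
assert list(from_conjunction(tree)) == ['a', 'b', 'c']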
|
Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
|
def _construct_field_operator_expression_dict(expression_list):
"""Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
"""
between_operators = (u'<=', u'>=')
inverse_operator = {u'>=': u'<=', u'<=': u'>='}
local_field_to_expressions = {}
remaining_expression_list = deque([])
for expression in expression_list:
if all((
isinstance(expression, BinaryComposition),
expression.operator in between_operators,
isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField)
)):
if isinstance(expression.right, LocalField):
new_operator = inverse_operator[expression.operator]
new_expression = BinaryComposition(new_operator, expression.right, expression.left)
else:
new_expression = expression
field_name = new_expression.left.field_name
expressions_dict = local_field_to_expressions.setdefault(field_name, {})
expressions_dict.setdefault(new_expression.operator, []).append(new_expression)
else:
remaining_expression_list.append(expression)
return local_field_to_expressions, remaining_expression_list
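The normalization step flips any comparison whose LocalField sits on the right, so that every recorded expression is keyed by its field name with the field on the left. With illustrative operands:
inverse_operator = {u'>=': u'<=', u'<=': u'>='}

# '5 <= some_field' arrives as BinaryComposition(u'<=', Literal(5), LocalField('some_field')).
# The LocalField is on the right, so the operator is inverted and the operands swapped:
#   BinaryComposition(u'>=', LocalField('some_field'), Literal(5))
# which then lands in local_field_to_expressions['some_field'][u'>='].
assert inverse_operator[u'<='] == u'>='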
|
Return a new expression, with any eligible comparisons lowered to `between` clauses.
|
def _lower_expressions_to_between(base_expression):
"""Return a new expression, with any eligible comparisons lowered to `between` clauses."""
    expression_list = list(_extract_conjunction_elements_from_expression(base_expression))
if len(expression_list) == 0:
raise AssertionError(u'Received empty expression_list {} from base_expression: '
u'{}'.format(expression_list, base_expression))
elif len(expression_list) == 1:
return base_expression
else:
between_operators = (u'<=', u'>=')
local_field_to_expressions, new_expression_list = _construct_field_operator_expression_dict(
expression_list)
lowering_occurred = False
for field_name in local_field_to_expressions:
expressions_dict = local_field_to_expressions[field_name]
if all(operator in expressions_dict and len(expressions_dict[operator]) == 1
for operator in between_operators):
field = LocalField(field_name)
lower_bound = expressions_dict[u'>='][0].right
upper_bound = expressions_dict[u'<='][0].right
new_expression_list.appendleft(BetweenClause(field, lower_bound, upper_bound))
lowering_occurred = True
else:
                for expressions in expressions_dict.values():
                    new_expression_list.extend(expressions)
if lowering_occurred:
return _expression_list_to_conjunction(list(new_expression_list))
else:
return base_expression
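End to end, the lowering only fires when both bounds appear exactly once for a field; sketched with illustrative predicates:
# where: (f >= 3) && (f <= 7) && (g == 1)
#   'f' has exactly one '>=' and one '<=' expression, so it lowers to:
# where: BETWEEN(f, 3, 7) && (g == 1)
#
# where: (f >= 3) && (f >= 5) && (f <= 7)
#   'f' has two '>=' expressions, so no lowering occurs and the
#   original conjunction is returned unchanged.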
|
Return a new MatchQuery, with all eligible comparison filters lowered to between clauses.
|
def lower_comparisons_to_between(match_query):
"""Return a new MatchQuery, with all eligible comparison filters lowered to between clauses."""
new_match_traversals = []
for current_match_traversal in match_query.match_traversals:
new_traversal = []
for step in current_match_traversal:
if step.where_block:
expression = step.where_block.predicate
new_where_block = Filter(_lower_expressions_to_between(expression))
new_traversal.append(step._replace(where_block=new_where_block))
else:
new_traversal.append(step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
|
Ensure that all arguments expected by the query were actually provided.
|
def _ensure_arguments_are_provided(expected_types, arguments):
"""Ensure that all arguments expected by the query were actually provided."""
# This function only checks that the arguments were specified,
# and does not check types. Type checking is done as part of the actual formatting step.
expected_arg_names = set(six.iterkeys(expected_types))
provided_arg_names = set(six.iterkeys(arguments))
if expected_arg_names != provided_arg_names:
missing_args = expected_arg_names - provided_arg_names
unexpected_args = provided_arg_names - expected_arg_names
raise GraphQLInvalidArgumentError(u'Missing or unexpected arguments found: '
u'missing {}, unexpected '
u'{}'.format(missing_args, unexpected_args))
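The check is plain set arithmetic; with hypothetical argument names:
expected_arg_names = {'min_age', 'name'}
provided_arg_names = {'name', 'extra'}

missing_args = expected_arg_names - provided_arg_names     # {'min_age'}
unexpected_args = provided_arg_names - expected_arg_names  # {'extra'}
assert missing_args == {'min_age'} and unexpected_args == {'extra'}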
|
Insert the arguments into the compiled GraphQL query to form a complete query.
Args:
compilation_result: a CompilationResult object derived from the GraphQL compiler
arguments: dict, mapping argument name to its value, for every parameter the query expects.
Returns:
string, a query in the appropriate output language, with inserted argument data
|
def insert_arguments_into_query(compilation_result, arguments):
"""Insert the arguments into the compiled GraphQL query to form a complete query.
Args:
compilation_result: a CompilationResult object derived from the GraphQL compiler
arguments: dict, mapping argument name to its value, for every parameter the query expects.
Returns:
string, a query in the appropriate output language, with inserted argument data
"""
_ensure_arguments_are_provided(compilation_result.input_metadata, arguments)
if compilation_result.language == MATCH_LANGUAGE:
return insert_arguments_into_match_query(compilation_result, arguments)
elif compilation_result.language == GREMLIN_LANGUAGE:
return insert_arguments_into_gremlin_query(compilation_result, arguments)
elif compilation_result.language == SQL_LANGUAGE:
return insert_arguments_into_sql_query(compilation_result, arguments)
else:
raise AssertionError(u'Unrecognized language in compilation result: '
u'{}'.format(compilation_result))
|
Ensure that the QueryRoot block is valid.
|
def validate(self):
"""Ensure that the QueryRoot block is valid."""
if not (isinstance(self.start_class, set) and
all(isinstance(x, six.string_types) for x in self.start_class)):
raise TypeError(u'Expected set of string start_class, got: {} {}'.format(
type(self.start_class).__name__, self.start_class))
for cls in self.start_class:
validate_safe_string(cls)
|
Return a unicode object with the Gremlin representation of this block.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
if len(self.start_class) == 1:
# The official Gremlin documentation claims that this approach
# is generally faster than the one below, since it makes using indexes easier.
# http://gremlindocs.spmallette.documentup.com/#filter/has
start_class = list(self.start_class)[0]
        return u"g.V('@class', {})".format(safe_quoted_string(start_class))
else:
start_classes_list = ','.join(safe_quoted_string(x) for x in self.start_class)
return u'g.V.has(\'@class\', T.in, [{}])'.format(start_classes_list)
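With hypothetical start classes, the two branches render as follows (the multi-class form iterates over a set, so the order of entries inside the `[...]` literal is not guaranteed):
# start_class == {'Person'}:
#   g.V('@class', 'Person')
# start_class == {'Person', 'Animal'}:
#   g.V.has('@class', T.in, ['Person','Animal'])   # entry order may vary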
|
Ensure that the CoerceType block is valid.
|
def validate(self):
"""Ensure that the CoerceType block is valid."""
if not (isinstance(self.target_class, set) and
all(isinstance(x, six.string_types) for x in self.target_class)):
raise TypeError(u'Expected set of string target_class, got: {} {}'.format(
type(self.target_class).__name__, self.target_class))
for cls in self.target_class:
validate_safe_string(cls)
|
Ensure that the ConstructResult block is valid.
|
def validate(self):
"""Ensure that the ConstructResult block is valid."""
if not isinstance(self.fields, dict):
raise TypeError(u'Expected dict fields, got: {} {}'.format(
type(self.fields).__name__, self.fields))
for key, value in six.iteritems(self.fields):
validate_safe_string(key)
if not isinstance(value, Expression):
raise TypeError(
u'Expected Expression values in the fields dict, got: '
u'{} -> {}'.format(key, value))
|
Create an updated version (if needed) of the ConstructResult via the visitor pattern.
|
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the ConstructResult via the visitor pattern."""
new_fields = {}
for key, value in six.iteritems(self.fields):
new_value = value.visit_and_update(visitor_fn)
if new_value is not value:
new_fields[key] = new_value
if new_fields:
return ConstructResult(dict(self.fields, **new_fields))
else:
return self
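`dict(self.fields, **new_fields)` builds a shallow copy in which the updated keys win, leaving the original mapping intact; a quick demonstration with placeholder values:
base = {'name': 'old_expression', 'age': 'age_expression'}
updated = {'name': 'new_expression'}

merged = dict(base, **updated)
assert merged == {'name': 'new_expression', 'age': 'age_expression'}
assert base == {'name': 'old_expression', 'age': 'age_expression'}  # untouched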
|
Return a unicode object with the Gremlin representation of this block.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
template = (
u'transform{{'
u'it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([ {} ])'
u'}}')
field_representations = (
u'{name}: {expr}'.format(name=key, expr=self.fields[key].to_gremlin())
for key in sorted(self.fields.keys()) # Sort the keys for deterministic output order.
)
return template.format(u', '.join(field_representations))
|
Ensure that the Filter block is valid.
|
def validate(self):
"""Ensure that the Filter block is valid."""
if not isinstance(self.predicate, Expression):
raise TypeError(u'Expected Expression predicate, got: {} {}'.format(
type(self.predicate).__name__, self.predicate))
|
Create an updated version (if needed) of the Filter via the visitor pattern.
|
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the Filter via the visitor pattern."""
new_predicate = self.predicate.visit_and_update(visitor_fn)
if new_predicate is not self.predicate:
return Filter(new_predicate)
else:
return self
|
Return a unicode object with the Gremlin representation of this block.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
mark_name, _ = self.location.get_location_name()
return u'as({})'.format(safe_quoted_string(mark_name))
|
Ensure that the Traverse block is valid.
|
def validate(self):
"""Ensure that the Traverse block is valid."""
if not isinstance(self.direction, six.string_types):
raise TypeError(u'Expected string direction, got: {} {}'.format(
type(self.direction).__name__, self.direction))
validate_edge_direction(self.direction)
validate_safe_string(self.edge_name)
if not isinstance(self.optional, bool):
raise TypeError(u'Expected bool optional, got: {} {}'.format(
type(self.optional).__name__, self.optional))
if not isinstance(self.within_optional_scope, bool):
raise TypeError(u'Expected bool within_optional_scope, got: {} '
u'{}'.format(type(self.within_optional_scope).__name__,
self.within_optional_scope))
|
Return a unicode object with the Gremlin representation of this block.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
if self.optional:
# Optional edges have to be handled differently than non-optionals, since the compiler
# provides the guarantee that properties read from an optional, non-existing location
# always resolve to a "null" value. This guarantee is not upheld by default by Gremlin;
# in fact, Gremlin .as('foo').out().as('bar').optional('foo') does not provide
# ANY guarantees as to what the value at any "bar.*" is -- it could be "null",
# it could be a previous pipeline element's location at "bar.*" or anything else.
# The .ifThenElse block ensures that the edge traversal happens only if the edge exists,
# and that otherwise the result in the pipeline is replaced with "null".
#
# The code below makes the assumption that links to outward/inward edges are stored
# as vertex properties named "<direction>_<edge_name>" where direction is "in" or "out".
# For example, the links to outward edges named "Person_SpeechBy" from Person
# are assumed to be stored as "out_Person_SpeechBy" on the Person node.
return (u'ifThenElse{{it.{direction}_{edge_name} == null}}'
u'{{null}}{{it.{direction}({edge_quoted})}}'.format(
direction=self.direction,
edge_name=self.edge_name,
edge_quoted=safe_quoted_string(self.edge_name)))
elif self.within_optional_scope:
# During a traversal, the pipeline element may be null.
# The following code returns null when the current pipeline entity is null
# (an optional edge did not exist at some earlier traverse).
# Otherwise it performs a normal traversal (previous optional edge did exist).
return (u'ifThenElse{{it == null}}'
u'{{null}}{{it.{direction}({edge_quoted})}}'.format(
direction=self.direction,
edge_quoted=safe_quoted_string(self.edge_name)))
else:
return u'{direction}({edge})'.format(
direction=self.direction,
edge=safe_quoted_string(self.edge_name))
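For a hypothetical `out` traversal of an edge named `Person_SpeechBy`, the three branches above render as:
# optional=True:
#   ifThenElse{it.out_Person_SpeechBy == null}{null}{it.out('Person_SpeechBy')}
# within_optional_scope=True:
#   ifThenElse{it == null}{null}{it.out('Person_SpeechBy')}
# plain traversal:
#   out('Person_SpeechBy')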
|
Ensure that the Recurse block is valid.
|
def validate(self):
"""Ensure that the Traverse block is valid."""
validate_edge_direction(self.direction)
validate_safe_string(self.edge_name)
if not isinstance(self.within_optional_scope, bool):
raise TypeError(u'Expected bool within_optional_scope, got: {} '
u'{}'.format(type(self.within_optional_scope).__name__,
self.within_optional_scope))
if not isinstance(self.depth, int):
raise TypeError(u'Expected int depth, got: {} {}'.format(
type(self.depth).__name__, self.depth))
if not (self.depth >= 1):
raise ValueError(u'depth ({}) >= 1 does not hold!'.format(self.depth))
|
Return a unicode object with the Gremlin representation of this block.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
template = 'copySplit({recurse}).exhaustMerge'
recurse_base = '_()'
recurse_traversal = '.{direction}(\'{edge_name}\')'.format(
direction=self.direction, edge_name=self.edge_name)
recurse_steps = [
recurse_base + (recurse_traversal * i)
for i in six.moves.xrange(self.depth + 1)
]
recursion_string = template.format(recurse=','.join(recurse_steps))
if self.within_optional_scope:
# During a traversal, the pipeline element may be null.
# The following code returns null when the current pipeline entity is null
# (an optional edge did not exist at some earlier traverse).
# Otherwise it performs a normal recursion (previous optional edge did exist).
recurse_template = u'ifThenElse{{it == null}}{{null}}{{it.{recursion_string}}}'
return recurse_template.format(recursion_string=recursion_string)
else:
return recursion_string
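For a hypothetical recursion with direction `out`, edge name `Animal_ParentOf`, and depth 2, the rendered pipeline enumerates every depth from 0 to 2:
# copySplit(
#     _(),
#     _().out('Animal_ParentOf'),
#     _().out('Animal_ParentOf').out('Animal_ParentOf')
# ).exhaustMerge
# (emitted as a single line, without the whitespace shown here)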
|
Ensure that the Backtrack block is valid.
|
def validate(self):
"""Ensure that the Backtrack block is valid."""
validate_marked_location(self.location)
if not isinstance(self.optional, bool):
raise TypeError(u'Expected bool optional, got: {} {}'.format(
type(self.optional).__name__, self.optional))
|
Return a unicode object with the Gremlin representation of this BasicBlock.
|
def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this BasicBlock."""
self.validate()
if self.optional:
operation = u'optional'
else:
operation = u'back'
mark_name, _ = self.location.get_location_name()
return u'{operation}({mark_name})'.format(
operation=operation,
mark_name=safe_quoted_string(mark_name))
|
Ensure the Fold block is valid.
|
def validate(self):
"""Ensure the Fold block is valid."""
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} '
u'{}'.format(type(self.fold_scope_location), self.fold_scope_location))
|
Lower the IR blocks into a form that can be represented by a SQL query.
Args:
ir_blocks: list of IR blocks to lower into SQL-compatible form
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
tree representation of IR blocks for recursive traversal by SQL backend.
|
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None):
"""Lower the IR blocks into a form that can be represented by a SQL query.
Args:
ir_blocks: list of IR blocks to lower into SQL-compatible form
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
tree representation of IR blocks for recursive traversal by SQL backend.
"""
_validate_all_blocks_supported(ir_blocks, query_metadata_table)
construct_result = _get_construct_result(ir_blocks)
query_path_to_location_info = _map_query_path_to_location_info(query_metadata_table)
query_path_to_output_fields = _map_query_path_to_outputs(
construct_result, query_path_to_location_info)
block_index_to_location = _map_block_index_to_location(ir_blocks)
# perform lowering steps
ir_blocks = lower_unary_transformations(ir_blocks)
ir_blocks = lower_unsupported_metafield_expressions(ir_blocks)
# iteratively construct SqlTree
query_path_to_node = {}
query_path_to_filters = {}
tree_root = None
for index, block in enumerate(ir_blocks):
if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES):
continue
location = block_index_to_location[index]
if isinstance(block, (blocks.QueryRoot,)):
query_path = location.query_path
if tree_root is not None:
raise AssertionError(
u'Encountered QueryRoot {} but tree root is already set to {} during '
u'construction of SQL query tree for IR blocks {} with query '
u'metadata table {}'.format(
block, tree_root, ir_blocks, query_metadata_table))
tree_root = SqlNode(block=block, query_path=query_path)
query_path_to_node[query_path] = tree_root
        elif isinstance(block, blocks.Filter):
            # Associate the filter with the query path of its own location,
            # not whichever query path was last assigned by a QueryRoot block.
            query_path_to_filters.setdefault(location.query_path, []).append(block)
else:
raise AssertionError(
u'Unsupported block {} unexpectedly passed validation for IR blocks '
u'{} with query metadata table {} .'.format(block, ir_blocks, query_metadata_table))
return SqlQueryTree(tree_root, query_path_to_location_info, query_path_to_output_fields,
query_path_to_filters, query_path_to_node)
|
Validate that all IR blocks and ConstructResult fields passed to the backend are supported.
Args:
ir_blocks: List[BasicBlock], IR blocks to validate.
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Raises:
NotImplementedError, if any block or ConstructResult field is unsupported.
|
def _validate_all_blocks_supported(ir_blocks, query_metadata_table):
"""Validate that all IR blocks and ConstructResult fields passed to the backend are supported.
Args:
ir_blocks: List[BasicBlock], IR blocks to validate.
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Raises:
NotImplementedError, if any block or ConstructResult field is unsupported.
"""
if len(ir_blocks) < 3:
raise AssertionError(
u'Unexpectedly attempting to validate IR blocks with fewer than 3 blocks. A minimal '
u'query is expected to have at least a QueryRoot, GlobalOperationsStart, and '
u'ConstructResult block. The query metadata table is {}.'.format(query_metadata_table))
construct_result = _get_construct_result(ir_blocks)
unsupported_blocks = []
unsupported_fields = []
for block in ir_blocks[:-1]:
if isinstance(block, constants.SUPPORTED_BLOCK_TYPES):
continue
if isinstance(block, constants.SKIPPABLE_BLOCK_TYPES):
continue
unsupported_blocks.append(block)
for field_name, field in six.iteritems(construct_result.fields):
if not isinstance(field, constants.SUPPORTED_OUTPUT_EXPRESSION_TYPES):
unsupported_fields.append((field_name, field))
elif field.location.field in constants.UNSUPPORTED_META_FIELDS:
unsupported_fields.append((field_name, field))
if len(unsupported_blocks) > 0 or len(unsupported_fields) > 0:
raise NotImplementedError(
u'Encountered unsupported blocks {} and unsupported fields {} during construction of '
u'SQL query tree for IR blocks {} with query metadata table {}.'.format(
unsupported_blocks, unsupported_fields, ir_blocks, query_metadata_table))
|
Return the ConstructResult block from a list of IR blocks.
|
def _get_construct_result(ir_blocks):
"""Return the ConstructResult block from a list of IR blocks."""
last_block = ir_blocks[-1]
if not isinstance(last_block, blocks.ConstructResult):
raise AssertionError(
u'The last IR block {} for IR blocks {} was unexpectedly not '
u'a ConstructResult block.'.format(last_block, ir_blocks))
return last_block
|
Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
|
def _map_query_path_to_location_info(query_metadata_table):
"""Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
"""
query_path_to_location_info = {}
for location, location_info in query_metadata_table.registered_locations:
if not isinstance(location, Location):
continue
if location.query_path in query_path_to_location_info:
# make sure the stored location information equals the new location information
# for the fields the SQL backend requires.
equivalent_location_info = query_path_to_location_info[location.query_path]
if not _location_infos_equal(location_info, equivalent_location_info):
raise AssertionError(
u'Differing LocationInfos at query_path {} between {} and {}. Expected '
u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '
u'and types to be equal for LocationInfos sharing the same query path.'.format(
location.query_path, location_info, equivalent_location_info))
query_path_to_location_info[location.query_path] = location_info
return query_path_to_location_info
|
Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise.
LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth,
recursive scopes depth, types and parent query paths are equal.
Args:
left: LocationInfo, left location info object to compare.
right: LocationInfo, right location info object to compare.
Returns:
bool, True if LocationInfo objects equivalent, False otherwise.
|
def _location_infos_equal(left, right):
"""Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise.
LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth,
recursive scopes depth, types and parent query paths are equal.
Args:
left: LocationInfo, left location info object to compare.
right: LocationInfo, right location info object to compare.
Returns:
bool, True if LocationInfo objects equivalent, False otherwise.
"""
if not isinstance(left, LocationInfo) or not isinstance(right, LocationInfo):
raise AssertionError(
u'Unsupported LocationInfo comparison between types {} and {} '
u'with values {}, {}'.format(type(left), type(right), left, right))
optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth)
    parent_query_paths_equal = (
        (left.parent_location is None and right.parent_location is None) or
        (left.parent_location is not None and right.parent_location is not None and
         left.parent_location.query_path == right.parent_location.query_path))
recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth)
types_equal = left.type == right.type
return all([
optional_scopes_depth_equal,
parent_query_paths_equal,
recursive_scopes_depths_equal,
types_equal,
])
|
Assign the output fields of a ConstructResult block to their respective query_path.
|
def _map_query_path_to_outputs(construct_result, query_path_to_location_info):
"""Assign the output fields of a ConstructResult block to their respective query_path."""
query_path_to_output_fields = {}
for output_name, field in six.iteritems(construct_result.fields):
field_name = field.location.field
output_query_path = field.location.query_path
output_field_info = constants.SqlOutput(
field_name=field_name,
output_name=output_name,
graphql_type=query_path_to_location_info[output_query_path].type)
output_field_mapping = query_path_to_output_fields.setdefault(output_query_path, [])
output_field_mapping.append(output_field_info)
return query_path_to_output_fields
|
Associate each IR block with its corresponding location, by index.
|
def _map_block_index_to_location(ir_blocks):
"""Associate each IR block with its corresponding location, by index."""
block_index_to_location = {}
# MarkLocation blocks occur after the blocks related to that location.
# The core approach here is to buffer blocks until their MarkLocation is encountered
# after which all buffered blocks can be associated with the encountered MarkLocation.location.
current_block_ixs = []
for num, ir_block in enumerate(ir_blocks):
if isinstance(ir_block, blocks.GlobalOperationsStart):
if len(current_block_ixs) > 0:
unassociated_blocks = [ir_blocks[ix] for ix in current_block_ixs]
raise AssertionError(
u'Unexpectedly encountered global operations before mapping blocks '
u'{} to their respective locations.'.format(unassociated_blocks))
break
current_block_ixs.append(num)
if isinstance(ir_block, blocks.MarkLocation):
for ix in current_block_ixs:
block_index_to_location[ix] = ir_block.location
current_block_ixs = []
return block_index_to_location
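A walk-through of the buffering with a toy block list (indices on the left; location names are illustrative):
# 0: QueryRoot               -> buffered
# 1: Filter                  -> buffered
# 2: MarkLocation(loc_a)     -> indices 0, 1, 2 all map to loc_a; buffer cleared
# 3: Traverse                -> buffered
# 4: MarkLocation(loc_b)     -> indices 3, 4 map to loc_b; buffer cleared
# 5: GlobalOperationsStart   -> loop breaks; the buffer must be empty here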
|
Raise exception if any unary transformation block encountered.
|
def lower_unary_transformations(ir_blocks):
"""Raise exception if any unary transformation block encountered."""
def visitor_fn(expression):
"""Raise error if current expression is a UnaryTransformation."""
if not isinstance(expression, expressions.UnaryTransformation):
return expression
raise NotImplementedError(
u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by '
u'the SQL backend.'.format(expression, ir_blocks)
)
new_ir_blocks = [
block.visit_and_update_expressions(visitor_fn)
for block in ir_blocks
]
return new_ir_blocks
|
Raise exception if an unsupported metafield is encountered in any LocalField expression.
|
def lower_unsupported_metafield_expressions(ir_blocks):
"""Raise exception if an unsupported metafield is encountered in any LocalField expression."""
def visitor_fn(expression):
"""Visitor function raising exception for any unsupported metafield."""
if not isinstance(expression, expressions.LocalField):
return expression
if expression.field_name not in constants.UNSUPPORTED_META_FIELDS:
return expression
raise NotImplementedError(
u'Encountered unsupported metafield {} in LocalField {} during construction of '
u'SQL query tree for IR blocks {}.'.format(
constants.UNSUPPORTED_META_FIELDS[expression.field_name], expression, ir_blocks))
new_ir_blocks = [
block.visit_and_update_expressions(visitor_fn)
for block in ir_blocks
]
return new_ir_blocks
|
Compile the GraphQL input using the schema into a MATCH query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to MATCH, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
|
def graphql_to_match(schema, graphql_query, parameters, type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a MATCH query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to MATCH, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
"""
compilation_result = compile_graphql_to_match(
schema, graphql_query, type_equivalence_hints=type_equivalence_hints)
return compilation_result._replace(
query=insert_arguments_into_query(compilation_result, parameters))
|
Compile the GraphQL input using the schema into a SQL query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to SQL, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
compiler_metadata: SqlMetadata object, provides SQLAlchemy specific backend
information
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
|
def graphql_to_sql(schema, graphql_query, parameters, compiler_metadata,
type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a SQL query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to SQL, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
compiler_metadata: SqlMetadata object, provides SQLAlchemy specific backend
information
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
"""
compilation_result = compile_graphql_to_sql(
schema, graphql_query, compiler_metadata, type_equivalence_hints=type_equivalence_hints)
return compilation_result._replace(
query=insert_arguments_into_query(compilation_result, parameters))
|
Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to Gremlin, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
|
def graphql_to_gremlin(schema, graphql_query, parameters, type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_query: the GraphQL query to compile to Gremlin, as a string
parameters: dict, mapping argument name to its value, for every parameter the query expects.
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object, containing:
- query: string, the resulting compiled and parameterized query string
- language: string, specifying the language to which the query was compiled
- output_metadata: dict, output name -> OutputMetadata namedtuple object
- input_metadata: dict, name of input variables -> inferred GraphQL type, based on use
"""
compilation_result = compile_graphql_to_gremlin(
schema, graphql_query, type_equivalence_hints=type_equivalence_hints)
return compilation_result._replace(
query=insert_arguments_into_query(compilation_result, parameters))
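A minimal usage sketch follows; the schema object and the Animal type are illustrative assumptions, while graphql_to_gremlin and the CompilationResult fields come from the code above.
# Hypothetical usage; `schema` is assumed to be a GraphQLSchema whose root
# exposes an `Animal` type with a `name` property.
graphql_query = '''{
    Animal {
        name @output(out_name: "animal_name")
    }
}'''
result = graphql_to_gremlin(schema, graphql_query, parameters={})
print(result.query)            # the compiled Gremlin query string
print(result.output_metadata)  # maps 'animal_name' to its OutputMetadata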
|
Construct a GraphQL schema from an OrientDB schema.
Args:
schema_data: list of dicts describing the classes in the OrientDB schema. The following
format is the way the data is structured in OrientDB 2. See
the README.md file for an example of how to query this data.
Each dict has the following string fields:
- name: string, the name of the class.
- superClasses (optional): list of strings, the names of the class's
superclasses.
- superClass (optional): string, the name of the class's superclass. May be
used instead of superClasses if there is only one
superClass. Used for backwards compatibility with
OrientDB.
- customFields (optional): dict, string -> string, data defined on the class
instead of instances of the class.
- abstract: bool, true if the class is abstract.
- properties: list of dicts, describing the class's properties.
Each property dictionary has the following string fields:
- name: string, the name of the property.
- type: int, builtin OrientDB type ID of the property.
See schema_properties.py for the mapping.
- linkedType (optional): int, if the property is a
collection of builtin OrientDB
objects, then it indicates their
type ID.
- linkedClass (optional): string, if the property is a
collection of class instances,
then it indicates the name of
the class. If the class is an edge
class, and the field name is
either 'in' or 'out', then it
describes the name of an
endpoint of the edge.
- defaultValue: string, the textual representation of the
default value for the property, as
returned by OrientDB's schema
introspection code, e.g., '{}' for
the embedded set type. Note that if the
property is a collection type, it must
have a default value.
class_to_field_type_overrides: optional dict, class name -> {field name -> field type},
(string -> {string -> GraphQLType}). Used to override the
type of a field in the class where it's first defined and all
the class's subclasses.
hidden_classes: optional set of strings, classes to not include in the GraphQL schema.
Returns:
tuple of (GraphQL schema object, GraphQL type equivalence hints dict).
The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
|
def get_graphql_schema_from_orientdb_schema_data(schema_data, class_to_field_type_overrides=None,
hidden_classes=None):
"""Construct a GraphQL schema from an OrientDB schema.
Args:
schema_data: list of dicts describing the classes in the OrientDB schema. The following
format is the way the data is structured in OrientDB 2. See
the README.md file for an example of how to query this data.
Each dict has the following string fields:
- name: string, the name of the class.
- superClasses (optional): list of strings, the names of the class's
superclasses.
- superClass (optional): string, the name of the class's superclass. May be
used instead of superClasses if there is only one
superClass. Used for backwards compatibility with
OrientDB.
- customFields (optional): dict, string -> string, data defined on the class
instead of instances of the class.
- abstract: bool, true if the class is abstract.
- properties: list of dicts, describing the class's properties.
Each property dictionary has the following string fields:
- name: string, the name of the property.
- type: int, builtin OrientDB type ID of the property.
See schema_properties.py for the mapping.
- linkedType (optional): int, if the property is a
collection of builtin OrientDB
objects, then it indicates their
type ID.
- linkedClass (optional): string, if the property is a
collection of class instances,
then it indicates the name of
the class. If the class is an edge
class, and the field name is
either 'in' or 'out', then it
describes the name of an
endpoint of the edge.
- defaultValue: string, the textual representation of the
default value for the property, as
returned by OrientDB's schema
introspection code, e.g., '{}' for
the embedded set type. Note that if the
property is a collection type, it must
have a default value.
class_to_field_type_overrides: optional dict, class name -> {field name -> field type},
(string -> {string -> GraphQLType}). Used to override the
type of a field in the class where it's first defined and all
the class's subclasses.
hidden_classes: optional set of strings, classes to not include in the GraphQL schema.
Returns:
tuple of (GraphQL schema object, GraphQL type equivalence hints dict).
The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
"""
if class_to_field_type_overrides is None:
class_to_field_type_overrides = dict()
if hidden_classes is None:
hidden_classes = set()
schema_graph = SchemaGraph(schema_data)
return get_graphql_schema_from_schema_graph(schema_graph, class_to_field_type_overrides,
hidden_classes)
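A sketch of the expected input shape (illustrative only: a real OrientDB schema export also includes built-in base classes such as V and E, and type ID 7 is assumed here to denote the string type per schema_properties.py):
schema_data = [
    {
        'name': 'Animal',
        'abstract': False,
        'properties': [
            {'name': 'name', 'type': 7, 'defaultValue': None},
        ],
    },
]
schema, type_equivalence_hints = get_graphql_schema_from_orientdb_schema_data(schema_data)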
|
Return a MATCH query string from a list of IR blocks.
|
def emit_code_from_ir(ir_blocks, compiler_metadata):
"""Return a MATCH query string from a list of IR blocks."""
gremlin_steps = (
block.to_gremlin()
for block in ir_blocks
)
# OutputSource blocks translate to empty steps.
# Discard such empty steps so we don't end up with an incorrect concatenation.
non_empty_steps = (
step
for step in gremlin_steps
if step
)
return u'.'.join(non_empty_steps)
|
Start the built-in webserver, bound to the host and port you'd like.
Default host is `127.0.0.1` and port 8080.
:param host: The host you want to bind the built-in webserver to
:param port: The port number you want the webserver to run on
:param debug: Set to `True` to enable debug level logging
:param kwargs: Additional arguments you'd like to pass to Flask
|
def start(self, host='127.0.0.1', port=None, debug=False, **kwargs):
"""
Start the built-in webserver, bound to the host and port you'd like.
Default host is `127.0.0.1` and port 8080.
:param host: The host you want to bind the built-in webserver to
:param port: The port number you want the webserver to run on
:param debug: Set to `True` to enable debug level logging
:param kwargs: Additional arguments you'd like to pass to Flask
"""
self.server.run(host=host, port=port, debug=debug, **kwargs)
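Usage sketch, assuming `bot` is an instance of the class defining `start` above (its `server` attribute wraps a Flask app; host and port values are illustrative):
bot.start(host='0.0.0.0', port=8080, debug=True)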
|
Logs in the user via the given login and password.
|
def login(request):
'''
Logs in the user via the given login and password.
'''
serializer_class = registration_settings.LOGIN_SERIALIZER_CLASS
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.get_authenticated_user()
if not user:
raise BadRequest('Login or password invalid.')
extra_data = perform_login(request, user)
return get_ok_response('Login successful', extra_data=extra_data)
|
Logs out the user. Returns an error if the user is not
authenticated.
|
def logout(request):
'''
Logs out the user. Returns an error if the user is not
authenticated.
'''
user = request.user
serializer = LogoutSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
if should_authenticate_session():
auth.logout(request)
if should_retrieve_token() and data['revoke_token']:
try:
user.auth_token.delete()
except Token.DoesNotExist:
raise BadRequest('Cannot remove non-existent token')
return get_ok_response('Logout successful')
|
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
This function was copied from rest_framework.generics because of issue #36.
|
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"""
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
This function was copied from rest_framework.generics because of issue #36.
"""
try:
return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
raise Http404
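Usage sketch with a hypothetical Django model `Article`: a non-integer primary key raises Http404 instead of surfacing a ValueError to the client.
article = get_object_or_404(Article.objects.all(), pk=request.GET.get('id'))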
|
Get or set user profile.
|
def profile(request):
'''
Get or set user profile.
'''
serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS
if request.method in ['POST', 'PUT', 'PATCH']:
partial = request.method == 'PATCH'
serializer = serializer_class(
instance=request.user,
data=request.data,
partial=partial,
)
serializer.is_valid(raise_exception=True)
serializer.save()
else: # request.method == 'GET':
serializer = serializer_class(instance=request.user)
return Response(serializer.data)
|
Register new user.
|
def register(request):
'''
Register new user.
'''
serializer_class = registration_settings.REGISTER_SERIALIZER_CLASS
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
kwargs = {}
if registration_settings.REGISTER_VERIFICATION_ENABLED:
verification_flag_field = get_user_setting('VERIFICATION_FLAG_FIELD')
kwargs[verification_flag_field] = False
email_field = get_user_setting('EMAIL_FIELD')
if (email_field not in serializer.validated_data
or not serializer.validated_data[email_field]):
raise BadRequest("User without email cannot be verified")
user = serializer.save(**kwargs)
output_serializer_class = registration_settings.REGISTER_OUTPUT_SERIALIZER_CLASS # noqa: E501
output_serializer = output_serializer_class(instance=user)
user_data = output_serializer.data
if registration_settings.REGISTER_VERIFICATION_ENABLED:
signer = RegisterSigner({
'user_id': user.pk,
}, request=request)
template_config = (
registration_settings.REGISTER_VERIFICATION_EMAIL_TEMPLATES)
send_verification_notification(user, signer, template_config)
return Response(user_data, status=status.HTTP_201_CREATED)
|
Verify registration via signature.
|
def verify_registration(request):
"""
Verify registration via signature.
"""
user = process_verify_registration_data(request.data)
extra_data = None
if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN:
extra_data = perform_login(request, user)
return get_ok_response('User verified successfully', extra_data=extra_data)
|
Return the list of this package's requirements, read from the given local file path.
|
def get_requirements(requirements_filepath):
'''
Return the list of this package's requirements, read from the given local file path.
'''
requirements = []
with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f:
for line in f:
if line.startswith('#'):
continue
line = line.rstrip()
if not line:
continue
requirements.append(line)
return requirements
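A typical setup.py usage sketch; the package name is illustrative, and 'requirements.txt' is assumed to sit in ROOT_DIR next to setup.py.
from setuptools import setup
setup(
    name='my-package',  # illustrative
    install_requires=get_requirements('requirements.txt'),
)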
|
Change the user password.
|
def change_password(request):
'''
Change the user password.
'''
serializer = ChangePasswordSerializer(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['password'])
user.save()
return get_ok_response('Password changed successfully')
|
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
|
def parse_template_config(template_config_data):
"""
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
"""
try:
subject_template_name = template_config_data['subject']
except KeyError:
raise ImproperlyConfigured("No 'subject' key found")
body_template_name = template_config_data.get('body')
text_body_template_name = template_config_data.get('text_body')
html_body_template_name = template_config_data.get('html_body')
is_html_body = template_config_data.get('is_html')
convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
if html_body_template_name and text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=identity,
)
elif html_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=html_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=convert_html_to_text,
)
elif text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
elif body_template_name:
if is_html_body:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=body_template_name,
text_body_processor=convert_html_to_text,
)
else:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
else:
raise ImproperlyConfigured(
'Could not parse template config data: {template_config_data}'.format( # noqa: E501
template_config_data=template_config_data))
_validate_template_name_existence(config.subject_template_name)
_validate_template_name_existence(config.text_body_template_name)
if config.html_body_template_name:
_validate_template_name_existence(config.html_body_template_name)
assert callable(config.text_body_processor)
return config
|
Send email with reset password link.
|
def send_reset_password_link(request):
'''
Send email with reset password link.
'''
if not registration_settings.RESET_PASSWORD_VERIFICATION_ENABLED:
raise Http404()
serializer = SendResetPasswordLinkSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
login = serializer.validated_data['login']
user = None
for login_field in get_login_fields():
user = get_user_by_lookup_dict(
{login_field: login}, default=None, require_verified=False)
if user:
break
if not user:
raise UserNotFound()
signer = ResetPasswordSigner({
'user_id': user.pk,
}, request=request)
template_config = (
registration_settings.RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES)
send_verification_notification(user, signer, template_config)
return get_ok_response('Reset link sent')
|
r"""
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click
... <a href="https://example.com">here</a>
... </body></html>''', preserve_urls=True)
'Look & click here (https://example.com)'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click
... <a href="https://example.com?timestamp=1234">here</a>
... </body></html>''', preserve_urls=True)
'Look & click here (https://example.com?timestamp=1234)'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click here
... </body></html>''', preserve_urls=True)
'Look & click here'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click on
... <a href="https://example.com">https://example.com</a>
... </body></html>''', preserve_urls=True)
'Look & click on https://example.com'
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br> click
... <a href="https://example.com">me</a>
... </body></html>''', preserve_urls=True)
"I'm here,\nclick me (https://example.com)"
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br/> click
... <a href="https://example.com">me</a>
... </body></html>''', preserve_urls=True)
"I'm here,\nclick me (https://example.com)"
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br/> click
... <a href="https://example.com">me</a>
... </body></html>''')
"I'm here,\nclick me"
>>> convert_html_to_text(
... '''
... <html><body>
... <p>I'm here!</p>
... <p>Click <a href="https://example.com">me</a></p>
... </body></html>''', preserve_urls=True)
"I'm here!\nClick me (https://example.com)\n"
>>> convert_html_to_text(
... '''
... <html>
... <head>
... <title>I'm here</title>
... </head>
... <body>
... <p>I'm here!</p>
... <p>Click <a href="https://example.com">me</a></p>
... </body>
... </html>''', preserve_urls=True)
"I'm here!\nClick me (https://example.com)\n"
|
def convert_html_to_text(value, preserve_urls=False):
r"""
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click
... <a href="https://example.com">here</a>
... </body></html>''', preserve_urls=True)
'Look & click here (https://example.com)'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click
... <a href="https://example.com?timestamp=1234">here</a>
... </body></html>''', preserve_urls=True)
'Look & click here (https://example.com?timestamp=1234)'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click here
... </body></html>''', preserve_urls=True)
'Look & click here'
>>> convert_html_to_text(
... '''
... <html><body>
... Look & click on
... <a href="https://example.com">https://example.com</a>
... </body></html>''', preserve_urls=True)
'Look & click on https://example.com'
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br> click
... <a href="https://example.com">me</a>
... </body></html>''', preserve_urls=True)
"I'm here,\nclick me (https://example.com)"
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br/> click
... <a href="https://example.com">me</a>
... </body></html>''', preserve_urls=True)
"I'm here,\nclick me (https://example.com)"
>>> convert_html_to_text(
... '''
... <html><body>
... I'm here, <br/> click
... <a href="https://example.com">me</a>
... </body></html>''')
"I'm here,\nclick me"
>>> convert_html_to_text(
... '''
... <html><body>
... <p>I'm here!</p>
... <p>Click <a href="https://example.com">me</a></p>
... </body></html>''', preserve_urls=True)
"I'm here!\nClick me (https://example.com)\n"
>>> convert_html_to_text(
... '''
... <html>
... <head>
... <title>I'm here</title>
... </head>
... <body>
... <p>I'm here!</p>
... <p>Click <a href="https://example.com">me</a></p>
... </body>
... </html>''', preserve_urls=True)
"I'm here!\nClick me (https://example.com)\n"
"""
s = MLStripper(preserve_urls=preserve_urls)
s.feed(value)
s.close()
return s.get_data()
|
Register new email.
|
def register_email(request):
'''
Register new email.
'''
user = request.user
serializer = RegisterEmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data['email']
template_config = (
registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES)
if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED:
signer = RegisterEmailSigner({
'user_id': user.pk,
'email': email,
}, request=request)
send_verification_notification(
user, signer, template_config, email=email)
else:
email_field = get_user_setting('EMAIL_FIELD')
setattr(user, email_field, email)
user.save()
return get_ok_response('Register email link email sent')
|
This is a sample implementation of a BOT in a LINE group.
Invite your BOT to a group, and it will auto-accept your invitation.
Commands available:
> hi
> /author
|
def RECEIVE_MESSAGE(op):
'''
This is a sample implementation of a BOT in a LINE group.
Invite your BOT to a group, and it will auto-accept your invitation.
Commands available:
> hi
> /author
'''
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
try:
# Check content only text message
if msg.contentType == 0:
# Check only group chat
if msg.toType == 2:
# Chat checked request
line.sendChatChecked(receiver, msg_id)
# Get sender contact
contact = line.getContact(sender)
# Command list
if text.lower() == 'hi':
line.log('[%s] %s' % (contact.displayName, text))
line.sendMessage(receiver, 'Hi too! How are you?')
elif text.lower() == '/author':
line.log('[%s] %s' % (contact.displayName, text))
line.sendMessage(receiver, 'My author is linepy')
except Exception as e:
line.log("[RECEIVE_MESSAGE] ERROR : " + str(e))
|
Takes an axis name ('x' or 'y'), a list of ticks and the corresponding tick
labels, and returns the necessary axis options for the given configuration.
|
def _get_ticks(data, xy, ticks, ticklabels):
"""Gets a {'x','y'}, a number of ticks and ticks labels, and returns the
necessary axis options for the given configuration.
"""
axis_options = []
pgfplots_ticks = []
pgfplots_ticklabels = []
is_label_required = False
for tick, ticklabel in zip(ticks, ticklabels):
pgfplots_ticks.append(tick)
# store the label anyway
label = ticklabel.get_text()
if ticklabel.get_visible():
label = mpl_backend_pgf.common_texification(label)
pgfplots_ticklabels.append(label)
else:
is_label_required = True
# Check if the label is necessary. If one of the labels is, then all of them
# must appear in the TikZ plot.
if label:
try:
label_float = float(label.replace(u"\N{MINUS SIGN}", "-"))
is_label_required = is_label_required or (label and label_float != tick)
except ValueError:
is_label_required = True
# Leave the ticks to PGFPlots if not in STRICT mode and if there are no explicit
# labels.
if data["strict"] or is_label_required:
if pgfplots_ticks:
ff = data["float format"]
axis_options.append(
"{}tick={{{}}}".format(
xy, ",".join([ff.format(el) for el in pgfplots_ticks])
)
)
else:
val = "{}" if "minor" in xy else "\\empty"
axis_options.append("{}tick={}".format(xy, val))
if is_label_required:
axis_options.append(
"{}ticklabels={{{}}}".format(xy, ",".join(pgfplots_ticklabels))
)
return axis_options
|
Find out if the object is in fact a color bar.
|
def _is_colorbar_heuristic(obj):
"""Find out if the object is in fact a color bar.
"""
# TODO come up with something more accurate here
# Might help:
# TODO Are the colorbars exactly the l.collections.PolyCollection's?
try:
aspect = float(obj.get_aspect())
except ValueError:
# e.g., aspect == 'equal'
return False
# Assume that something is a colorbar if and only if the ratio is above 5.0
# and there are no ticks on the corresponding axis. This isn't always true,
# though: The ratio of a colorbar can be freely adjusted by the aspect
# keyword, e.g.,
#
# plt.colorbar(im, aspect=5)
#
limit_ratio = 5.0
return (aspect >= limit_ratio and len(obj.get_xticks()) == 0) or (
aspect <= 1.0 / limit_ratio and len(obj.get_yticks()) == 0
)
|
Converts a color map as given in matplotlib to a color map as
represented in PGFPlots.
|
def _mpl_cmap2pgf_cmap(cmap, data):
"""Converts a color map as given in matplotlib to a color map as
represented in PGFPlots.
"""
if isinstance(cmap, mpl.colors.LinearSegmentedColormap):
return _handle_linear_segmented_color_map(cmap, data)
assert isinstance(
cmap, mpl.colors.ListedColormap
), "Only LinearSegmentedColormap and ListedColormap are supported"
return _handle_listed_color_map(cmap, data)
|
Scales the array X such that it contains only integers.
|
def _scale_to_int(X, max_val=None):
"""
Scales the array X such that it contains only integers.
"""
if max_val is None:
X = X / _gcd_array(X)
else:
X = X / max(1 / max_val, _gcd_array(X))
return [int(entry) for entry in X]
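Behavior sketch, assuming the _gcd helper (not shown here) computes a floating-point gcd: every entry below shares the common divisor 0.5, so the scaled result is a list of small integers; max_val, when given, clamps the divisor to at least 1/max_val to bound the resulting integers.
print(_scale_to_int([0.5, 1.0, 2.5]))  # -> [1, 2, 5]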
|
Return the largest real value h such that all elements in X are integer
multiples of h.
|
def _gcd_array(X):
"""
Return the largest real value h such that all elements in X are integer
multiples of h.
"""
greatest_common_divisor = 0.0
for x in X:
greatest_common_divisor = _gcd(greatest_common_divisor, x)
return greatest_common_divisor
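The _gcd helper folded over above is not part of this snippet; a minimal sketch of a floating-point Euclidean gcd with a small tolerance could look as follows (an assumption, not necessarily the project's exact implementation):
def _gcd(a, b, eps=1.0e-12):
    # Euclidean algorithm extended to floats; _gcd(0.0, x) returns x,
    # which is why _gcd_array can safely start the fold from 0.0.
    while b > eps:
        a, b = b, a % b
    return a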
|
Given two data points (X[0], Y[0]) and (X[1], Y[1]), linearly interpolate at x.
|
def _linear_interpolation(x, X, Y):
"""Given two data points [X,Y], linearly interpolate those at x.
"""
return (Y[1] * (x - X[0]) + Y[0] * (X[1] - x)) / (X[1] - X[0])
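Worked example: the line through (0, 1) and (2, 5) evaluated at x = 1 gives (5*1 + 1*1) / 2 = 3.
print(_linear_interpolation(1.0, [0.0, 2.0], [1.0, 5.0]))  # -> 3.0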
|
A rather poor way of telling whether an axis has a colorbar associated:
Check the next axis environment, and see if it is de facto a color bar;
if yes, return the color bar object.
|
def _find_associated_colorbar(obj):
"""A rather poor way of telling whether an axis has a colorbar associated:
Check the next axis environment, and see if it is de facto a color bar;
if yes, return the color bar object.
"""
for child in obj.get_children():
try:
cbar = child.colorbar
except AttributeError:
continue
if cbar is not None: # really necessary?
# if fetch was successful, cbar contains
# (reference to colorbar,
# reference to axis containing colorbar)
return cbar
return None
|
Adds legend code.
|
def draw_legend(data, obj):
"""Adds legend code.
"""
texts = []
children_alignment = []
for text in obj.texts:
texts.append("{}".format(text.get_text()))
children_alignment.append("{}".format(text.get_horizontalalignment()))
# Get the location.
# http://matplotlib.org/api/legend_api.html
loc = obj._loc if obj._loc != 0 else _get_location_from_best(obj)
pad = 0.03
position, anchor = {
1: (None, None), # upper right
2: ([pad, 1.0 - pad], "north west"), # upper left
3: ([pad, pad], "south west"), # lower left
4: ([1.0 - pad, pad], "south east"), # lower right
5: ([1.0 - pad, 0.5], "east"), # right
6: ([3 * pad, 0.5], "west"), # center left
7: ([1.0 - 3 * pad, 0.5], "east"), # center right
8: ([0.5, 3 * pad], "south"), # lower center
9: ([0.5, 1.0 - 3 * pad], "north"), # upper center
10: ([0.5, 0.5], "center"), # center
}[loc]
# In case of given position via bbox_to_anchor parameter the center
# of legend is changed as follows:
if obj._bbox_to_anchor:
bbox_center = obj.get_bbox_to_anchor()._bbox._points[1]
position = [bbox_center[0], bbox_center[1]]
legend_style = []
if position:
ff = data["float format"]
legend_style.append(
("at={{(" + ff + "," + ff + ")}}").format(position[0], position[1])
)
if anchor:
legend_style.append("anchor={}".format(anchor))
# Get the edgecolor of the box
if obj.get_frame_on():
edgecolor = obj.get_frame().get_edgecolor()
data, frame_xcolor, _ = mycol.mpl_color2xcolor(data, edgecolor)
if frame_xcolor != "black": # black is default
legend_style.append("draw={}".format(frame_xcolor))
else:
legend_style.append("draw=none")
# Get the facecolor of the box
facecolor = obj.get_frame().get_facecolor()
data, fill_xcolor, _ = mycol.mpl_color2xcolor(data, facecolor)
if fill_xcolor != "white": # white is default
legend_style.append("fill={}".format(fill_xcolor))
# Get the horizontal alignment
try:
alignment = children_alignment[0]
except IndexError:
alignment = None
for child_alignment in children_alignment:
if alignment != child_alignment:
warnings.warn("Varying horizontal alignments in the legend. Using default.")
alignment = None
break
if alignment:
data["current axes"].axis_options.append(
"legend cell align={{{}}}".format(alignment)
)
if obj._ncol != 1:
data["current axes"].axis_options.append("legend columns={}".format(obj._ncol))
# Write styles to data
if legend_style:
style = "legend style={{{}}}".format(", ".join(legend_style))
data["current axes"].axis_options.append(style)
return data
|
Returns an available filename.
:param file_kind: Name under which numbering is recorded, such as 'img' or
'table'.
:type file_kind: str
:param ext: Filename extension.
:type ext: str
:returns: (filename, rel_filepath) where filename is a path in the
filesystem and rel_filepath is the path to be used in the tex
code.
|
def new_filename(data, file_kind, ext):
"""Returns an available filename.
:param file_kind: Name under which numbering is recorded, such as 'img' or
'table'.
:type file_kind: str
:param ext: Filename extension.
:type ext: str
:returns: (filename, rel_filepath) where filename is a path in the
filesystem and rel_filepath is the path to be used in the tex
code.
"""
nb_key = file_kind + "number"
if nb_key not in data.keys():
data[nb_key] = -1
if not data["override externals"]:
# Make sure not to overwrite anything.
file_exists = True
while file_exists:
data[nb_key] = data[nb_key] + 1
filename, name = _gen_filename(data, nb_key, ext)
file_exists = os.path.isfile(filename)
else:
data[nb_key] = data[nb_key] + 1
filename, name = _gen_filename(data, nb_key, ext)
if data["rel data path"]:
rel_filepath = posixpath.join(data["rel data path"], name)
else:
rel_filepath = name
return filename, rel_filepath
|
Adds code for drawing an ordinary path in PGFPlots (TikZ).
|
def draw_path(data, path, draw_options=None, simplify=None):
"""Adds code for drawing an ordinary path in PGFPlots (TikZ).
"""
# For some reasons, matplotlib sometimes adds void paths which consist of
# only one point and have 0 fill opacity. To not let those clutter the
# output TeX file, bail out here.
if (
len(path.vertices) == 2
and all(path.vertices[0] == path.vertices[1])
and "fill opacity=0" in draw_options
):
return data, "", None, False
nodes = []
ff = data["float format"]
prev = None
for vert, code in path.iter_segments(simplify=simplify):
# nschloe, Oct 2, 2015:
# The transform call yields warnings and it is unclear why. Perhaps
# the input data is not suitable? Anyhow, this should not happen.
# Comment out for now.
# vert = numpy.asarray(
# _transform_to_data_coordinates(obj, [vert[0]], [vert[1]])
# )
# For path codes see: http://matplotlib.org/api/path_api.html
#
# if code == mpl.path.Path.STOP: pass
is_area = False
if code == mpl.path.Path.MOVETO:
nodes.append(("(axis cs:" + ff + "," + ff + ")").format(*vert))
elif code == mpl.path.Path.LINETO:
nodes.append(("--(axis cs:" + ff + "," + ff + ")").format(*vert))
elif code == mpl.path.Path.CURVE3:
# Quadratic Bezier curves aren't natively supported in TikZ, but
# can be emulated as cubic Beziers.
# From
# http://www.latex-community.org/forum/viewtopic.php?t=4424&f=45:
# If you really need a quadratic Bézier curve on the points P0, P1
# and P2, then a process called 'degree elevation' yields the cubic
# control points (Q0, Q1, Q2 and Q3) as follows:
# CODE: SELECT ALL
# Q0 = P0
# Q1 = 1/3 P0 + 2/3 P1
# Q2 = 2/3 P1 + 1/3 P2
# Q3 = P2
#
# P0 is the point of the previous step which is needed to compute
# Q1.
#
# Cannot draw quadratic Bezier curves as the beginning of a path
assert prev is not None
Q1 = 1.0 / 3.0 * prev + 2.0 / 3.0 * vert[0:2]
Q2 = 2.0 / 3.0 * vert[0:2] + 1.0 / 3.0 * vert[2:4]
Q3 = vert[2:4]
nodes.append(
(
".. controls (axis cs:"
+ ff
+ ","
+ ff
+ ") "
+ "and (axis cs:"
+ ff
+ ","
+ ff
+ ") "
+ ".. (axis cs:"
+ ff
+ ","
+ ff
+ ")"
).format(Q1[0], Q1[1], Q2[0], Q2[1], Q3[0], Q3[1])
)
elif code == mpl.path.Path.CURVE4:
# Cubic Bezier curves.
nodes.append(
(
".. controls (axis cs:"
+ ff
+ ","
+ ff
+ ") "
+ "and (axis cs:"
+ ff
+ ","
+ ff
+ ") "
+ ".. (axis cs:"
+ ff
+ ","
+ ff
+ ")"
).format(*vert)
)
else:
assert code == mpl.path.Path.CLOSEPOLY
nodes.append("--cycle")
is_area = True
# Store the previous point for quadratic Beziers.
prev = vert[0:2]
do = "[{}]".format(", ".join(draw_options)) if draw_options else ""
path_command = "\\path {}\n{};\n".format(do, "\n".join(nodes))
return data, path_command, draw_options, is_area
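The degree-elevation identity used for CURVE3 segments can be checked numerically: the elevated cubic coincides with the original quadratic Bezier at every parameter value. A self-contained verification sketch (helper names are illustrative):
import numpy
def quadratic_bezier(t, P0, P1, P2):
    return (1 - t) ** 2 * P0 + 2 * (1 - t) * t * P1 + t ** 2 * P2
def cubic_bezier(t, Q0, Q1, Q2, Q3):
    return ((1 - t) ** 3 * Q0 + 3 * (1 - t) ** 2 * t * Q1
            + 3 * (1 - t) * t ** 2 * Q2 + t ** 3 * Q3)
P0, P1, P2 = numpy.array([0.0, 0.0]), numpy.array([1.0, 2.0]), numpy.array([3.0, 0.0])
Q1 = P0 / 3.0 + 2.0 * P1 / 3.0
Q2 = 2.0 * P1 / 3.0 + P2 / 3.0
for t in numpy.linspace(0.0, 1.0, 11):
    assert numpy.allclose(quadratic_bezier(t, P0, P1, P2),
                          cubic_bezier(t, P0, Q1, Q2, P2))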
|
Returns PGFPlots code for a path collection object (e.g., a scatter plot).
|
def draw_pathcollection(data, obj):
"""Returns PGFPlots code for a number of patch objects.
"""
content = []
# gather data
assert obj.get_offsets() is not None
labels = ["x" + 21 * " ", "y" + 21 * " "]
dd = obj.get_offsets()
draw_options = ["only marks"]
table_options = []
if obj.get_array() is not None:
draw_options.append("scatter")
dd = numpy.column_stack([dd, obj.get_array()])
labels.append("colordata" + 13 * " ")
draw_options.append("scatter src=explicit")
table_options.extend(["x=x", "y=y", "meta=colordata"])
ec = None
fc = None
ls = None
else:
# gather the draw options
try:
ec = obj.get_edgecolors()[0]
except (TypeError, IndexError):
ec = None
try:
fc = obj.get_facecolors()[0]
except (TypeError, IndexError):
fc = None
try:
ls = obj.get_linestyle()[0]
except (TypeError, IndexError):
ls = None
is_contour = len(dd) == 1
if is_contour:
draw_options = ["draw=none"]
# `only mark` plots don't need linewidth
data, extra_draw_options = get_draw_options(data, obj, ec, fc, ls, None)
draw_options.extend(extra_draw_options)
if obj.get_cmap():
mycolormap, is_custom_cmap = _mpl_cmap2pgf_cmap(obj.get_cmap(), data)
draw_options.append("colormap" + ("=" if is_custom_cmap else "/") + mycolormap)
legend_text = get_legend_text(obj)
if legend_text is None and has_legend(obj.axes):
draw_options.append("forget plot")
for path in obj.get_paths():
if is_contour:
dd = path.vertices
if len(obj.get_sizes()) == len(dd):
# See Pgfplots manual, chapter 4.25.
# In PGFPlots, \mark size specifies radii; in matplotlib, sizes are circle areas.
radii = numpy.sqrt(obj.get_sizes() / numpy.pi)
dd = numpy.column_stack([dd, radii])
labels.append("sizedata" + 14 * " ")
draw_options.extend(
[
"visualization depends on="
"{\\thisrow{sizedata} \\as\\perpointmarksize}",
"scatter/@pre marker code/.append style="
"{/tikz/mark size=\\perpointmarksize}",
]
)
do = " [{}]".format(", ".join(draw_options)) if draw_options else ""
content.append("\\addplot{}\n".format(do))
to = " [{}]".format(", ".join(table_options)) if table_options else ""
content.append("table{}{{%\n".format(to))
content.append((" ".join(labels)).strip() + "\n")
ff = data["float format"]
fmt = (" ".join(dd.shape[1] * [ff])) + "\n"
for d in dd:
content.append(fmt.format(*tuple(d)))
content.append("};\n")
if legend_text is not None:
content.append("\\addlegendentry{{{}}}\n".format(legend_text))
return data, content
|
Get the draw options for a given (patch) object.
|
def get_draw_options(data, obj, ec, fc, style, width):
"""Get the draw options for a given (patch) object.
"""
draw_options = []
if ec is not None:
data, col, ec_rgba = color.mpl_color2xcolor(data, ec)
if ec_rgba[3] != 0.0:
# Don't draw if it's invisible anyways.
draw_options.append("draw={}".format(col))
if fc is not None:
data, col, fc_rgba = color.mpl_color2xcolor(data, fc)
if fc_rgba[3] != 0.0:
# Don't draw if it's invisible anyways.
draw_options.append("fill={}".format(col))
# handle transparency
ff = data["float format"]
if (
ec is not None
and fc is not None
and ec_rgba[3] != 1.0
and ec_rgba[3] == fc_rgba[3]
):
draw_options.append(("opacity=" + ff).format(ec[3]))
else:
if ec is not None and ec_rgba[3] != 1.0:
draw_options.append(("draw opacity=" + ff).format(ec_rgba[3]))
if fc is not None and fc_rgba[3] != 1.0:
draw_options.append(("fill opacity=" + ff).format(fc_rgba[3]))
if width is not None:
w = mpl_linewidth2pgfp_linewidth(data, width)
if w:
draw_options.append(w)
if style is not None:
ls = mpl_linestyle2pgfplots_linestyle(style)
if ls is not None and ls != "solid":
draw_options.append(ls)
return data, draw_options
|
Translates a line style of matplotlib to the corresponding style
in PGFPlots.
|
def mpl_linestyle2pgfplots_linestyle(line_style, line=None):
"""Translates a line style of matplotlib to the corresponding style
in PGFPlots.
"""
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where onoffseq
# is an even length tuple of on and off ink in points.
#
# solid: [(None, None), (None, None), ..., (None, None)]
# dashed: (0, (6.0, 6.0))
# dotted: (0, (1.0, 3.0))
# dashdot: (0, (3.0, 5.0, 1.0, 5.0))
if isinstance(line_style, tuple):
if line_style[0] is None:
return None
if len(line_style[1]) == 2:
return "dash pattern=on {}pt off {}pt".format(*line_style[1])
assert len(line_style[1]) == 4
return "dash pattern=on {}pt off {}pt on {}pt off {}pt".format(*line_style[1])
if isinstance(line, mpl.lines.Line2D) and line.is_dashed():
# see matplotlib.lines.Line2D.set_dashes
# get defaults
default_dashOffset, default_dashSeq = mpl.lines._get_dash_pattern(line_style)
# get dash format of line under test
dashSeq = line._us_dashSeq
dashOffset = line._us_dashOffset
lst = list()
if dashSeq != default_dashSeq:
# generate own dash sequence
format_string = " ".join(len(dashSeq) // 2 * ["on {}pt off {}pt"])
lst.append("dash pattern=" + format_string.format(*dashSeq))
if dashOffset != default_dashOffset:
lst.append("dash phase={}pt".format(dashOffset))
if len(lst) > 0:
return ", ".join(lst)
return {
"": None,
"None": None,
"none": None, # happens when using plt.boxplot()
"-": "solid",
"solid": "solid",
":": "dotted",
"--": "dashed",
"-.": "dash pattern=on 1pt off 3pt on 3pt off 3pt",
}[line_style]
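A few behavior checks for the common cases: string styles map to named PGFPlots styles, while dash tuples translate to explicit dash patterns.
assert mpl_linestyle2pgfplots_linestyle('--') == 'dashed'
assert mpl_linestyle2pgfplots_linestyle(':') == 'dotted'
assert (mpl_linestyle2pgfplots_linestyle((0, (6.0, 6.0)))
        == 'dash pattern=on 6.0pt off 6.0pt')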
|
Returns the PGFPlots code for a graphics environment holding a
rendering of the object.
|
def draw_quadmesh(data, obj):
"""Returns the PGFPlots code for an graphics environment holding a
rendering of the object.
"""
content = []
# Generate file name for current object
filename, rel_filepath = files.new_filename(data, "img", ".png")
# Get the dpi for rendering and store the original dpi of the figure
dpi = data["dpi"]
fig_dpi = obj.figure.get_dpi()
obj.figure.set_dpi(dpi)
# Render the object and save as png file
from matplotlib.backends.backend_agg import RendererAgg
cbox = obj.get_clip_box()
width = int(round(cbox.extents[2]))
height = int(round(cbox.extents[3]))
ren = RendererAgg(width, height, dpi)
obj.draw(ren)
# Generate a image from the render buffer
image = Image.frombuffer(
"RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1
)
# Crop the image to the actual content (removing the regions otherwise
# used for axes, etc.)
# 'image.crop' expects the crop box to specify the left, upper, right, and
# lower pixel. 'cbox.extents' gives the left, lower, right, and upper
# pixel.
box = (
int(round(cbox.extents[0])),
0,
int(round(cbox.extents[2])),
int(round(cbox.extents[3] - cbox.extents[1])),
)
cropped = image.crop(box)
cropped.save(filename)
# Restore the original dpi of the figure
obj.figure.set_dpi(fig_dpi)
# write the corresponding information to the TikZ file
extent = obj.axes.get_xlim() + obj.axes.get_ylim()
# Explicitly use \pgfimage as includegraphics command, as the default
# \includegraphics fails unexpectedly in some cases
ff = data["float format"]
content.append(
(
"\\addplot graphics [includegraphics cmd=\\pgfimage,"
"xmin=" + ff + ", xmax=" + ff + ", "
"ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
).format(*(extent + (rel_filepath,)))
)
return data, content
|
Translates a matplotlib color specification into a proper LaTeX xcolor.
|
def mpl_color2xcolor(data, matplotlib_color):
"""Translates a matplotlib color specification into a proper LaTeX xcolor.
"""
# Convert it to RGBA.
my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color))
# If the alpha channel is exactly 0, then the color is really 'none'
# regardless of the RGB channels.
if my_col[-1] == 0.0:
return data, "none", my_col
xcol = None
# RGB values (as taken from xcolor.dtx):
available_colors = {
# List white first such that for gray values, the combination
# white!<x>!black is preferred over, e.g., gray!<y>!black. Note that
# the order of the dictionary is respected from Python 3.6 on.
"white": numpy.array([1, 1, 1]),
"lightgray": numpy.array([0.75, 0.75, 0.75]),
"gray": numpy.array([0.5, 0.5, 0.5]),
"darkgray": numpy.array([0.25, 0.25, 0.25]),
"black": numpy.array([0, 0, 0]),
#
"red": numpy.array([1, 0, 0]),
"green": numpy.array([0, 1, 0]),
"blue": numpy.array([0, 0, 1]),
"brown": numpy.array([0.75, 0.5, 0.25]),
"lime": numpy.array([0.75, 1, 0]),
"orange": numpy.array([1, 0.5, 0]),
"pink": numpy.array([1, 0.75, 0.75]),
"purple": numpy.array([0.75, 0, 0.25]),
"teal": numpy.array([0, 0.5, 0.5]),
"violet": numpy.array([0.5, 0, 0.5]),
# The colors cyan, magenta, yellow, and olive are also
# predefined by xcolor, but their RGB approximation of the
# native CMYK values is not very good. Don't use them here.
}
available_colors.update(data["custom colors"])
# Check if it exactly matches any of the colors already available.
# This case is actually treated below (alpha==1), but that loop
# may pick up combinations with black before finding the exact
# match. Hence, first check all colors.
for name, rgb in available_colors.items():
if all(my_col[:3] == rgb):
xcol = name
return data, xcol, my_col
# Check if my_col is a multiple of a predefined color and 'black'.
for name, rgb in available_colors.items():
if name == "black":
continue
if rgb[0] != 0.0:
alpha = my_col[0] / rgb[0]
elif rgb[1] != 0.0:
alpha = my_col[1] / rgb[1]
else:
assert rgb[2] != 0.0
alpha = my_col[2] / rgb[2]
# The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are
# already accounted for by checking in available_colors above.
if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0:
xcol = name + ("!{}!black".format(alpha * 100))
return data, xcol, my_col
# Lookup failed, add it to the custom list.
xcol = "color" + str(len(data["custom colors"]))
data["custom colors"][xcol] = my_col[:3]
return data, xcol, my_col
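Behavior sketch; the data dict only needs a 'custom colors' entry for this function to run.
data = {'custom colors': {}}
data, name, _ = mpl_color2xcolor(data, 'red')
print(name)  # -> 'red' (exact match against a predefined xcolor name)
data, name, _ = mpl_color2xcolor(data, (0.5, 0.0, 0.0))
print(name)  # -> 'red!50.0!black' (a multiple of a predefined color)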
|
Return the PGFPlots code for patches.
|
def draw_patch(data, obj):
"""Return the PGFPlots code for patches.
"""
# Gather the draw options.
data, draw_options = mypath.get_draw_options(
data,
obj,
obj.get_edgecolor(),
obj.get_facecolor(),
obj.get_linestyle(),
obj.get_linewidth(),
)
if isinstance(obj, mpl.patches.Rectangle):
# rectangle specialization
return _draw_rectangle(data, obj, draw_options)
elif isinstance(obj, mpl.patches.Ellipse):
# ellipse specialization
return _draw_ellipse(data, obj, draw_options)
# regular patch
data, path_command, _, _ = mypath.draw_path(
data, obj.get_path(), draw_options=draw_options
)
return data, path_command
|
Returns PGFPlots code for a number of patch objects.
|
def draw_patchcollection(data, obj):
"""Returns PGFPlots code for a number of patch objects.
"""
content = []
# Gather the draw options.
try:
ec = obj.get_edgecolor()[0]
except IndexError:
ec = None
try:
fc = obj.get_facecolor()[0]
except IndexError:
fc = None
try:
ls = obj.get_linestyle()[0]
except IndexError:
ls = None
try:
w = obj.get_linewidth()[0]
except IndexError:
w = None
data, draw_options = mypath.get_draw_options(data, obj, ec, fc, ls, w)
paths = obj.get_paths()
for path in paths:
data, cont, draw_options, is_area = mypath.draw_path(
data, path, draw_options=draw_options
)
content.append(cont)
if _is_in_legend(obj):
# Unfortunately, patch legend entries need \addlegendimage in Pgfplots.
tpe = "area legend" if is_area else "line legend"
do = ", ".join([tpe] + draw_options) if draw_options else ""
content += [
"\\addlegendimage{{{}}}\n".format(do),
"\\addlegendentry{{{}}}\n\n".format(obj.get_label()),
]
else:
content.append("\n")
return data, content
|
Return the PGFPlots code for rectangles.
|
def _draw_rectangle(data, obj, draw_options):
"""Return the PGFPlots code for rectangles.
"""
# Objects with labels are plot objects (from bar charts, etc). Even those without
# labels explicitly set have a label of "_nolegend_". Everything else should be
# skipped because they likely correspond to axis/legend objects which are handled by
# PGFPlots.
label = obj.get_label()
if label == "":
return data, []
# Get actual label, bar charts by default only give rectangles labels of
# "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>.
handles, labels = obj.axes.get_legend_handles_labels()
labelsFound = [
label for h, label in zip(handles, labels) if obj in h.get_children()
]
if len(labelsFound) == 1:
label = labelsFound[0]
left_lower_x = obj.get_x()
left_lower_y = obj.get_y()
ff = data["float format"]
cont = (
"\\draw[{}] (axis cs:" + ff + "," + ff + ") "
"rectangle (axis cs:" + ff + "," + ff + ");\n"
).format(
",".join(draw_options),
left_lower_x,
left_lower_y,
left_lower_x + obj.get_width(),
left_lower_y + obj.get_height(),
)
if label != "_nolegend_" and label not in data["rectangle_legends"]:
data["rectangle_legends"].add(label)
cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format(
",".join(draw_options)
)
cont += "\\addlegendentry{{{}}}\n\n".format(label)
return data, cont
|
Return the PGFPlots code for ellipses.
|
def _draw_ellipse(data, obj, draw_options):
"""Return the PGFPlots code for ellipses.
"""
if isinstance(obj, mpl.patches.Circle):
# circle specialization
return _draw_circle(data, obj, draw_options)
x, y = obj.center
ff = data["float format"]
if obj.angle != 0:
fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}"
draw_options.append(fmt.format(obj.angle, x, y))
cont = (
"\\draw[{}] (axis cs:"
+ ff
+ ","
+ ff
+ ") ellipse ("
+ ff
+ " and "
+ ff
+ ");\n"
).format(",".join(draw_options), x, y, 0.5 * obj.width, 0.5 * obj.height)
return data, cont
|
Return the PGFPlots code for circles.
|
def _draw_circle(data, obj, draw_options):
"""Return the PGFPlots code for circles.
"""
x, y = obj.center
ff = data["float format"]
cont = ("\\draw[{}] (axis cs:" + ff + "," + ff + ") circle (" + ff + ");\n").format(
",".join(draw_options), x, y, obj.get_radius()
)
return data, cont
|
Returns the PGFPlots code for an image environment.
|
def draw_image(data, obj):
"""Returns the PGFPlots code for an image environment.
"""
content = []
filename, rel_filepath = files.new_filename(data, "img", ".png")
# store the image as in a file
img_array = obj.get_array()
dims = img_array.shape
if len(dims) == 2: # the values are given as one real number: look at cmap
clims = obj.get_clim()
mpl.pyplot.imsave(
fname=filename,
arr=img_array,
cmap=obj.get_cmap(),
vmin=clims[0],
vmax=clims[1],
origin=obj.origin,
)
else:
# RGB (+alpha) information at each point
assert len(dims) == 3 and dims[2] in [3, 4]
# convert to PIL image
if obj.origin == "lower":
img_array = numpy.flipud(img_array)
# Convert mpl image to PIL
image = PIL.Image.fromarray(numpy.uint8(img_array * 255))
# If the input image is PIL:
# image = PIL.Image.fromarray(img_array)
image.save(filename, origin=obj.origin)
# write the corresponding information to the TikZ file
extent = obj.get_extent()
# the format specification will only accept tuples
if not isinstance(extent, tuple):
extent = tuple(extent)
# Explicitly use \pgfimage as includegraphics command, as the default
# \includegraphics fails unexpectedly in some cases
ff = data["float format"]
content.append(
(
"\\addplot graphics [includegraphics cmd=\\pgfimage,"
"xmin=" + ff + ", xmax=" + ff + ", "
"ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
).format(*(extent + (rel_filepath,)))
)
return data, content
|
Check whether the line appears in the legend; return its legend text if so, else None.
|
def get_legend_text(obj):
"""Check if line is in legend.
"""
leg = obj.axes.get_legend()
if leg is None:
return None
keys = [l.get_label() for l in leg.legendHandles if l is not None]
values = [l.get_text() for l in leg.texts]
label = obj.get_label()
d = dict(zip(keys, values))
if label in d:
return d[label]
return None
|
The coordinates might not be in data coordinates, but could sometimes be in axes
coordinates. For example, the matplotlib command
axes.axvline(2)
will have the y coordinates set to 0 and 1, not to the limits. Therefore, a
two-stage transform has to be applied:
1. first transforming to display coordinates, then
2. from display to data.
|
def transform_to_data_coordinates(obj, xdata, ydata):
"""The coordinates might not be in data coordinates, but could be sometimes in axes
coordinates. For example, the matplotlib command
axes.axvline(2)
will have the y coordinates set to 0 and 1, not to the limits. Therefore, a
two-stage transform has to be applied:
1. first transforming to display coordinates, then
2. from display to data.
"""
if obj.axes is not None and obj.get_transform() != obj.axes.transData:
points = numpy.array([xdata, ydata]).T
transform = matplotlib.transforms.composite_transform_factory(
obj.get_transform(), obj.axes.transData.inverted()
)
return transform.transform(points).T
return xdata, ydata
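Usage sketch: an axvline stores its y-data in axes coordinates (0 to 1); the two-stage transform recovers the actual data-coordinate span. The pyplot setup and limits are illustrative.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.set_ylim(-3.0, 3.0)
line = ax.axvline(2.0)
xdata, ydata = transform_to_data_coordinates(line, *line.get_data())
print(ydata)  # approximately [-3.0, 3.0]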
|
Main function. Here, the recursion into the image starts and the
contents are picked up. The actual file gets written in this routine.
:param figure: either a Figure object or 'gcf' (default).
:param figurewidth: If not ``None``, this will be used as figure width
within the TikZ/PGFPlots output. If ``figureheight``
is not given, ``matplotlib2tikz`` will try to preserve
the original width/height ratio.
Note that ``figurewidth`` can be a string literal,
such as ``'\\figurewidth'``.
:type figurewidth: str
:param figureheight: If not ``None``, this will be used as figure height
within the TikZ/PGFPlots output. If ``figurewidth`` is
not given, ``matplotlib2tikz`` will try to preserve
the original width/height ratio. Note that
``figurewidth`` can be a string literal, such as
``'\\figureheight'``.
:type figureheight: str
:param textsize: The text size (in pt) that the target latex document is
using. Default is 10.0.
:type textsize: float
:param tex_relative_path_to_data: In some cases, the TikZ file will have to
refer to another file, e.g., a PNG for
image plots. When ``\\input`` into a
regular LaTeX document, the additional
file is looked for in a folder relative
to the LaTeX file, not the TikZ file.
This argument optionally sets the
relative path from the LaTeX file to the
data.
:type tex_relative_path_to_data: str
:param externalize_tables: Whether or not to externalize plot data tables
into tsv files.
:type externalize_tables: bool
:param override_externals: Whether or not to override existing external
files (such as tsv or images) with conflicting
names (the alternative is to choose other
names).
:type override_externals: bool
:param strict: Whether or not to strictly stick to matplotlib's appearance.
This influences, for example, whether tick marks are set
exactly as in the matplotlib plot, or if TikZ/PGFPlots
can decide where to put the ticks.
:type strict: bool
:param wrap: Whether ``'\\begin{tikzpicture}'`` and
``'\\end{tikzpicture}'`` will be written. One might need to
provide custom arguments to the environment (e.g., scale=...).
Default is ``True``.
:type wrap: bool
:param add_axis_environment: Whether ``'\\begin{axis}[...]'`` and
``'\\end{axis}'`` will be written. One needs to
set the environment in the document. If ``False``
additionally sets ``wrap=False``. Default is ``True``.
:type add_axis_environment: bool
:param extra_axis_parameters: Extra axis options to be passed (as a list or set)
to pgfplots. Default is ``None``.
:type extra_axis_parameters: a list or set of strings for the pgfplots axes.
:param extra_tikzpicture_parameters: Extra tikzpicture options to be passed
(as a set) to pgfplots.
:type extra_tikzpicture_parameters: a set of strings for the pgfplots
tikzpicture.
:param dpi: The resolution in dots per inch of the rendered image in case
of QuadMesh plots. If ``None`` it will default to the value
``savefig.dpi`` from matplotlib.rcParams. Default is ``None``.
:type dpi: int
:param show_info: Show extra info on the command line. Default is ``False``.
:type show_info: bool
:param include_disclaimer: Include matplotlib2tikz disclaimer in the output.
Set ``False`` to make tests reproducible.
Default is ``True``.
:type include_disclaimer: bool
:param standalone: Include wrapper code for a standalone LaTeX file.
:type standalone: bool
:param float_format: Format for float entities. Default is ``"{:.15g}"``.
:type float_format: str
:param table_row_sep: Row separator for table data. Default is ``"\\n"``.
:type table_row_sep: str
:returns: None
The following optional attributes of matplotlib's objects are recognized
and handled:
- axes.Axes._matplotlib2tikz_anchors
This attribute can be set to a list of ((x,y), anchor_name) tuples.
Invisible nodes at the respective location will be created which can be
referenced from outside the axis environment.
|
def get_tikz_code(
figure="gcf",
filepath=None,
figurewidth=None,
figureheight=None,
textsize=10.0,
tex_relative_path_to_data=None,
externalize_tables=False,
override_externals=False,
strict=False,
wrap=True,
add_axis_environment=True,
extra_axis_parameters=None,
extra_tikzpicture_parameters=None,
dpi=None,
show_info=False,
include_disclaimer=True,
standalone=False,
float_format="{:.15g}",
table_row_sep="\n",
):
"""Main function. Here, the recursion into the image starts and the
contents are picked up. The actual file gets written in this routine.
:param figure: either a Figure object or 'gcf' (default).
:param figurewidth: If not ``None``, this will be used as figure width
within the TikZ/PGFPlots output. If ``figureheight``
is not given, ``matplotlib2tikz`` will try to preserve
the original width/height ratio.
Note that ``figurewidth`` can be a string literal,
such as ``'\\figurewidth'``.
:type figurewidth: str
:param figureheight: If not ``None``, this will be used as figure height
within the TikZ/PGFPlots output. If ``figurewidth`` is
not given, ``matplotlib2tikz`` will try to preserve
the original width/height ratio. Note that
``figurewidth`` can be a string literal, such as
``'\\figureheight'``.
:type figureheight: str
:param textsize: The text size (in pt) that the target latex document is
using. Default is 10.0.
:type textsize: float
:param tex_relative_path_to_data: In some cases, the TikZ file will have to
refer to another file, e.g., a PNG for
image plots. When ``\\input`` into a
regular LaTeX document, the additional
file is looked for in a folder relative
to the LaTeX file, not the TikZ file.
                                      This argument optionally sets the
relative path from the LaTeX file to the
data.
:type tex_relative_path_to_data: str
:param externalize_tables: Whether or not to externalize plot data tables
into tsv files.
:type externalize_tables: bool
:param override_externals: Whether or not to override existing external
files (such as tsv or images) with conflicting
names (the alternative is to choose other
names).
:type override_externals: bool
:param strict: Whether or not to strictly stick to matplotlib's appearance.
This influences, for example, whether tick marks are set
exactly as in the matplotlib plot, or if TikZ/PGFPlots
can decide where to put the ticks.
:type strict: bool
:param wrap: Whether ``'\\begin{tikzpicture}'`` and
``'\\end{tikzpicture}'`` will be written. One might need to
                 provide custom arguments to the environment (e.g., scale=, etc.).
Default is ``True``.
:type wrap: bool
:param add_axis_environment: Whether ``'\\begin{axis}[...]'`` and
``'\\end{axis}'`` will be written. One needs to
set the environment in the document. If ``False``
additionally sets ``wrap=False``. Default is ``True``.
:type add_axis_environment: bool
:param extra_axis_parameters: Extra axis options to be passed (as a list or set)
to pgfplots. Default is ``None``.
    :type extra_axis_parameters: a list or set of strings for the pgfplots axes.
:param extra_tikzpicture_parameters: Extra tikzpicture options to be passed
(as a set) to pgfplots.
    :type extra_tikzpicture_parameters: a set of strings for the pgfplots
tikzpicture.
:param dpi: The resolution in dots per inch of the rendered image in case
of QuadMesh plots. If ``None`` it will default to the value
``savefig.dpi`` from matplotlib.rcParams. Default is ``None``.
:type dpi: int
:param show_info: Show extra info on the command line. Default is ``False``.
:type show_info: bool
:param include_disclaimer: Include matplotlib2tikz disclaimer in the output.
Set ``False`` to make tests reproducible.
Default is ``True``.
:type include_disclaimer: bool
:param standalone: Include wrapper code for a standalone LaTeX file.
:type standalone: bool
    :param float_format: Format for float entities. Default is ``"{:.15g}"``.
    :type float_format: str
    :param table_row_sep: Row separator for table data. Default is ``"\\n"``.
    :type table_row_sep: str
    :returns: The TikZ code as a string.
The following optional attributes of matplotlib's objects are recognized
and handled:
- axes.Axes._matplotlib2tikz_anchors
This attribute can be set to a list of ((x,y), anchor_name) tuples.
Invisible nodes at the respective location will be created which can be
referenced from outside the axis environment.
"""
# not as default value because gcf() would be evaluated at import time
if figure == "gcf":
figure = plt.gcf()
data = {}
data["fwidth"] = figurewidth
data["fheight"] = figureheight
data["rel data path"] = tex_relative_path_to_data
data["externalize tables"] = externalize_tables
data["override externals"] = override_externals
if filepath:
data["output dir"] = os.path.dirname(filepath)
else:
directory = tempfile.mkdtemp()
data["output dir"] = directory
data["base name"] = (
os.path.splitext(os.path.basename(filepath))[0] if filepath else "tmp"
)
data["strict"] = strict
data["tikz libs"] = set()
data["pgfplots libs"] = set()
data["font size"] = textsize
data["custom colors"] = {}
data["legend colors"] = []
data["extra tikzpicture parameters"] = extra_tikzpicture_parameters
data["add axis environment"] = add_axis_environment
data["show_info"] = show_info
    # rectangle_legends is used to keep track of which rectangles have already
    # had \addlegendimage added. There should be only one \addlegendimage per
    # bar chart data series.
data["rectangle_legends"] = set()
if extra_axis_parameters:
data["extra axis options [base]"] = set(extra_axis_parameters).copy()
else:
data["extra axis options [base]"] = set()
if dpi:
data["dpi"] = dpi
else:
savefig_dpi = mpl.rcParams["savefig.dpi"]
data["dpi"] = (
savefig_dpi if isinstance(savefig_dpi, int) else mpl.rcParams["figure.dpi"]
)
data["float format"] = float_format
data["table_row_sep"] = table_row_sep
# print message about necessary pgfplot libs to command line
if show_info:
_print_pgfplot_libs_message(data)
# gather the file content
data, content = _recurse(data, figure)
# Check if there is still an open groupplot environment. This occurs if not
# all of the group plot slots are used.
if "is_in_groupplot_env" in data and data["is_in_groupplot_env"]:
        content.append("\\end{groupplot}\n\n")
# write disclaimer to the file header
code = """"""
if include_disclaimer:
disclaimer = "This file was created by matplotlib2tikz v{}.".format(__version__)
code += _tex_comment(disclaimer)
# write the contents
if wrap and add_axis_environment:
code += "\\begin{tikzpicture}\n\n"
if extra_tikzpicture_parameters:
code += ",\n".join(data["extra tikzpicture parameters"])
code += "\n"
coldefs = _get_color_definitions(data)
if coldefs:
code += "\n".join(coldefs)
code += "\n\n"
code += "".join(content)
if wrap and add_axis_environment:
code += "\\end{tikzpicture}"
if standalone:
# create a latex wrapper for the tikz
# <https://tex.stackexchange.com/a/361070/13262>
code = """\\documentclass{{standalone}}
\\usepackage[utf8]{{inputenc}}
\\usepackage{{pgfplots}}
\\usepgfplotslibrary{{groupplots}}
\\usepgfplotslibrary{{dateplot}}
\\pgfplotsset{{compat=newest}}
\\DeclareUnicodeCharacter{{2212}}{{−}}
\\begin{{document}}
{}
\\end{{document}}""".format(
code
)
return code
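
A minimal usage sketch; the figure contents and the width string are
illustrative, and it assumes this module is importable as ``matplotlib2tikz``:

import matplotlib.pyplot as plt

from matplotlib2tikz import get_tikz_code

# Build a simple figure; get_tikz_code() picks up the current figure ("gcf").
plt.plot([0, 1, 2], [0, 1, 4], label="parabola")
plt.legend()

# Render the current figure to PGFPlots/TikZ code as a string.
code = get_tikz_code(figurewidth="\\figurewidth")
print(code)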
|
Same as `get_tikz_code()`, but actually saves the code to a file.
:param filepath: The file to which the TikZ output will be written.
:type filepath: str
:param encoding: Sets the text encoding of the output file, e.g. 'utf-8'.
For supported values: see ``codecs`` module.
:returns: None
|
def save(filepath, *args, encoding=None, **kwargs):
"""Same as `get_tikz_code()`, but actually saves the code to a file.
:param filepath: The file to which the TikZ output will be written.
:type filepath: str
:param encoding: Sets the text encoding of the output file, e.g. 'utf-8'.
For supported values: see ``codecs`` module.
:returns: None
"""
code = get_tikz_code(*args, filepath=filepath, **kwargs)
file_handle = codecs.open(filepath, "w", encoding)
try:
file_handle.write(code)
except UnicodeEncodeError:
# We're probably using Python 2, so treat unicode explicitly
file_handle.write(six.text_type(code).encode("utf-8"))
file_handle.close()
return
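
A short usage sketch (the figure and the output file name are illustrative):

import matplotlib.pyplot as plt

from matplotlib2tikz import save

plt.plot(range(5), [x ** 2 for x in range(5)])

# Write the TikZ code for the current figure to 'parabola.tex'.
save("parabola.tex", encoding="utf-8")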
|
Returns the list of custom color definitions for the TikZ file.
|
def _get_color_definitions(data):
"""Returns the list of custom color definitions for the TikZ file.
"""
definitions = []
fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}"
for name, rgb in data["custom colors"].items():
definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2]))
return definitions
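
For illustration, a hand-built ``data`` dict (using the keys consumed above)
yields output like the following; the color name and RGB values are made up:

data = {
    "float format": "{:.15g}",
    "custom colors": {"color0": (0.12, 0.47, 0.71)},
}
print(_get_color_definitions(data))
# ['\\definecolor{color0}{rgb}{0.12,0.47,0.71}']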
|
Prints a message to the screen indicating the use of PGFPlots and its
libraries.
|
def _print_pgfplot_libs_message(data):
"""Prints message to screen indicating the use of PGFPlots and its
libraries."""
pgfplotslibs = ",".join(list(data["pgfplots libs"]))
tikzlibs = ",".join(list(data["tikz libs"]))
print(70 * "=")
print("Please add the following lines to your LaTeX preamble:\n")
print("\\usepackage[utf8]{inputenc}")
print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
print("\\usepackage{pgfplots}")
if tikzlibs:
print("\\usetikzlibrary{" + tikzlibs + "}")
if pgfplotslibs:
print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
print(70 * "=")
return
|
Iterates over all children of the current object, gathers the contents
contributing to the resulting PGFPlots file, and returns those.
|
def _recurse(data, obj):
"""Iterates over all children of the current object, gathers the contents
contributing to the resulting PGFPlots file, and returns those.
"""
content = _ContentManager()
for child in obj.get_children():
# Some patches are Spines, too; skip those entirely.
# See <https://github.com/nschloe/matplotlib2tikz/issues/277>.
if isinstance(child, mpl.spines.Spine):
continue
if isinstance(child, mpl.axes.Axes):
ax = axes.Axes(data, child)
if ax.is_colorbar:
continue
# add extra axis options
if data["extra axis options [base]"]:
ax.axis_options.extend(data["extra axis options [base]"])
data["current mpl axes obj"] = child
data["current axes"] = ax
# Run through the child objects, gather the content.
data, children_content = _recurse(data, child)
# populate content and add axis environment if desired
if data["add axis environment"]:
content.extend(
ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0
)
else:
content.extend(children_content, 0)
# print axis environment options, if told to show infos
if data["show_info"]:
print("=========================================================")
print("These would have been the properties of the environment:")
print("".join(ax.get_begin_code()[1:]))
print("=========================================================")
elif isinstance(child, mpl.lines.Line2D):
data, cont = line2d.draw_line2d(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.image.AxesImage):
data, cont = img.draw_image(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.patches.Patch):
data, cont = patch.draw_patch(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(
child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection)
):
data, cont = patch.draw_patchcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.PathCollection):
data, cont = path.draw_pathcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.LineCollection):
data, cont = line2d.draw_linecollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.QuadMesh):
data, cont = qmsh.draw_quadmesh(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.legend.Legend):
data = legend.draw_legend(data, child)
if data["legend colors"]:
content.extend(data["legend colors"], 0)
elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)):
data, cont = text.draw_text(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)):
pass
else:
warnings.warn(
"matplotlib2tikz: Don't know how to handle object {}.".format(
type(child)
)
)
return data, content.flatten()
|
Extends the stored content with a list at the given z-order.
|
def extend(self, content, zorder):
""" Extends with a list and a z-order
"""
if zorder not in self._content:
self._content[zorder] = []
self._content[zorder].extend(content)
|
Returns the PGFPlots code for a Line2D environment.
|
def draw_line2d(data, obj):
"""Returns the PGFPlots code for an Line2D environment.
"""
content = []
addplot_options = []
    # If the line has no data points, render nothing. Otherwise, an empty
    # \addplot table would be created, which would be interpreted as an
    # external data source in either the file '' or '.tex'.
if len(obj.get_xdata()) == 0:
return data, []
# get the linewidth (in pt)
line_width = mypath.mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth())
if line_width:
addplot_options.append(line_width)
# get line color
color = obj.get_color()
data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color)
addplot_options.append(line_xcolor)
alpha = obj.get_alpha()
if alpha is not None:
addplot_options.append("opacity={}".format(alpha))
linestyle = mypath.mpl_linestyle2pgfplots_linestyle(obj.get_linestyle(), line=obj)
if linestyle is not None and linestyle != "solid":
addplot_options.append(linestyle)
marker_face_color = obj.get_markerfacecolor()
marker_edge_color = obj.get_markeredgecolor()
data, marker, extra_mark_options = _mpl_marker2pgfp_marker(
data, obj.get_marker(), marker_face_color
)
if marker:
_marker(
obj,
data,
marker,
addplot_options,
extra_mark_options,
marker_face_color,
marker_edge_color,
line_xcolor,
)
if marker and linestyle is None:
addplot_options.append("only marks")
# Check if a line is in a legend and forget it if not.
# Fixes <https://github.com/nschloe/matplotlib2tikz/issues/167>.
legend_text = get_legend_text(obj)
if legend_text is None and has_legend(obj.axes):
addplot_options.append("forget plot")
# process options
content.append("\\addplot ")
if addplot_options:
content.append("[{}]\n".format(", ".join(addplot_options)))
c, axis_options = _table(obj, data)
content += c
if legend_text is not None:
content.append("\\addlegendentry{{{}}}\n".format(legend_text))
return data, content
|
Returns PGFPlots code for the line segments of a LineCollection.
|
def draw_linecollection(data, obj):
"""Returns Pgfplots code for a number of patch objects.
"""
content = []
edgecolors = obj.get_edgecolors()
linestyles = obj.get_linestyles()
linewidths = obj.get_linewidths()
paths = obj.get_paths()
for i, path in enumerate(paths):
color = edgecolors[i] if i < len(edgecolors) else edgecolors[0]
style = linestyles[i] if i < len(linestyles) else linestyles[0]
width = linewidths[i] if i < len(linewidths) else linewidths[0]
data, options = mypath.get_draw_options(data, obj, color, None, style, width)
# TODO what about masks?
data, cont, _, _ = mypath.draw_path(
data, path, draw_options=options, simplify=False
)
content.append(cont + "\n")
return data, content
|
Translates a marker style of matplotlib to the corresponding style
in PGFPlots.
|
def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
"""Translates a marker style of matplotlib to the corresponding style
in PGFPlots.
"""
# try default list
try:
pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
except KeyError:
pass
else:
if (marker_face_color is not None) and pgfplots_marker == "o":
pgfplots_marker = "*"
data["tikz libs"].add("plotmarks")
marker_options = None
return (data, pgfplots_marker, marker_options)
# try plotmarks list
try:
data["tikz libs"].add("plotmarks")
pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
except KeyError:
# There's no equivalent for the pixel marker (,) in Pgfplots.
pass
else:
if (
marker_face_color is not None
and (
not isinstance(marker_face_color, str)
or marker_face_color.lower() != "none"
)
and pgfplots_marker not in ["|", "-", "asterisk", "star"]
):
pgfplots_marker += "*"
return (data, pgfplots_marker, marker_options)
return data, None, None
|
Paints text on the graph.
|
def draw_text(data, obj):
"""Paints text on the graph.
"""
content = []
properties = []
style = []
if isinstance(obj, mpl.text.Annotation):
_annotation(obj, data, content)
# 1: coordinates
# 2: properties (shapes, rotation, etc)
# 3: text style
# 4: the text
# -------1--------2---3--4--
pos = obj.get_position()
# from .util import transform_to_data_coordinates
# pos = transform_to_data_coordinates(obj, *pos)
text = obj.get_text()
if text in ["", data["current axis title"]]:
# Text nodes which are direct children of Axes are typically titles. They are
# already captured by the `title` property of pgfplots axes, so skip them here.
return data, content
size = obj.get_size()
bbox = obj.get_bbox_patch()
converter = mpl.colors.ColorConverter()
# without the factor 0.5, the fonts are too big most of the time.
# TODO fix this
scaling = 0.5 * size / data["font size"]
ff = data["float format"]
if scaling != 1.0:
properties.append(("scale=" + ff).format(scaling))
if bbox is not None:
_bbox(bbox, data, properties, scaling)
ha = obj.get_ha()
va = obj.get_va()
anchor = _transform_positioning(ha, va)
if anchor is not None:
properties.append(anchor)
data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))
properties.append("text={}".format(col))
properties.append("rotate={:.1f}".format(obj.get_rotation()))
if obj.get_style() == "italic":
style.append("\\itshape")
else:
assert obj.get_style() == "normal"
# From matplotlib/font_manager.py:
# weight_dict = {
# 'ultralight' : 100,
# 'light' : 200,
# 'normal' : 400,
# 'regular' : 400,
# 'book' : 400,
# 'medium' : 500,
# 'roman' : 500,
# 'semibold' : 600,
# 'demibold' : 600,
# 'demi' : 600,
# 'bold' : 700,
# 'heavy' : 800,
# 'extra bold' : 800,
# 'black' : 900}
#
    # get_weight() returns a numeric value in the range 0-1000 or one of
# ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,
# ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’
weight = obj.get_weight()
if weight in [
"semibold",
"demibold",
"demi",
"bold",
"heavy",
"extra bold",
"black",
] or (isinstance(weight, int) and weight > 550):
style.append("\\bfseries")
# \lfseries isn't that common yet
# elif weight == 'light' or (isinstance(weight, int) and weight < 300):
# style.append('\\lfseries')
if obj.axes:
# If the coordinates are relative to an axis, use `axis cs`.
tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos)
else:
        # Relative to the entire figure, it gets a little harder. See
        # <http://tex.stackexchange.com/a/274902/13262> for a solution to the
        # problem:
tikz_pos = (
"({{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.south east)$}}"
"|-"
"{{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.north west)$}})"
).format(*pos)
if "\n" in text:
# http://tex.stackexchange.com/a/124114/13262
properties.append("align={}".format(ha))
        # Manipulating the text here goes against mpl2tikz's policy of not
        # modifying the text. On the other hand, newlines should translate
        # into newlines. We might want to remove this in the future.
text = text.replace("\n ", "\\\\")
content.append(
"\\node at {}[\n {}\n]{{{}}};\n".format(
tikz_pos, ",\n ".join(properties), " ".join(style + [text])
)
)
return data, content
|
Converts matplotlib positioning to pgf node positioning.
Not quite accurate, but the results are more or less equivalent.
|
def _transform_positioning(ha, va):
"""Converts matplotlib positioning to pgf node positioning.
    Not quite accurate, but the results are more or less equivalent."""
if ha == "center" and va == "center":
return None
ha_mpl_to_tikz = {"right": "east", "left": "west", "center": ""}
va_mpl_to_tikz = {
"top": "north",
"bottom": "south",
"center": "",
"baseline": "base",
}
return "anchor={} {}".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip()
|
Import a JSON file or file-like object into a `rows.Table`.
If a file-like object is provided it MUST be open in text (non-binary) mode
on Python 3 and could be open in both binary or text mode on Python 2.
|
def import_from_json(filename_or_fobj, encoding="utf-8", *args, **kwargs):
"""Import a JSON file or file-like object into a `rows.Table`.
If a file-like object is provided it MUST be open in text (non-binary) mode
on Python 3 and could be open in both binary or text mode on Python 2.
"""
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="json", encoding=encoding)
json_obj = json.load(source.fobj, encoding=source.encoding)
field_names = list(json_obj[0].keys())
table_rows = [[item[key] for key in field_names] for item in json_obj]
meta = {"imported_from": "json", "source": source}
return create_table([field_names] + table_rows, meta=meta, *args, **kwargs)
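
A usage sketch; the file name and its contents are hypothetical (a JSON list
of flat objects with identical keys):

# people.json: [{"name": "Ada", "age": 36}, {"name": "Alan", "age": 41}]
table = import_from_json("people.json")
for row in table:
    print(row.name, row.age)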
|
Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
|
def export_to_json(
table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs
):
"""Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
"""
# TODO: will work only if table.fields is OrderedDict
fields = table.fields
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
data = [
{
field_name: _convert(value, fields[field_name], *args, **kwargs)
for field_name, value in zip(field_names, row)
}
for row in prepared_table
]
result = json.dumps(data, indent=indent)
if type(result) is six.text_type: # Python 3
result = result.encode(encoding)
if indent is not None:
# clean up empty spaces at the end of lines
result = b"\n".join(line.rstrip() for line in result.splitlines())
return export_data(filename_or_fobj, result, mode="wb")
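
The corresponding export, reusing the hypothetical table from above:

# Pretty-printed JSON; note the function writes in binary mode.
export_to_json(table, "people.json", indent=2)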
|
Return the plugin name based on the URI
|
def plugin_name_by_uri(uri):
"Return the plugin name based on the URI"
# TODO: parse URIs like 'sqlite://' also
parsed = urlparse(uri)
basename = os.path.basename(parsed.path)
if not basename.strip():
raise RuntimeError("Could not identify file format.")
plugin_name = basename.split(".")[-1].lower()
if plugin_name in FILE_EXTENSIONS:
plugin_name = MIME_TYPE_TO_PLUGIN_NAME[FILE_EXTENSIONS[plugin_name]]
return plugin_name
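
For example, assuming the 'csv' and 'xlsx' extensions are registered in
FILE_EXTENSIONS and map to plugins of the same name:

print(plugin_name_by_uri("https://example.com/reports/2019.csv"))  # csv
print(plugin_name_by_uri("/tmp/data.xlsx"))                        # xlsx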
|
Return the file extension used by this plugin
|
def extension_by_source(source, mime_type):
"Return the file extension used by this plugin"
# TODO: should get this information from the plugin
extension = source.plugin_name
if extension:
return extension
if mime_type:
return mime_type.split("/")[-1]
|
Return the plugin name based on the MIME type
|
def plugin_name_by_mime_type(mime_type, mime_name, file_extension):
"Return the plugin name based on the MIME type"
return MIME_TYPE_TO_PLUGIN_NAME.get(
normalize_mime_type(mime_type, mime_name, file_extension), None
)
|
Return a `rows.Source` with information for a given URI
If URI starts with "http" or "https" the file will be downloaded.
This function should only be used if the URI already exists because it's
going to download/open the file to detect its encoding and MIME type.
|
def detect_source(uri, verify_ssl, progress, timeout=5):
"""Return a `rows.Source` with information for a given URI
If URI starts with "http" or "https" the file will be downloaded.
This function should only be used if the URI already exists because it's
going to download/open the file to detect its encoding and MIME type.
"""
    # TODO: should also support other schemes, like file://, sqlite://, etc.
if uri.lower().startswith("http://") or uri.lower().startswith("https://"):
return download_file(
uri, verify_ssl=verify_ssl, timeout=timeout, progress=progress, detect=True
)
elif uri.startswith("postgres://"):
return Source(
should_delete=False,
encoding=None,
plugin_name="postgresql",
uri=uri,
is_file=False,
local=None,
)
else:
return local_file(uri)
|
Import data described in a `rows.Source` into a `rows.Table`
|
def import_from_source(source, default_encoding, *args, **kwargs):
"Import data described in a `rows.Source` into a `rows.Table`"
# TODO: test open_compressed
plugin_name = source.plugin_name
kwargs["encoding"] = (
kwargs.get("encoding", None) or source.encoding or default_encoding
)
try:
import_function = getattr(rows, "import_from_{}".format(plugin_name))
except AttributeError:
raise ValueError('Plugin (import) "{}" not found'.format(plugin_name))
table = import_function(source.uri, *args, **kwargs)
return table
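
A sketch combining `detect_source` and `import_from_source`; the URL is
illustrative:

source = detect_source("https://example.com/data.csv", verify_ssl=True, progress=False)
table = import_from_source(source, default_encoding="utf-8")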
|