index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
|---|---|---|---|---|---|
713,843
|
xmlschema.validators.elements
|
get_path
|
Returns the XPath expression of the element. The path is relative to the schema instance
in which the element is contained or is relative to a specific ancestor passed as argument.
In the latter case returns `None` if the argument is not an ancestor.
:param ancestor: optional XSD component of the same schema, that may be an ancestor of the element.
:param reverse: if set to `True` returns the reverse path, from the element to ancestor.
|
def get_path(self, ancestor: Optional[XsdComponent] = None,
             reverse: bool = False) -> Optional[str]:
    """
    Return the XPath expression of the element.

    The path is relative to the schema instance that contains the element,
    or relative to the specific ancestor passed as argument; in the latter
    case `None` is returned when the argument is not really an ancestor.

    :param ancestor: optional XSD component of the same schema, that may \
    be an ancestor of the element.
    :param reverse: if set to `True` returns the reverse path, from the \
    element to the ancestor.
    """
    steps: List[str] = []
    component: Optional[XsdComponent] = self
    while component is not None:
        if component is ancestor:
            return '/'.join(reversed(steps)) or '.'
        if isinstance(component, XsdElement):
            steps.append('..' if reverse else component.name)
        component = component.parent

    # The walk reached the root without meeting *ancestor*.
    if ancestor is None:
        return '/'.join(reversed(steps)) or '.'
    return None
|
(self, ancestor: Optional[xmlschema.validators.xsdbase.XsdComponent] = None, reverse: bool = False) -> Optional[str]
|
713,844
|
xmlschema.validators.elements
|
get_type
| null |
def get_type(self, elem: Union[ElementType, ElementData],
             inherited: Optional[Dict[str, Any]] = None) -> BaseXsdType:
    """
    Return the XSD type to use for the provided item: `self._head_type`
    when it is set (truthy), otherwise the declared `self.type`.  The
    *elem* and *inherited* arguments are unused here but are part of the
    interface honoured by overriding implementations.
    """
    head_type = self._head_type
    return head_type if head_type else self.type
|
(self, elem: Union[~T, NoneType, xmlschema.converters.default.ElementData], inherited: Optional[Dict[str, Any]] = None) -> Optional[~T]
|
713,845
|
xmlschema.validators.particles
|
has_occurs_restriction
| null |
def has_occurs_restriction(self, other: Union[ModelParticleType, 'OccursCalculator']) -> bool:
    """
    Return `True` if this particle's occurrence range is a valid
    restriction of the range of *other* (i.e. is contained in it).
    """
    if self.min_occurs < other.min_occurs:
        return False  # occurs fewer times than the base minimum allows
    if self.max_occurs == 0:
        return True   # a zero-length particle restricts anything above
    if other.max_occurs is None:
        return True   # the base is unbounded above
    if self.max_occurs is None:
        return False  # unbounded cannot restrict a bounded base
    return self.max_occurs <= other.max_occurs
|
(self, other: Union[~T, NoneType, xmlschema.validators.particles.OccursCalculator]) -> bool
|
713,847
|
xmlschema.validators.particles
|
is_ambiguous
|
Tests if min_occurs != max_occurs.
|
def is_ambiguous(self) -> bool:
    """Return `True` if the particle has a variable occurrence range
    (min_occurs != max_occurs), `False` otherwise."""
    return self.max_occurs != self.min_occurs
|
(self) -> bool
|
713,848
|
xmlschema.validators.elements
|
is_consistent
|
Element Declarations Consistent check between two element particles.
Ref: https://www.w3.org/TR/xmlschema-1/#cos-element-consistent
:returns: `True` if there is no inconsistency between the particles, `False` otherwise.
|
def is_consistent(self, other: SchemaElementType, strict: bool = True) -> bool:
    """
    Element Declarations Consistent check between two element particles.
    Ref: https://www.w3.org/TR/xmlschema-1/#cos-element-consistent

    :param other: the other element particle to check against.
    :param strict: unused here, kept for interface compatibility with \
    overriding implementations.
    :returns: `True` if there is no inconsistency between the particles, \
    `False` otherwise.
    """
    if self.name != other.name:
        return True  # different names: no consistency constraint applies
    return self.type is other.type
|
(self, other: Optional[~T], strict: bool = True) -> bool
|
713,849
|
xmlschema.validators.particles
|
is_emptiable
|
Tests if min_occurs == 0. A model group that can have zero-length is
considered emptiable. For model groups the test outcome depends also
on nested particles.
|
def is_emptiable(self) -> bool:
    """
    Return `True` if the particle admits zero occurrences
    (min_occurs == 0).  A model group that can have zero-length is
    considered emptiable; for model groups the test outcome depends
    also on nested particles.
    """
    return not self.min_occurs
|
(self) -> bool
|
713,850
|
xmlschema.validators.particles
|
is_empty
|
Tests if max_occurs == 0. A zero-length model group is considered empty.
|
def is_empty(self) -> bool:
    """Return `True` for a zero-length particle (max_occurs == 0)."""
    return self.max_occurs == 0
|
(self) -> bool
|
713,851
|
xmlschema.validators.particles
|
is_exceeded
|
Tests if particle occurrences are over the maximum.
|
def is_exceeded(self, occurs: Union[OccursCounterType, int]) -> bool:
    """
    Return `True` if the counted occurrences are over max_occurs.

    :param occurs: either a plain integer counter or a mapping of \
    particles to counters, keyed by the particle itself.
    """
    if self.max_occurs is None:
        return False  # unbounded above: can never be exceeded
    try:
        count = occurs[self]  # type: ignore[index]
    except TypeError:
        count = occurs  # a plain integer counter
    return self.max_occurs < count  # type: ignore[operator]
|
(self, occurs: Union[~T, NoneType, int]) -> bool
|
713,853
|
xmlschema.validators.elements
|
is_matching
| null |
def is_matching(self, name: Optional[str], default_namespace: Optional[str] = None,
                group: Optional['XsdGroup'] = None, **kwargs: Any) -> bool:
    """
    Return `True` if *name* matches the element's name or the name of
    one of its substitutes, `False` otherwise.

    :param name: a local or fully-qualified name to match.
    :param default_namespace: when provided, used to expand an \
    unqualified *name* to its extended form.
    :param group: unused here, kept for interface compatibility.
    """
    if not name:
        return False

    if default_namespace and not name.startswith('{'):
        name = f'{{{default_namespace}}}{name}'
        # Workaround for backward compatibility of XPath selectors on schemas.
        if not self.qualified and default_namespace == self.target_namespace:
            if name == self.qualified_name:
                return True
            return any(name == e.qualified_name for e in self.iter_substitutes())

    if name == self.name:
        return True
    return any(name == e.name for e in self.iter_substitutes())
|
(self, name: Optional[str], default_namespace: Optional[str] = None, group: Optional[ForwardRef('XsdGroup')] = None, **kwargs: Any) -> bool
|
713,854
|
xmlschema.validators.particles
|
is_missing
|
Tests if the particle occurrences are under the minimum.
|
def is_missing(self, occurs: Union[OccursCounterType, int]) -> bool:
    """
    Return `True` if the counted occurrences are under min_occurs.

    :param occurs: either a plain integer counter or a mapping of \
    particles to counters, keyed by the particle itself.
    """
    try:
        count = occurs[self]  # type: ignore[index]
    except TypeError:
        count = occurs  # a plain integer counter
    return self.min_occurs > count  # type: ignore[operator]
|
(self, occurs: Union[~T, NoneType, int]) -> bool
|
713,855
|
xmlschema.validators.particles
|
is_multiple
|
Tests the particle can have multiple occurrences.
|
def is_multiple(self) -> bool:
    """Return `True` if the particle can have multiple occurrences:
    it is neither empty nor single."""
    return not (self.is_empty() or self.is_single())
|
(self) -> bool
|
713,856
|
xmlschema.validators.particles
|
is_over
|
Tests if particle occurrences are equal or over the maximum.
|
def is_over(self, occurs: Union[OccursCounterType, int]) -> bool:
    """
    Return `True` if the counted occurrences reach or exceed max_occurs.

    :param occurs: either a plain integer counter or a mapping of \
    particles to counters, keyed by the particle itself.
    """
    if self.max_occurs is None:
        return False  # unbounded above
    try:
        count = occurs[self]  # type: ignore[index]
    except TypeError:
        count = occurs  # a plain integer counter
    return self.max_occurs <= count  # type: ignore[operator]
|
(self, occurs: Union[~T, NoneType, int]) -> bool
|
713,857
|
xmlschema.validators.elements
|
is_overlap
| null |
def is_overlap(self, other: SchemaElementType) -> bool:
    """
    Return `True` if this element particle overlaps with *other*,
    that can be an element declaration or an element wildcard.
    """
    if isinstance(other, XsdElement):
        if self.name == other.name:
            return True
        # Overlap through substitution group membership, in either direction.
        return (other.substitution_group == self.name
                or other.name == self.substitution_group)

    if isinstance(other, XsdAnyElement):
        if other.is_matching(self.name, self.default_namespace):
            return True
        substitutes = self.maps.substitution_groups.get(self.name, ())
        return any(other.is_matching(e.name, self.default_namespace)
                   for e in substitutes)

    return False
|
(self, other: Optional[~T]) -> bool
|
713,859
|
xmlschema.validators.elements
|
is_restriction
| null |
# Check whether this element particle is a valid restriction of the base
# particle *other* (an element declaration, a wildcard or a model group).
# NOTE(review): the indentation of this function was lost in extraction;
# the comments below mark the logical branches of the original code.
def is_restriction(self, other: ModelParticleType, check_occurs: bool = True) -> bool:
e: ModelParticleType
# Restriction against an element wildcard: match by name/namespace and occurs.
if isinstance(other, XsdAnyElement):
if self.min_occurs == self.max_occurs == 0:
return True
if check_occurs and not self.has_occurs_restriction(other):
return False
return other.is_matching(self.name, self.default_namespace)
# Restriction against another element declaration.
elif isinstance(other, XsdElement):
if self.name != other.name:
if other.name == self.substitution_group and \
other.min_occurs != other.max_occurs and \
self.max_occurs != 0 and not other.abstract \
and self.xsd_version == '1.0':
# A UPA violation case. Base is the head element, it's not
# abstract and has non-deterministic occurs: this is less
# restrictive than W3C test group (elemZ026), marked as
# invalid despite it's based on an abstract declaration.
# See also test case invalid_restrictions1.xsd.
return False
# A different name is acceptable only through substitution.
for e in other.iter_substitutes():
if e.name == self.name:
break
else:
return False
# Check occurs, type derivation, fixed value constraints, nillability,
# blocked derivations and identity constraints against the base element.
if check_occurs and not self.has_occurs_restriction(other):
return False
elif self.max_occurs == 0 and check_occurs:
return True # type is not effective if the element can't have occurrences
elif not self.is_consistent(other) and self.type.elem is not other.type.elem and \
not self.type.is_derived(other.type, 'restriction') and not other.type.abstract:
return False
elif other.fixed is not None and \
(self.fixed is None or self.type.normalize(
self.fixed) != other.type.normalize(other.fixed)):
return False
elif other.nillable is False and self.nillable:
return False
elif any(value not in self.block for value in other.block.split()):
return False
elif not all(k in other.identities for k in self.identities):
return False
else:
return True
# Restriction against a choice group: accumulate the occurs of the
# restricted items and check the element's occurs against the total.
elif other.model == 'choice':
if other.is_empty() and self.max_occurs != 0:
return False
check_group_items_occurs = self.xsd_version == '1.0'
total_occurs = OccursCalculator()
for e in other.iter_model():
if not isinstance(e, (XsdElement, XsdAnyElement)):
return False
elif not self.is_restriction(e, check_group_items_occurs):
continue
total_occurs += e
total_occurs *= other
if self.has_occurs_restriction(total_occurs):
return True
total_occurs.reset()
return False
# Restriction against a sequence/all group: exactly one item can be
# restricted, every other item must be emptiable.
else:
match_restriction = False
for e in other.iter_model():
if match_restriction:
if not e.is_emptiable():
return False
elif self.is_restriction(e):
match_restriction = True
elif not e.is_emptiable():
return False
return True
|
(self, other: Optional[~T], check_occurs: bool = True) -> bool
|
713,860
|
xmlschema.validators.elements
|
is_single
| null |
def is_single(self) -> bool:
    """
    Return `True` if the element can occur at most once in its context:
    a global element, or a particle with max_occurs == 1 whose parent
    group admits a single effective occurrence.
    """
    parent = self.parent
    if parent is None:
        return True  # a global element declaration
    if self.max_occurs != 1:
        return False
    if parent.max_occurs == 1:
        return True
    return parent.model != 'choice' and len(parent) > 1
|
(self) -> bool
|
713,861
|
xmlschema.validators.particles
|
is_univocal
|
Tests if min_occurs == max_occurs.
|
def is_univocal(self) -> bool:
    """Return `True` if the particle has a fixed occurrence range
    (min_occurs == max_occurs), `False` otherwise."""
    return self.max_occurs == self.min_occurs
|
(self) -> bool
|
713,865
|
xmlschema.validators.elements
|
iter_components
| null |
def iter_components(self, xsd_classes: Optional[ComponentClassType] = None) \
        -> Iterator[XsdComponent]:
    """
    Iterate over the element and its XSD subcomponents.

    :param xsd_classes: when provided, only components that are \
    instances of these classes are yielded.
    """
    if xsd_classes is None:
        yield self
        for identity in self.identities:
            yield identity
    else:
        if isinstance(self, xsd_classes):
            yield self
        if issubclass(XsdIdentity, xsd_classes):
            for identity in self.identities:
                yield identity

    # Descend into the type only for non-reference elements with a local type.
    if self.ref is None and self.type.parent is not None:
        yield from self.type.iter_components(xsd_classes)
|
(self, xsd_classes: Optional[~T] = None) -> Iterator[xmlschema.validators.xsdbase.XsdComponent]
|
713,866
|
xmlschema.validators.elements
|
iter_decode
|
Creates an iterator for decoding an Element instance.
:param obj: the Element that has to be decoded.
:param validation: the validation mode, can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the decoding process.
:return: yields a decoded object, possibly preceded by a sequence of validation or decoding errors.
|
def iter_decode(self, obj: ElementType, validation: str = 'lax', **kwargs: Any) \
-> IterDecodeType[Any]:
"""
Creates an iterator for decoding an Element instance.
:param obj: the Element that has to be decoded.
:param validation: the validation mode, can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the decoding process.
:return: yields a decoded object, possibly preceded by a sequence of \
validation or decoding errors.
"""
# NOTE(review): the indentation of this function was lost in extraction;
# the comments below mark the logical sections of the original code.
error: Union[XMLSchemaValueError, XMLSchemaValidationError]
result: Any
# An abstract element cannot be used directly for validation.
if self.abstract:
reason = _("cannot use an abstract element for validation")
yield self.validation_error(validation, reason, obj, **kwargs)
# Control validation on element and its descendants or stop validation
if 'validation_hook' in kwargs:
value = kwargs['validation_hook'](obj, self)
if value:
if isinstance(value, str) and value in XSD_VALIDATION_MODES:
validation = value
else:
return
kwargs['elem'] = obj
# Initialize the decoding level and the identity counters on first call.
try:
level = kwargs['level']
except KeyError:
level = kwargs['level'] = 0
try:
identities = kwargs['identities']
except KeyError:
identities = kwargs['identities'] = {}
for identity in self.identities:
if identity in identities:
identities[identity].reset(obj)
else:
identities[identity] = identity.get_counter(obj)
# Get the converter from kwargs or build a suitable one.
try:
converter = kwargs['converter']
except KeyError:
converter = self._get_converter(obj, kwargs)
else:
if not isinstance(converter, NamespaceMapper):
converter = self._get_converter(obj, kwargs)
if not level:
# Need to set base context with the right object (the resource can be lazy)
converter.set_context(obj, level)
elif kwargs.get('use_location_hints'):
# Use location hints for dynamic schema load
yield from self.check_dynamic_context(obj, validation, options=kwargs)
inherited = kwargs.get('inherited')
value = content = attributes = None
nilled = False
# Get the instance effective type
xsd_type = self.get_type(obj, inherited)
# Apply an xsi:type override of the effective type, if any.
if XSI_TYPE in obj.attrib and self.schema.meta_schema is not None:
# Meta-schema elements ignore xsi:type (issue #350)
type_name = obj.attrib[XSI_TYPE].strip()
namespaces = converter.namespaces
try:
xsd_type = self.maps.get_instance_type(type_name, xsd_type, namespaces)
except (KeyError, TypeError) as err:
yield self.validation_error(validation, err, obj, **kwargs)
else:
# Bind identity selectors to the elements selected by their XPath.
if self.identities:
xpath_element = XPathElement(self.name, xsd_type)
for identity in self.identities:
if isinstance(identity.elements, tuple) \
or identity.selector is None:
continue # Skip unbuilt or incomplete identities
elif identity.selector.token is None:
raise XMLSchemaNotBuiltError(
identity, "identity selector is not built"
)
context = XPathContext(root=self.schema.xpath_node, item=xpath_element)
for e in identity.selector.token.select_results(context):
if isinstance(e, XsdElement):
if e not in identity.elements:
identity.elements[e] = None
e.selected_by.add(identity)
elif not isinstance(e, XsdAnyElement):
reason = _("selector xpath expression can only select elements")
yield self.validation_error(validation, reason, e, **kwargs)
# Blocked or abstract effective types are validation errors.
if xsd_type.is_blocked(self):
reason = _("usage of %r is blocked") % xsd_type
yield self.validation_error(validation, reason, obj, **kwargs)
if xsd_type.abstract:
reason = _("%r is abstract") % xsd_type
yield self.validation_error(validation, reason, obj, **kwargs)
if xsd_type.is_complex() and self.xsd_version == '1.1':
kwargs['id_list'] = [] # Track XSD 1.1 multiple xs:ID attributes/children
content_decoder = xsd_type if isinstance(xsd_type, XsdSimpleType) else xsd_type.content
# Decode attributes
attribute_group = self.get_attributes(xsd_type)
for result in attribute_group.iter_decode(obj.attrib, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, obj, **kwargs)
else:
attributes = result
# Propagate inheritable attributes to descendant decoding calls.
if self.inheritable and any(name in self.inheritable for name in obj.attrib):
if inherited:
inherited = inherited.copy()
inherited.update((k, v) for k, v in obj.attrib.items() if k in self.inheritable)
else:
inherited = {k: v for k, v in obj.attrib.items() if k in self.inheritable}
kwargs['inherited'] = inherited
# Checks the xsi:nil attribute of the instance
if XSI_NIL in obj.attrib:
xsi_nil = obj.attrib[XSI_NIL].strip()
if not self.nillable:
reason = _("element is not nillable")
yield self.validation_error(validation, reason, obj, **kwargs)
elif xsi_nil not in ('0', '1', 'false', 'true'):
reason = _("xsi:nil attribute must have a boolean value")
yield self.validation_error(validation, reason, obj, **kwargs)
elif xsi_nil in ('0', 'false'):
pass
elif self.fixed is not None:
reason = _("xsi:nil='true' but the element has a fixed value")
yield self.validation_error(validation, reason, obj, **kwargs)
elif obj.text is not None or len(obj):
reason = _("xsi:nil='true' but the element is not empty")
yield self.validation_error(validation, reason, obj, **kwargs)
else:
nilled = True
if xsd_type.is_empty() and obj.text and xsd_type.normalize(obj.text):
reason = _("character data is not allowed because content is empty")
yield self.validation_error(validation, reason, obj, **kwargs)
# Decode content: skipped when nilled, complex path or simple-content path.
if nilled:
pass
elif not isinstance(content_decoder, XsdSimpleType):
if not isinstance(xsd_type, XsdSimpleType):
for assertion in xsd_type.assertions:
for error in assertion(obj, **kwargs):
yield self.validation_error(validation, error, **kwargs)
for result in content_decoder.iter_decode(obj, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, obj, **kwargs)
else:
content = result
# A single text-only content item is unwrapped to a simple value.
if content and len(content) == 1 and content[0][0] == 1:
value, content = content[0][1], None
if self.fixed is not None and \
(len(obj) > 0 or value is not None and self.fixed != value):
reason = _("must have the fixed value %r") % self.fixed
yield self.validation_error(validation, reason, obj, **kwargs)
else:
if len(obj):
reason = _("a simple content element can't have child elements")
yield self.validation_error(validation, reason, obj, **kwargs)
text = obj.text
# Apply the fixed/default value constraints to the text.
if self.fixed is not None:
if not text:
text = self.fixed
elif text == self.fixed:
pass
elif not strictly_equal(xsd_type.text_decode(text),
xsd_type.text_decode(self.fixed)):
reason = _("must have the fixed value %r") % self.fixed
yield self.validation_error(validation, reason, obj, **kwargs)
elif not text and self.default is not None and kwargs.get('use_defaults', True):
text = self.default
if not isinstance(xsd_type, XsdSimpleType):
for assertion in xsd_type.assertions:
for error in assertion(obj, value=text, **kwargs):
yield self.validation_error(validation, error, **kwargs)
if text and content_decoder.is_list():
value = text.split()
else:
value = text
elif xsd_type.is_notation():
if xsd_type.name == XSD_NOTATION_TYPE:
msg = _("cannot validate against xs:NOTATION directly, "
"only against a subtype with an enumeration facet")
yield self.validation_error(validation, msg, text, **kwargs)
elif not xsd_type.enumeration:
msg = _("missing enumeration facet in xs:NOTATION subtype")
yield self.validation_error(validation, msg, text, **kwargs)
for result in content_decoder.iter_decode(text or '', validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, obj, **kwargs)
elif result is None and 'filler' in kwargs:
value = kwargs['filler'](self)
elif text or kwargs.get('keep_empty'):
value = result
# Post-process the decoded value according to the decoding options.
if 'value_hook' in kwargs:
value = kwargs['value_hook'](value, xsd_type)
elif isinstance(value, (int, float, list)) or value is None:
pass
elif isinstance(value, str):
if value.startswith('{') and xsd_type.is_qname():
value = text
elif isinstance(value, Decimal):
try:
value = kwargs['decimal_type'](value)
except (KeyError, TypeError):
pass
elif isinstance(value, (AbstractDateTime, Duration)):
if not kwargs.get('datetime_types'):
value = str(value) if text is None else text.strip()
elif isinstance(value, AbstractBinary):
if not kwargs.get('binary_types'):
value = str(value)
xmlns = converter.set_context(obj, level) # Purge existing sub-contexts
# Emit the decoded element through the converter.
if isinstance(converter, XMLSchemaConverter):
element_data = ElementData(obj.tag, value, content, attributes, xmlns)
if 'element_hook' in kwargs:
element_data = kwargs['element_hook'](element_data, self, xsd_type)
try:
yield converter.element_decode(element_data, self, xsd_type, level)
except (ValueError, TypeError) as err:
yield self.validation_error(validation, err, obj, **kwargs)
elif not level:
yield ElementData(obj.tag, value, None, attributes, None)
if content is not None:
del content
if self.selected_by:
yield from self.collect_key_fields(obj, xsd_type, validation, nilled, **kwargs)
# Apply non XSD optional validations
if 'extra_validator' in kwargs:
try:
result = kwargs['extra_validator'](obj, self)
except XMLSchemaValidationError as err:
yield self.validation_error(validation, err, obj, **kwargs)
else:
if isinstance(result, GeneratorType):
for error in result:
yield self.validation_error(validation, error, obj, **kwargs)
# Disable collect for out of scope identities and check key references
if 'max_depth' not in kwargs:
for identity in self.identities:
counter = identities[identity]
counter.enabled = False
if isinstance(identity, XsdKeyref):
assert isinstance(counter, KeyrefCounter)
for error in counter.iter_errors(identities):
yield self.validation_error(validation, error, obj, **kwargs)
elif level:
for identity in self.identities:
identities[identity].enabled = False
|
(self, obj: Optional[~T], validation: str = 'lax', **kwargs: Any) -> Optional[Any]
|
713,867
|
xmlschema.validators.elements
|
iter_encode
|
Creates an iterator for encoding data to an Element.
:param obj: the data that has to be encoded.
:param validation: the validation mode: can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the encoding process.
:return: yields an Element, possibly preceded by a sequence of validation or encoding errors.
|
def iter_encode(self, obj: Any, validation: str = 'lax', **kwargs: Any) \
-> IterEncodeType[ElementType]:
"""
Creates an iterator for encoding data to an Element.
:param obj: the data that has to be encoded.
:param validation: the validation mode: can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the encoding process.
:return: yields an Element, eventually preceded by a sequence of \
validation or encoding errors.
"""
errors: List[Union[str, Exception]] = []
try:
converter = kwargs['converter']
except KeyError:
converter = self._get_converter(obj, kwargs)
else:
if not isinstance(converter, XMLSchemaConverter):
converter = self._get_converter(obj, kwargs)
try:
level = kwargs['level']
except KeyError:
level = kwargs['level'] = 0
try:
element_data = converter.element_encode(obj, self, level)
except (ValueError, TypeError) as err:
yield self.validation_error(validation, err, obj, **kwargs)
return
if 'max_depth' in kwargs and kwargs['max_depth'] == 0 and not level:
for e in errors:
yield self.validation_error(validation, e, **kwargs)
return
text = None
children = element_data.content
attributes = ()
xsd_type = self.get_type(element_data)
if XSI_TYPE in element_data.attributes and self.schema.meta_schema is not None:
type_name = element_data.attributes[XSI_TYPE].strip()
try:
xsd_type = self.maps.get_instance_type(type_name, xsd_type, converter)
except (KeyError, TypeError) as err:
errors.append(err)
else:
default_namespace = converter.get('')
if default_namespace and not isinstance(xsd_type, XsdSimpleType):
# Adjust attributes mapped into default namespace
ns_part = f'{{{default_namespace}}}'
for k in list(element_data.attributes):
if not k.startswith(ns_part):
continue
elif k in xsd_type.attributes:
continue
local_name = k[len(ns_part):]
if local_name in xsd_type.attributes:
element_data.attributes[local_name] = element_data.attributes[k]
del element_data.attributes[k]
attribute_group = self.get_attributes(xsd_type)
result: Any
for result in attribute_group.iter_encode(element_data.attributes, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
attributes = result
if XSI_NIL in element_data.attributes:
xsi_nil = element_data.attributes[XSI_NIL].strip()
if not self.nillable:
errors.append("element is not nillable.")
elif xsi_nil not in ('0', '1', 'true', 'false'):
errors.append("xsi:nil attribute must has a boolean value.")
elif xsi_nil in ('0', 'false'):
pass
elif self.fixed is not None:
errors.append("xsi:nil='true' but the element has a fixed value.")
elif element_data.text not in (None, '') or element_data.content:
errors.append("xsi:nil='true' but the element is not empty.")
else:
elem = converter.etree_element(element_data.tag, attrib=attributes, level=level)
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
return
if isinstance(xsd_type, XsdSimpleType):
if element_data.content:
errors.append("a simpleType element can't has child elements.")
if element_data.text is not None:
for result in xsd_type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
elif self.fixed is not None:
text = self.fixed
elif self.default is not None and kwargs.get('use_defaults', True):
text = self.default
elif xsd_type.has_simple_content():
if element_data.text is not None:
for result in xsd_type.content.iter_encode(element_data.text,
validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
elif self.fixed is not None:
text = self.fixed
elif self.default is not None and kwargs.get('use_defaults', True):
text = self.default
else:
for result in xsd_type.content.iter_encode(element_data, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
elif result:
text, children = result
elem = converter.etree_element(element_data.tag, text, children, attributes, level)
if errors:
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
del element_data
|
(self, obj: Any, validation: str = 'lax', **kwargs: Any) -> Optional[~T]
|
713,869
|
xmlschema.validators.elements
|
iter_substitutes
| null |
def iter_substitutes(self) -> Iterator['XsdElement']:
    """
    Iterate over the non-abstract substitutes of the element, descending
    recursively into the substitutes of abstract members.  Nothing is
    yielded when the element has a parent and is not a reference.
    """
    if self.parent is not None and self.ref is None:
        return
    for xsd_element in self.maps.substitution_groups.get(self.name, ()):
        if not xsd_element.abstract:
            yield xsd_element
        # Substitutes of an abstract member are still candidates.
        for substitute in xsd_element.iter_substitutes():
            if not substitute.abstract:
                yield substitute
|
(self) -> Iterator[xmlschema.validators.elements.XsdElement]
|
713,872
|
xmlschema.validators.elements
|
match
| null |
def match(self, name: Optional[str], default_namespace: Optional[str] = None,
          **kwargs: Any) -> Optional['XsdElement']:
    """
    Return the element or one of its substitutes whose name matches
    *name*, or `None` if there is no match.

    :param name: a local or fully-qualified name to match.
    :param default_namespace: when provided, used to expand an \
    unqualified *name* to its extended form.
    """
    if not name:
        return None
    if default_namespace and not name.startswith('{'):
        name = f'{{{default_namespace}}}{name}'

    if name == self.name:
        return self
    for xsd_element in self.iter_substitutes():
        if name == xsd_element.name:
            return xsd_element
    return None
|
(self, name: Optional[str], default_namespace: Optional[str] = None, **kwargs: Any) -> Optional[xmlschema.validators.elements.XsdElement]
|
713,873
|
xmlschema.validators.elements
|
match_child
| null |
def match_child(self, name: str) -> Optional['XsdElement']:
    """
    Return the XSD element matching *name* within the content model of
    the element's type, a global element if open content permits it,
    or `None` when there is no match.
    """
    xsd_group = self.type.model_group
    if xsd_group is None:
        # fallback to xs:anyType encoder for matching extra content
        xsd_group = self.any_type.model_group
        assert xsd_group is not None

    for xsd_child in xsd_group.iter_elements():
        matched_element = xsd_child.match(name, resolve=True)
        if isinstance(matched_element, XsdElement):
            return matched_element

    if name in self.maps.elements and xsd_group.open_content_mode != 'none':
        return self.maps.lookup_element(name)
    return None
|
(self, name: str) -> Optional[xmlschema.validators.elements.XsdElement]
|
713,875
|
xmlschema.validators.elements
|
to_objects
|
Decodes XML data to Python data objects.
:param obj: the XML data source.
:param with_bindings: if `True` is provided the decoding is done using :class:`DataBindingConverter`, which uses XML data binding classes. By default the objects are instances of :class:`DataElement`, decoded using the :class:`DataElementConverter`.
:param kwargs: other optional keyword arguments for the method :func:`iter_decode`, except the argument *converter*.
|
def to_objects(self, obj: ElementType, with_bindings: bool = False, **kwargs: Any) \
        -> DecodeType['dataobjects.DataElement']:
    """
    Decode XML data to Python data objects.

    :param obj: the XML data source.
    :param with_bindings: if `True` is provided the decoding is done \
    using :class:`DataBindingConverter`, which uses XML data binding \
    classes. By default the objects are instances of :class:`DataElement`, \
    decoded using the :class:`DataElementConverter`.
    :param kwargs: other optional keyword arguments for the method \
    :func:`iter_decode`, except the argument *converter*.
    """
    if with_bindings:
        converter = dataobjects.DataBindingConverter
    else:
        converter = dataobjects.DataElementConverter
    return self.decode(obj, converter=converter, **kwargs)
|
(self, obj: Optional[~T], with_bindings: bool = False, **kwargs: Any) -> Optional[xmlschema.dataobjects.DataElement]
|
713,879
|
xmlschema.validators.global_maps
|
XsdGlobals
|
Mediator class for related XML schema instances. It stores the global
declarations defined in the registered schemas. Register a schema to
add its declarations to the global maps.
:param validator: the origin schema class/instance used for creating the global maps.
:param validation: the XSD validation mode to use, can be 'strict', 'lax' or 'skip'.
|
class XsdGlobals(XsdValidator):
"""
Mediator class for related XML schema instances. It stores the global
declarations defined in the registered schemas. Register a schema to
add its declarations to the global maps.
:param validator: the origin schema class/instance used for creating the global maps.
:param validation: the XSD validation mode to use, can be 'strict', 'lax' or 'skip'.
"""
types: Dict[str, Union[BaseXsdType, Tuple[ElementType, SchemaType]]]
attributes: Dict[str, Union[XsdAttribute, Tuple[ElementType, SchemaType]]]
attribute_groups: Dict[str, Union[XsdAttributeGroup, Tuple[ElementType, SchemaType]]]
groups: Dict[str, Union[XsdGroup, Tuple[ElementType, SchemaType]]]
notations: Dict[str, Union[XsdNotation, Tuple[ElementType, SchemaType]]]
elements: Dict[str, Union[XsdElement, Tuple[ElementType, SchemaType]]]
substitution_groups: Dict[str, Set[XsdElement]]
identities: Dict[str, XsdIdentity]
global_maps: Tuple[Dict[str, Any], ...]
missing_locations: List[str]
_loaded_schemas: Set['XMLSchemaBase']
_lookup_function_resolver = {
XSD_SIMPLE_TYPE: 'lookup_type',
XSD_COMPLEX_TYPE: 'lookup_type',
XSD_ELEMENT: 'lookup_element',
XSD_GROUP: 'lookup_group',
XSD_ATTRIBUTE: 'lookup_attribute',
XSD_ATTRIBUTE_GROUP: 'lookup_attribute_group',
XSD_NOTATION: 'lookup_notation',
}
def __init__(self, validator: SchemaType, validation: str = 'strict') -> None:
# Initialize the empty global maps, bound to the origin *validator* schema.
super().__init__(validation)
self.validator = validator
self.namespaces = NamespaceResourcesMap() # Registered schemas by namespace URI
self.missing_locations = [] # Missing or failing resource locations
self.types = {} # Global types (both complex and simple)
self.attributes = {} # Global attributes
self.attribute_groups = {} # Attribute groups
self.groups = {} # Model groups
self.notations = {} # Notations
self.elements = {} # Global elements
self.substitution_groups = {} # Substitution groups
self.identities = {} # Identity constraints (uniqueness, keys, keyref)
# Tuple grouping the six maps that hold XSD global components.
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
# Factories used to build global components from (elem, schema) pairs.
self._builders: Dict[str, Callable[[ElementType, SchemaType], Any]] = {
XSD_NOTATION: validator.xsd_notation_class,
XSD_SIMPLE_TYPE: validator.simple_type_factory,
XSD_COMPLEX_TYPE: validator.xsd_complex_type_class,
XSD_ATTRIBUTE: validator.xsd_attribute_class,
XSD_ATTRIBUTE_GROUP: validator.xsd_attribute_group_class,
XSD_GROUP: validator.xsd_group_class,
XSD_ELEMENT: validator.xsd_element_class,
}
# NOTE(review): presumably tracks the schemas already loaded into
# these maps — confirm against register/load methods.
self._loaded_schemas = set()
def __repr__(self) -> str:
    """Return a debug representation exposing the origin validator and mode."""
    cls_name = self.__class__.__name__
    return f'{cls_name}(validator={self.validator!r}, validation={self.validation!r})'
def copy(self, validator: Optional[SchemaType] = None,
         validation: Optional[str] = None) -> 'XsdGlobals':
    """
    Creates a shallow copy of the object. The associated schemas do not change
    the original global maps. This is useful for sharing the same meta-schema
    without copying the full tree objects, saving time and memory.
    """
    obj = self.__class__(
        validator=self.validator if validator is None else validator,
        validation=validation or self.validation
    )
    # Shallow-merge every global map and the loaded-schemas set.
    for attr_name in ('namespaces', 'types', 'attributes', 'attribute_groups',
                      'groups', 'notations', 'elements', 'substitution_groups',
                      'identities', '_loaded_schemas'):
        getattr(obj, attr_name).update(getattr(self, attr_name))
    return obj

__copy__ = copy
def lookup(self, tag: str, qname: str) -> 'SchemaGlobalType':
    """
    General lookup method for XSD global components.

    :param tag: the expanded QName of the XSD the global declaration/definition \
    (e.g. '{http://www.w3.org/2001/XMLSchema}element'), that is used to select \
    the global map for lookup.
    :param qname: the expanded QName of the component to be looked-up.
    :returns: an XSD global component.
    :raises: an XMLSchemaValueError if the *tag* argument is not appropriate for a global \
    component, an XMLSchemaKeyError if the *qname* argument is not found in the global map.
    """
    method_name = self._lookup_function_resolver.get(tag)
    if method_name is None:
        msg = _("wrong tag {!r} for an XSD global definition/declaration")
        raise XMLSchemaValueError(msg.format(tag)) from None
    lookup_function: 'Callable[[str], SchemaGlobalType]' = getattr(self, method_name)
    return lookup_function(qname)
def lookup_notation(self, qname: str) -> 'XsdNotation':
    """Return the global xs:notation for *qname*, building it on first access."""
    try:
        component = self.notations[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'xs:notation {qname!r} not found')
    if isinstance(component, XsdNotation):
        return component  # already built
    return cast('XsdNotation', self._build_global(component, qname, self.notations))
def lookup_type(self, qname: str) -> 'BaseXsdType':
    """Return the global XSD type for *qname*, building it on first access."""
    try:
        component = self.types[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:simpleType/xs:complexType {qname!r} not found')
    if isinstance(component, (XsdSimpleType, XsdComplexType)):
        return component  # already built
    return cast('BaseXsdType', self._build_global(component, qname, self.types))
def lookup_attribute(self, qname: str) -> 'XsdAttribute':
    """Return the global xs:attribute for *qname*, building it on first access."""
    try:
        component = self.attributes[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:attribute {qname!r} not found')
    if isinstance(component, XsdAttribute):
        return component  # already built
    return cast('XsdAttribute', self._build_global(component, qname, self.attributes))
def lookup_attribute_group(self, qname: str) -> 'XsdAttributeGroup':
    """Return the global xs:attributeGroup for *qname*, building it on first access."""
    try:
        component = self.attribute_groups[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:attributeGroup {qname!r} not found')
    if isinstance(component, XsdAttributeGroup):
        return component  # already built
    return cast('XsdAttributeGroup',
                self._build_global(component, qname, self.attribute_groups))
def lookup_group(self, qname: str) -> 'XsdGroup':
    """Return the global xs:group for *qname*, building it on first access."""
    try:
        component = self.groups[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:group {qname!r} not found')
    if isinstance(component, XsdGroup):
        return component  # already built
    return cast('XsdGroup', self._build_global(component, qname, self.groups))
def lookup_element(self, qname: str) -> 'XsdElement':
    """Return the global xs:element for *qname*, building it on first access."""
    try:
        component = self.elements[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:element {qname!r} not found')
    if isinstance(component, XsdElement):
        return component  # already built
    return cast('XsdElement', self._build_global(component, qname, self.elements))
def _build_global(self, obj: Any, qname: str,
                  global_map: Dict[str, Any]) -> Any:
    """
    Builds an unbuilt XSD global component and stores it in its global map.

    :param obj: the unbuilt global: a ``(elem, schema)`` couple, or a list of \
    such couples when the component has redefinitions.
    :param qname: the extended QName of the global component.
    :param global_map: the global map that holds the component.
    :return: the built XSD component, or the ``(elem, schema)`` couple when a \
    circular build is detected.
    """
    factory_or_class: Callable[[ElementType, SchemaType], Any]

    if isinstance(obj, tuple):
        # Not built XSD global component without redefinitions
        try:
            elem, schema = obj
        except ValueError:
            # A 1-tuple: the sentinel set below, so this component is already
            # being built further up the call stack.
            return obj[0]  # Circular build, simply return (elem, schema) couple

        try:
            factory_or_class = self._builders[elem.tag]
        except KeyError:
            msg = _("wrong element {0!r} for map {1!r}")
            raise XMLSchemaKeyError(msg.format(elem, global_map))

        # Re-entrant lookups during the factory call see the 1-tuple sentinel
        # and take the circular-build path above.
        global_map[qname] = obj,  # Encapsulate into a tuple to catch circular builds
        global_map[qname] = factory_or_class(elem, schema)
        return global_map[qname]

    elif isinstance(obj, list):
        # Not built XSD global component with redefinitions
        try:
            elem, schema = obj[0]
        except ValueError:
            return obj[0][0]  # Circular build, simply return (elem, schema) couple

        try:
            factory_or_class = self._builders[elem.tag]
        except KeyError:
            msg = _("wrong element {0!r} for map {1!r}")
            raise XMLSchemaKeyError(msg.format(elem, global_map))

        global_map[qname] = obj[0],  # To catch circular builds
        global_map[qname] = component = factory_or_class(elem, schema)

        # Apply redefinitions (changing elem involve reparse of the component)
        for elem, schema in obj[1:]:
            if component.schema.target_namespace != schema.target_namespace:
                msg = _("redefined schema {!r} has a different targetNamespace")
                raise XMLSchemaValueError(msg.format(schema))

            # Keep the pre-redefinition component reachable via `redefine`,
            # then rebind schema/elem (assigning `elem` triggers a reparse).
            component.redefine = component.copy()
            component.redefine.parent = component
            component.schema = schema
            component.elem = elem

        return global_map[qname]

    else:
        msg = _("unexpected instance {!r} in global map")
        raise XMLSchemaTypeError(msg.format(obj))
def get_instance_type(self, type_name: str, base_type: BaseXsdType,
                      namespaces: MutableMapping[str, str]) -> BaseXsdType:
    """
    Returns the instance XSI type from global maps, validating it with the reference base type.

    :param type_name: the XSI type attribute value, a QName in prefixed format.
    :param base_type: the XSD from which the instance type has to be derived.
    :param namespaces: a mapping from prefixes to namespaces.
    :raises XMLSchemaTypeError: if the looked-up type cannot substitute *base_type*.
    """
    if isinstance(base_type, XsdComplexType) and XSI_TYPE in base_type.attributes:
        # Validate the raw attribute value against the declared xsi:type attribute.
        xsd_attribute = cast(XsdAttribute, base_type.attributes[XSI_TYPE])
        xsd_attribute.validate(type_name)

    extended_name = get_extended_qname(type_name, namespaces)
    xsi_type = self.lookup_type(extended_name)
    if xsi_type.is_derived(base_type):
        return xsi_type
    elif isinstance(base_type, XsdSimpleType) and \
            base_type.is_union() and not base_type.facets:
        # Can be valid only if the union doesn't have facets, see:
        # https://www.w3.org/Bugs/Public/show_bug.cgi?id=4065
        if isinstance(base_type, XsdAtomicRestriction) and \
                isinstance(base_type.primitive_type, XsdUnion):
            if xsi_type in base_type.primitive_type.member_types:
                return xsi_type
        elif isinstance(base_type, XsdUnion):
            if xsi_type in base_type.member_types:
                return xsi_type

    msg = _("{0!r} cannot substitute {1!r}")
    raise XMLSchemaTypeError(msg.format(xsi_type, base_type))
@property
def built(self) -> bool:
    """``True`` if every registered schema reports itself as built."""
    for schema in self.iter_schemas():
        if not schema.built:
            return False
    return True
@property
def unbuilt(self) -> List[Union['XsdComponent', 'SchemaType']]:
    """Property that returns a list with unbuilt components."""
    result = []
    for schema in self.iter_schemas():
        for component in schema.iter_components():
            # The schema yields itself first: skip it, keep unbuilt components.
            if component is not schema and not component.built:
                result.append(component)
    return result
@property
def validation_attempted(self) -> str:
    """'full' when the maps are built, 'partial' when some schema reports a
    partial validation attempt, 'none' otherwise."""
    if self.built:
        return 'full'
    for schema in self.iter_schemas():
        if schema.validation_attempted == 'partial':
            return 'partial'
    return 'none'
@property
def validity(self) -> str:
    """'valid' when every schema is valid, 'invalid' when at least one is
    invalid, 'notKnown' otherwise (including when no namespace is registered)."""
    if not self.namespaces:
        return 'notKnown'
    all_valid = True
    any_invalid = False
    for schema in self.iter_schemas():
        state = schema.validity
        if state != 'valid':
            all_valid = False
        if state == 'invalid':
            any_invalid = True
    if all_valid:
        return 'valid'
    return 'invalid' if any_invalid else 'notKnown'
@property
def xsd_version(self) -> str:
    """The XSD version, delegated to the owner validator's ``XSD_VERSION``."""
    return self.validator.XSD_VERSION
@property
def all_errors(self) -> List['XMLSchemaParseError']:
    """All the parse errors accumulated by the registered schemas."""
    return [error for schema in self.iter_schemas() for error in schema.all_errors]
def create_bindings(self, *bases: Type[Any], **attrs: Any) -> None:
    """Creates data object bindings for the XSD elements of built schemas."""
    for element in self.iter_components(xsd_classes=XsdElement):
        assert isinstance(element, XsdElement)
        if element.target_namespace == XSD_NAMESPACE:
            continue  # skip elements of the XSD namespace itself
        element.get_binding(*bases, replace_existing=True, **attrs)
def clear_bindings(self) -> None:
    """Removes the data object binding from every XSD element."""
    for element in self.iter_components(xsd_classes=XsdElement):
        assert isinstance(element, XsdElement)
        element.binding = None
def iter_components(self, xsd_classes: 'ComponentClassType' = None) \
        -> Iterator[Union['XsdGlobals', 'XsdComponent']]:
    """Creates an iterator for the XSD components of built schemas."""
    matches_filter = xsd_classes is None or isinstance(self, xsd_classes)
    if matches_filter:
        yield self
    for global_component in self.iter_globals():
        yield from global_component.iter_components(xsd_classes)
def iter_globals(self) -> Iterator['SchemaGlobalType']:
    """Creates an iterator over all XSD global components, map by map."""
    for mapping in self.global_maps:
        yield from mapping.values()
def iter_schemas(self) -> Iterator['SchemaType']:
    """Creates an iterator over the registered schema instances."""
    for ns_schemas in self.namespaces.values():
        yield from ns_schemas
def register(self, schema: 'SchemaType') -> None:
    """Registers an XMLSchema instance."""
    ns_schemas = self.namespaces.get(schema.target_namespace)
    if ns_schemas is None:
        # First schema registered for this target namespace.
        self.namespaces[schema.target_namespace] = [schema]
        return
    if schema in ns_schemas:
        return  # already registered
    if schema.url is None:
        # only by multi-source init or add_schema() by user initiative
        ns_schemas.append(schema)
    elif not any(schema.url == other.url and schema.__class__ is other.__class__
                 for other in ns_schemas):
        # Register only if no schema of the same class shares the same URL.
        ns_schemas.append(schema)
def load_namespace(self, namespace: str, build: bool = True) -> bool:
    """
    Load namespace from available location hints. Returns `True` if the namespace
    is already loaded or if the namespace can be loaded from one of the locations,
    returns `False` otherwise. Failing locations are inserted into the missing
    locations list.

    :param namespace: the namespace to load.
    :param build: if left with `True` value builds the maps after load. If the \
    build fails the resource URL is added to missing locations.
    """
    namespace = namespace.strip()
    if namespace in self.namespaces:
        return True  # namespace already registered
    elif self.validator.meta_schema is None:
        return False  # Do not load additional namespaces for meta-schema (XHTML)

    # Try from schemas location hints: usually the namespaces related to these
    # hints are already loaded during schema construction, but it's better to
    # retry once if the initial load has failed.
    for schema in self.iter_schemas():
        for url in schema.get_locations(namespace):
            if url in self.missing_locations:
                continue  # location already known to fail: skip it

            try:
                if schema.import_schema(namespace, url, schema.base_url) is not None:
                    if build:
                        self.build()
            except OSError:
                pass  # unreachable resource: try the next location hint
            except XMLSchemaNotBuiltError:
                # The build failed: discard unbuilt schemas/globals and
                # remember the offending URL.
                self.clear(remove_schemas=True, only_unbuilt=True)
                self.missing_locations.append(url)
            else:
                # Import (and optional build) succeeded.
                return True

    # Try from library location hint, if there is any.
    if namespace in self.validator.fallback_locations:
        url = self.validator.fallback_locations[namespace]
        if url not in self.missing_locations:
            try:
                if self.validator.import_schema(namespace, url) is not None:
                    if build:
                        self.build()
            except OSError:
                return False
            except XMLSchemaNotBuiltError:
                self.clear(remove_schemas=True, only_unbuilt=True)
                self.missing_locations.append(url)
            else:
                return True

    return False
def clear(self, remove_schemas: bool = False, only_unbuilt: bool = False) -> None:
    """
    Clears the instance maps and schemas.

    :param remove_schemas: removes also the schema instances.
    :param only_unbuilt: removes only not built objects/schemas.
    """
    global_map: Dict[str, XsdComponent]

    if only_unbuilt:
        not_built_schemas = {s for s in self.iter_schemas() if not s.built}
        if not not_built_schemas:
            return  # everything is built: nothing to clear

        for global_map in self.global_maps:
            for k in list(global_map.keys()):
                obj = global_map[k]
                # Unbuilt entries are (elem, schema) couples, not XsdComponent
                # instances; drop those and any component of an unbuilt schema.
                if not isinstance(obj, XsdComponent) or obj.schema in not_built_schemas:
                    del global_map[k]
                    if k in self.substitution_groups:
                        del self.substitution_groups[k]
                    if k in self.identities:
                        del self.identities[k]

        self._loaded_schemas.difference_update(not_built_schemas)

        if remove_schemas:
            # Rebuild the namespace registry keeping only built schemas.
            # NOTE(review): assumes NamespaceResourcesMap.__setitem__
            # accumulates values per URI -- confirm against its definition.
            namespaces = NamespaceResourcesMap()
            for uri, value in self.namespaces.items():
                for schema in value:
                    if schema not in not_built_schemas:
                        namespaces[uri] = schema
            self.namespaces = namespaces
    else:
        del self.missing_locations[:]

        for global_map in self.global_maps:
            global_map.clear()
        self.substitution_groups.clear()
        self.identities.clear()
        self._loaded_schemas.clear()

        if remove_schemas:
            self.namespaces.clear()
def build(self) -> None:
    """
    Build the maps of XSD global definitions/declarations. The global maps are
    updated adding and building the globals of not built registered schemas.
    """
    meta_schema: Optional['XMLSchemaBase']
    try:
        meta_schema = self.namespaces[XSD_NAMESPACE][0]
    except KeyError:
        # The XSD namespace is not registered: legal only when a meta-schema
        # can be created from the validator.
        if self.validator.meta_schema is None:
            msg = _("missing XSD namespace in meta-schema instance {!r}")
            raise XMLSchemaValueError(msg.format(self.validator))
        meta_schema = None

    if meta_schema is None or meta_schema.meta_schema is not None:
        # XSD namespace not imported or XSD namespace not managed by a meta-schema.
        # Creates a new meta-schema instance from the XSD meta-schema source and
        # replaces the default meta-schema instance in all registered schemas.
        meta_schema = self.validator.create_meta_schema(global_maps=self)

        for schema in self.iter_schemas():
            if schema.meta_schema is not None:
                schema.meta_schema = meta_schema
    else:
        if not self.types and meta_schema.maps is not self:
            # Empty maps sharing a foreign meta-schema: seed the global maps
            # with the meta-schema's already-built globals.
            for source_map, target_map in zip(meta_schema.maps.global_maps, self.global_maps):
                target_map.update(source_map)
            self._loaded_schemas.update(meta_schema.maps._loaded_schemas)

    not_loaded_schemas = [s for s in self.iter_schemas() if s not in self._loaded_schemas]
    for schema in not_loaded_schemas:
        schema._root_elements = None  # invalidate the cached root elements
        self._loaded_schemas.add(schema)

    # Load and build global declarations
    load_xsd_simple_types(self.types, not_loaded_schemas)
    load_xsd_complex_types(self.types, not_loaded_schemas)
    load_xsd_notations(self.notations, not_loaded_schemas)
    load_xsd_attributes(self.attributes, not_loaded_schemas)
    load_xsd_attribute_groups(self.attribute_groups, not_loaded_schemas)
    load_xsd_elements(self.elements, not_loaded_schemas)
    load_xsd_groups(self.groups, not_loaded_schemas)

    if not meta_schema.built:
        # First build: create the XSD builtin types in the types map.
        xsd_builtin_types_factory(meta_schema, self.types)

    if self is not meta_schema.maps:
        # Rebuild xs:anyType for maps not owned by the meta-schema
        # in order to do a correct namespace lookup for wildcards.
        self.types[XSD_ANY_TYPE] = self.validator.create_any_type()

    # Build the loaded globals (lookup_* builds the entry on first access).
    for qname in self.notations:
        self.lookup_notation(qname)
    for qname in self.attributes:
        self.lookup_attribute(qname)
    for qname in self.attribute_groups:
        self.lookup_attribute_group(qname)

    # Resolve each schema's defaultAttributes reference, now that the
    # attribute groups are built.
    for schema in not_loaded_schemas:
        if not isinstance(schema.default_attributes, str):
            continue

        try:
            attributes = schema.maps.attribute_groups[schema.default_attributes]
        except KeyError:
            schema.default_attributes = None
            msg = _("defaultAttributes={0!r} doesn't match any attribute group of {1!r}")
            schema.parse_error(
                error=msg.format(schema.root.get('defaultAttributes'), schema),
                elem=schema.root
            )
        else:
            schema.default_attributes = cast(XsdAttributeGroup, attributes)

    for qname in self.types:
        self.lookup_type(qname)
    for qname in self.elements:
        self.lookup_element(qname)
    for qname in self.groups:
        self.lookup_group(qname)

    # Build element declarations inside model groups.
    for schema in not_loaded_schemas:
        for group in schema.iter_components(XsdGroup):
            group.build()

    # Build identity references and XSD 1.1 assertions
    for schema in not_loaded_schemas:
        for obj in schema.iter_components((XsdIdentity, XsdAssert)):
            obj.build()

    # Final consistency checks, skipping meta-schema instances themselves.
    self.check(filter(lambda x: x.meta_schema is not None, not_loaded_schemas), self.validation)
def check(self, schemas: Optional[Iterable[SchemaType]] = None,
          validation: str = 'strict') -> None:
    """
    Checks the global maps. For default checks all schemas and raises an
    exception at first error.

    :param schemas: optional argument with the set of the schemas to check.
    :param validation: overrides the default validation mode of the validator.
    :raise: XMLSchemaParseError
    """
    _schemas = set(schemas if schemas is not None else self.iter_schemas())

    # Checks substitution groups circularity
    for qname in self.substitution_groups:
        xsd_element = self.elements[qname]
        assert isinstance(xsd_element, XsdElement), _("global element not built!")
        if any(e is xsd_element for e in xsd_element.iter_substitutes()):
            msg = _("circularity found for substitution group with head element {}")
            xsd_element.parse_error(msg.format(xsd_element), validation=validation)

    if validation == 'strict' and not self.built:
        raise XMLSchemaNotBuiltError(
            self, _("global map has unbuilt components: %r") % self.unbuilt
        )

    # Check redefined global groups restrictions
    for group in self.groups.values():
        assert isinstance(group, XsdGroup), _("global group not built!")
        if group.schema not in _schemas or group.redefine is None:
            continue

        # Walk the chain of redefinitions: each redefined group must be
        # a valid restriction of the group it redefines, unless it simply
        # references itself by name.
        while group.redefine is not None:
            if not any(isinstance(e, XsdGroup) and e.name == group.name for e in group) \
                    and not group.is_restriction(group.redefine):
                msg = _("the redefined group is an illegal restriction")
                group.parse_error(msg, validation=validation)

            group = group.redefine

    # Check complex content types models restrictions
    for xsd_global in filter(lambda x: x.schema in _schemas, self.iter_globals()):
        xsd_type: Any
        for xsd_type in xsd_global.iter_components(XsdComplexType):
            if not isinstance(xsd_type.content, XsdGroup):
                continue

            if xsd_type.derivation == 'restriction':
                base_type = xsd_type.base_type
                if base_type and base_type.name != XSD_ANY_TYPE and base_type.is_complex():
                    # Content model of a restriction must restrict the base model.
                    if not xsd_type.content.is_restriction(base_type.content):
                        msg = _("the derived group is an illegal restriction")
                        xsd_type.parse_error(msg, validation=validation)

                if base_type.is_complex() and not base_type.open_content and \
                        xsd_type.open_content and xsd_type.open_content.mode != 'none':
                    # XSD 1.1 open content: simulate the open content with a
                    # wildcard group and check it against the base content.
                    _group = xsd_type.schema.create_any_content_group(
                        parent=xsd_type,
                        any_element=xsd_type.open_content.any_element
                    )
                    if not _group.is_restriction(base_type.content):
                        msg = _("restriction has an open content but base type has not")
                        _group.parse_error(msg, validation=validation)

            try:
                check_model(xsd_type.content)
            except XMLSchemaModelDepthError:
                msg = _("can't verify the content model of {!r} "
                        "due to exceeding of maximum recursion depth")
                xsd_type.schema.warnings.append(msg.format(xsd_type))
                warnings.warn(msg, XMLSchemaWarning, stacklevel=4)
            except XMLSchemaModelError as err:
                if validation == 'strict':
                    raise
                xsd_type.errors.append(err)
|
(validator: Optional[~T], validation: str = 'strict') -> None
|
713,880
|
xmlschema.validators.global_maps
|
copy
|
Creates a shallow copy of the object. The associated schemas do not change
the original global maps. This is useful for sharing the same meta-schema
without copying the full tree objects, saving time and memory.
|
def copy(self, validator: Optional[SchemaType] = None,
validation: Optional[str] = None) -> 'XsdGlobals':
"""
Creates a shallow copy of the object. The associated schemas do not change
the original global maps. This is useful for sharing the same meta-schema
without copying the full tree objects, saving time and memory.
"""
obj = self.__class__(
validator=self.validator if validator is None else validator,
validation=validation or self.validation
)
obj.namespaces.update(self.namespaces)
obj.types.update(self.types)
obj.attributes.update(self.attributes)
obj.attribute_groups.update(self.attribute_groups)
obj.groups.update(self.groups)
obj.notations.update(self.notations)
obj.elements.update(self.elements)
obj.substitution_groups.update(self.substitution_groups)
obj.identities.update(self.identities)
obj._loaded_schemas.update(self._loaded_schemas)
return obj
|
(self, validator: Optional[~T] = None, validation: Optional[str] = None) -> xmlschema.validators.global_maps.XsdGlobals
|
713,881
|
xmlschema.validators.global_maps
|
__init__
| null |
def __init__(self, validator: SchemaType, validation: str = 'strict') -> None:
super().__init__(validation)
self.validator = validator
self.namespaces = NamespaceResourcesMap() # Registered schemas by namespace URI
self.missing_locations = [] # Missing or failing resource locations
self.types = {} # Global types (both complex and simple)
self.attributes = {} # Global attributes
self.attribute_groups = {} # Attribute groups
self.groups = {} # Model groups
self.notations = {} # Notations
self.elements = {} # Global elements
self.substitution_groups = {} # Substitution groups
self.identities = {} # Identity constraints (uniqueness, keys, keyref)
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
self._builders: Dict[str, Callable[[ElementType, SchemaType], Any]] = {
XSD_NOTATION: validator.xsd_notation_class,
XSD_SIMPLE_TYPE: validator.simple_type_factory,
XSD_COMPLEX_TYPE: validator.xsd_complex_type_class,
XSD_ATTRIBUTE: validator.xsd_attribute_class,
XSD_ATTRIBUTE_GROUP: validator.xsd_attribute_group_class,
XSD_GROUP: validator.xsd_group_class,
XSD_ELEMENT: validator.xsd_element_class,
}
self._loaded_schemas = set()
|
(self, validator: Optional[~T], validation: str = 'strict') -> NoneType
|
713,882
|
xmlschema.validators.global_maps
|
__repr__
| null |
def __repr__(self) -> str:
return '%s(validator=%r, validation=%r)' % (
self.__class__.__name__, self.validator, self.validation
)
|
(self) -> str
|
713,883
|
xmlschema.validators.global_maps
|
_build_global
| null |
def _build_global(self, obj: Any, qname: str,
global_map: Dict[str, Any]) -> Any:
factory_or_class: Callable[[ElementType, SchemaType], Any]
if isinstance(obj, tuple):
# Not built XSD global component without redefinitions
try:
elem, schema = obj
except ValueError:
return obj[0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = self._builders[elem.tag]
except KeyError:
msg = _("wrong element {0!r} for map {1!r}")
raise XMLSchemaKeyError(msg.format(elem, global_map))
global_map[qname] = obj, # Encapsulate into a tuple to catch circular builds
global_map[qname] = factory_or_class(elem, schema)
return global_map[qname]
elif isinstance(obj, list):
# Not built XSD global component with redefinitions
try:
elem, schema = obj[0]
except ValueError:
return obj[0][0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = self._builders[elem.tag]
except KeyError:
msg = _("wrong element {0!r} for map {1!r}")
raise XMLSchemaKeyError(msg.format(elem, global_map))
global_map[qname] = obj[0], # To catch circular builds
global_map[qname] = component = factory_or_class(elem, schema)
# Apply redefinitions (changing elem involve reparse of the component)
for elem, schema in obj[1:]:
if component.schema.target_namespace != schema.target_namespace:
msg = _("redefined schema {!r} has a different targetNamespace")
raise XMLSchemaValueError(msg.format(schema))
component.redefine = component.copy()
component.redefine.parent = component
component.schema = schema
component.elem = elem
return global_map[qname]
else:
msg = _("unexpected instance {!r} in global map")
raise XMLSchemaTypeError(msg.format(obj))
|
(self, obj: Any, qname: str, global_map: Dict[str, Any]) -> Any
|
713,885
|
xmlschema.validators.global_maps
|
build
|
Build the maps of XSD global definitions/declarations. The global maps are
updated adding and building the globals of not built registered schemas.
|
def build(self) -> None:
"""
Build the maps of XSD global definitions/declarations. The global maps are
updated adding and building the globals of not built registered schemas.
"""
meta_schema: Optional['XMLSchemaBase']
try:
meta_schema = self.namespaces[XSD_NAMESPACE][0]
except KeyError:
if self.validator.meta_schema is None:
msg = _("missing XSD namespace in meta-schema instance {!r}")
raise XMLSchemaValueError(msg.format(self.validator))
meta_schema = None
if meta_schema is None or meta_schema.meta_schema is not None:
# XSD namespace not imported or XSD namespace not managed by a meta-schema.
# Creates a new meta-schema instance from the XSD meta-schema source and
# replaces the default meta-schema instance in all registered schemas.
meta_schema = self.validator.create_meta_schema(global_maps=self)
for schema in self.iter_schemas():
if schema.meta_schema is not None:
schema.meta_schema = meta_schema
else:
if not self.types and meta_schema.maps is not self:
for source_map, target_map in zip(meta_schema.maps.global_maps, self.global_maps):
target_map.update(source_map)
self._loaded_schemas.update(meta_schema.maps._loaded_schemas)
not_loaded_schemas = [s for s in self.iter_schemas() if s not in self._loaded_schemas]
for schema in not_loaded_schemas:
schema._root_elements = None
self._loaded_schemas.add(schema)
# Load and build global declarations
load_xsd_simple_types(self.types, not_loaded_schemas)
load_xsd_complex_types(self.types, not_loaded_schemas)
load_xsd_notations(self.notations, not_loaded_schemas)
load_xsd_attributes(self.attributes, not_loaded_schemas)
load_xsd_attribute_groups(self.attribute_groups, not_loaded_schemas)
load_xsd_elements(self.elements, not_loaded_schemas)
load_xsd_groups(self.groups, not_loaded_schemas)
if not meta_schema.built:
xsd_builtin_types_factory(meta_schema, self.types)
if self is not meta_schema.maps:
# Rebuild xs:anyType for maps not owned by the meta-schema
# in order to do a correct namespace lookup for wildcards.
self.types[XSD_ANY_TYPE] = self.validator.create_any_type()
for qname in self.notations:
self.lookup_notation(qname)
for qname in self.attributes:
self.lookup_attribute(qname)
for qname in self.attribute_groups:
self.lookup_attribute_group(qname)
for schema in not_loaded_schemas:
if not isinstance(schema.default_attributes, str):
continue
try:
attributes = schema.maps.attribute_groups[schema.default_attributes]
except KeyError:
schema.default_attributes = None
msg = _("defaultAttributes={0!r} doesn't match any attribute group of {1!r}")
schema.parse_error(
error=msg.format(schema.root.get('defaultAttributes'), schema),
elem=schema.root
)
else:
schema.default_attributes = cast(XsdAttributeGroup, attributes)
for qname in self.types:
self.lookup_type(qname)
for qname in self.elements:
self.lookup_element(qname)
for qname in self.groups:
self.lookup_group(qname)
# Build element declarations inside model groups.
for schema in not_loaded_schemas:
for group in schema.iter_components(XsdGroup):
group.build()
# Build identity references and XSD 1.1 assertions
for schema in not_loaded_schemas:
for obj in schema.iter_components((XsdIdentity, XsdAssert)):
obj.build()
self.check(filter(lambda x: x.meta_schema is not None, not_loaded_schemas), self.validation)
|
(self) -> NoneType
|
713,886
|
xmlschema.validators.global_maps
|
check
|
Checks the global maps. For default checks all schemas and raises an
exception at first error.
:param schemas: optional argument with the set of the schemas to check.
:param validation: overrides the default validation mode of the validator.
:raise: XMLSchemaParseError
|
def check(self, schemas: Optional[Iterable[SchemaType]] = None,
validation: str = 'strict') -> None:
"""
Checks the global maps. For default checks all schemas and raises an
exception at first error.
:param schemas: optional argument with the set of the schemas to check.
:param validation: overrides the default validation mode of the validator.
:raise: XMLSchemaParseError
"""
_schemas = set(schemas if schemas is not None else self.iter_schemas())
# Checks substitution groups circularity
for qname in self.substitution_groups:
xsd_element = self.elements[qname]
assert isinstance(xsd_element, XsdElement), _("global element not built!")
if any(e is xsd_element for e in xsd_element.iter_substitutes()):
msg = _("circularity found for substitution group with head element {}")
xsd_element.parse_error(msg.format(xsd_element), validation=validation)
if validation == 'strict' and not self.built:
raise XMLSchemaNotBuiltError(
self, _("global map has unbuilt components: %r") % self.unbuilt
)
# Check redefined global groups restrictions
for group in self.groups.values():
assert isinstance(group, XsdGroup), _("global group not built!")
if group.schema not in _schemas or group.redefine is None:
continue
while group.redefine is not None:
if not any(isinstance(e, XsdGroup) and e.name == group.name for e in group) \
and not group.is_restriction(group.redefine):
msg = _("the redefined group is an illegal restriction")
group.parse_error(msg, validation=validation)
group = group.redefine
# Check complex content types models restrictions
for xsd_global in filter(lambda x: x.schema in _schemas, self.iter_globals()):
xsd_type: Any
for xsd_type in xsd_global.iter_components(XsdComplexType):
if not isinstance(xsd_type.content, XsdGroup):
continue
if xsd_type.derivation == 'restriction':
base_type = xsd_type.base_type
if base_type and base_type.name != XSD_ANY_TYPE and base_type.is_complex():
if not xsd_type.content.is_restriction(base_type.content):
msg = _("the derived group is an illegal restriction")
xsd_type.parse_error(msg, validation=validation)
if base_type.is_complex() and not base_type.open_content and \
xsd_type.open_content and xsd_type.open_content.mode != 'none':
_group = xsd_type.schema.create_any_content_group(
parent=xsd_type,
any_element=xsd_type.open_content.any_element
)
if not _group.is_restriction(base_type.content):
msg = _("restriction has an open content but base type has not")
_group.parse_error(msg, validation=validation)
try:
check_model(xsd_type.content)
except XMLSchemaModelDepthError:
msg = _("can't verify the content model of {!r} "
"due to exceeding of maximum recursion depth")
xsd_type.schema.warnings.append(msg.format(xsd_type))
warnings.warn(msg, XMLSchemaWarning, stacklevel=4)
except XMLSchemaModelError as err:
if validation == 'strict':
raise
xsd_type.errors.append(err)
|
(self, schemas: Optional[Iterable[Optional[~T]]] = None, validation: str = 'strict') -> NoneType
|
713,887
|
xmlschema.validators.global_maps
|
clear
|
Clears the instance maps and schemas.
:param remove_schemas: removes also the schema instances.
:param only_unbuilt: removes only not built objects/schemas.
|
def clear(self, remove_schemas: bool = False, only_unbuilt: bool = False) -> None:
"""
Clears the instance maps and schemas.
:param remove_schemas: removes also the schema instances.
:param only_unbuilt: removes only not built objects/schemas.
"""
global_map: Dict[str, XsdComponent]
if only_unbuilt:
not_built_schemas = {s for s in self.iter_schemas() if not s.built}
if not not_built_schemas:
return
for global_map in self.global_maps:
for k in list(global_map.keys()):
obj = global_map[k]
if not isinstance(obj, XsdComponent) or obj.schema in not_built_schemas:
del global_map[k]
if k in self.substitution_groups:
del self.substitution_groups[k]
if k in self.identities:
del self.identities[k]
self._loaded_schemas.difference_update(not_built_schemas)
if remove_schemas:
namespaces = NamespaceResourcesMap()
for uri, value in self.namespaces.items():
for schema in value:
if schema not in not_built_schemas:
namespaces[uri] = schema
self.namespaces = namespaces
else:
del self.missing_locations[:]
for global_map in self.global_maps:
global_map.clear()
self.substitution_groups.clear()
self.identities.clear()
self._loaded_schemas.clear()
if remove_schemas:
self.namespaces.clear()
|
(self, remove_schemas: bool = False, only_unbuilt: bool = False) -> NoneType
|
713,888
|
xmlschema.validators.global_maps
|
clear_bindings
| null |
def clear_bindings(self) -> None:
for xsd_element in self.iter_components(xsd_classes=XsdElement):
assert isinstance(xsd_element, XsdElement)
xsd_element.binding = None
|
(self) -> NoneType
|
713,890
|
xmlschema.validators.global_maps
|
create_bindings
|
Creates data object bindings for the XSD elements of built schemas.
|
def create_bindings(self, *bases: Type[Any], **attrs: Any) -> None:
"""Creates data object bindings for the XSD elements of built schemas."""
for xsd_element in self.iter_components(xsd_classes=XsdElement):
assert isinstance(xsd_element, XsdElement)
if xsd_element.target_namespace != XSD_NAMESPACE:
xsd_element.get_binding(*bases, replace_existing=True, **attrs)
|
(self, *bases: Type[Any], **attrs: Any) -> NoneType
|
713,891
|
xmlschema.validators.global_maps
|
get_instance_type
|
Returns the instance XSI type from global maps, validating it with the reference base type.
:param type_name: the XSI type attribute value, a QName in prefixed format.
:param base_type: the XSD from which the instance type has to be derived.
:param namespaces: a mapping from prefixes to namespaces.
|
def get_instance_type(self, type_name: str, base_type: BaseXsdType,
                      namespaces: MutableMapping[str, str]) -> BaseXsdType:
    """
    Returns the instance XSI type from global maps, validating it with
    the reference base type.

    :param type_name: the XSI type attribute value, a QName in prefixed format.
    :param base_type: the XSD type from which the instance type has to be derived.
    :param namespaces: a mapping from prefixes to namespaces.
    :raises XMLSchemaTypeError: if the looked-up type cannot substitute *base_type*.
    """
    # If the base complex type declares an xsi:type attribute, validate the
    # raw attribute value against that declaration first.
    if isinstance(base_type, XsdComplexType) and XSI_TYPE in base_type.attributes:
        xsd_attribute = cast(XsdAttribute, base_type.attributes[XSI_TYPE])
        xsd_attribute.validate(type_name)

    extended_name = get_extended_qname(type_name, namespaces)
    xsi_type = self.lookup_type(extended_name)
    if xsi_type.is_derived(base_type):
        return xsi_type
    elif isinstance(base_type, XsdSimpleType) and \
            base_type.is_union() and not base_type.facets:
        # Can be valid only if the union doesn't have facets, see:
        # https://www.w3.org/Bugs/Public/show_bug.cgi?id=4065
        if isinstance(base_type, XsdAtomicRestriction) and \
                isinstance(base_type.primitive_type, XsdUnion):
            if xsi_type in base_type.primitive_type.member_types:
                return xsi_type
        elif isinstance(base_type, XsdUnion):
            if xsi_type in base_type.member_types:
                return xsi_type

    msg = _("{0!r} cannot substitute {1!r}")
    raise XMLSchemaTypeError(msg.format(xsi_type, base_type))
|
(self, type_name: str, base_type: Optional[~T], namespaces: MutableMapping[str, str]) -> Optional[~T]
|
713,892
|
xmlschema.validators.global_maps
|
iter_components
|
Creates an iterator for the XSD components of built schemas.
|
def iter_components(self, xsd_classes: ComponentClassType = None) \
        -> Iterator[Union['XsdGlobals', XsdComponent]]:
    """Creates an iterator for the XSD components of built schemas."""
    # The global maps object itself is yielded first, when it matches the filter.
    if xsd_classes is None or isinstance(self, xsd_classes):
        yield self
    yield from (
        component
        for xsd_global in self.iter_globals()
        for component in xsd_global.iter_components(xsd_classes)
    )
|
(self, xsd_classes: Optional[~T] = None) -> Iterator[Union[xmlschema.validators.global_maps.XsdGlobals, xmlschema.validators.xsdbase.XsdComponent]]
|
713,893
|
xmlschema.validators.global_maps
|
iter_globals
|
Creates an iterator for the XSD global components of built schemas.
|
def iter_globals(self) -> Iterator[SchemaGlobalType]:
    """Creates an iterator for the XSD global components of built schemas."""
    for mapping in self.global_maps:
        yield from mapping.values()
|
(self) -> Iterator[Optional[~T]]
|
713,894
|
xmlschema.validators.global_maps
|
iter_schemas
|
Creates an iterator for the registered schemas.
|
def iter_schemas(self) -> Iterator[SchemaType]:
    """Creates an iterator for the registered schemas."""
    for ns_schemas in self.namespaces.values():
        yield from ns_schemas
|
(self) -> Iterator[Optional[~T]]
|
713,895
|
xmlschema.validators.global_maps
|
load_namespace
|
Load namespace from available location hints. Returns `True` if the namespace
is already loaded or if the namespace can be loaded from one of the locations,
returns `False` otherwise. Failing locations are inserted into the missing
locations list.
:param namespace: the namespace to load.
:param build: if left with `True` value builds the maps after load. If the build fails the resource URL is added to missing locations.
|
def load_namespace(self, namespace: str, build: bool = True) -> bool:
    """
    Load namespace from available location hints. Returns `True` if the namespace
    is already loaded or if the namespace can be loaded from one of the locations,
    returns `False` otherwise. Failing locations are inserted into the missing
    locations list.

    :param namespace: the namespace to load.
    :param build: if left with `True` value builds the maps after load. If the \
    build fails the resource URL is added to missing locations.
    """
    namespace = namespace.strip()
    if namespace in self.namespaces:
        return True
    elif self.validator.meta_schema is None:
        return False  # Do not load additional namespaces for meta-schema (XHTML)

    # Try from schemas location hints: usually the namespaces related to these
    # hints are already loaded during schema construction, but it's better to
    # retry once if the initial load has failed.
    for schema in self.iter_schemas():
        for url in schema.get_locations(namespace):
            if url in self.missing_locations:
                # Skip locations that already failed previously.
                continue

            try:
                # import_schema() returns None when the namespace is already
                # imported; in that case the 'else' branch still reports success.
                if schema.import_schema(namespace, url, schema.base_url) is not None:
                    if build:
                        self.build()
            except OSError:
                pass  # unreachable location: try the next hint
            except XMLSchemaNotBuiltError:
                # A failed build leaves partially-built globals: discard them
                # and remember the offending location.
                self.clear(remove_schemas=True, only_unbuilt=True)
                self.missing_locations.append(url)
            else:
                return True

    # Try from library location hint, if there is any.
    if namespace in self.validator.fallback_locations:
        url = self.validator.fallback_locations[namespace]
        if url not in self.missing_locations:
            try:
                if self.validator.import_schema(namespace, url) is not None:
                    if build:
                        self.build()
            except OSError:
                return False
            except XMLSchemaNotBuiltError:
                self.clear(remove_schemas=True, only_unbuilt=True)
                self.missing_locations.append(url)
            else:
                return True
    return False
|
(self, namespace: str, build: bool = True) -> bool
|
713,896
|
xmlschema.validators.global_maps
|
lookup
|
General lookup method for XSD global components.
:param tag: the expanded QName of the XSD global declaration/definition (e.g. '{http://www.w3.org/2001/XMLSchema}element'), that is used to select the global map for lookup.
:param qname: the expanded QName of the component to be looked-up.
:returns: an XSD global component.
:raises: an XMLSchemaValueError if the *tag* argument is not appropriate for a global component, an XMLSchemaKeyError if the *qname* argument is not found in the global map.
|
def lookup(self, tag: str, qname: str) -> SchemaGlobalType:
    """
    General lookup method for XSD global components.

    :param tag: the expanded QName of the XSD global declaration/definition \
    (e.g. '{http://www.w3.org/2001/XMLSchema}element'), that is used to select \
    the global map for lookup.
    :param qname: the expanded QName of the component to be looked-up.
    :returns: an XSD global component.
    :raises: an XMLSchemaValueError if the *tag* argument is not appropriate for a global \
    component, an XMLSchemaKeyError if the *qname* argument is not found in the global map.
    """
    try:
        method_name = self._lookup_function_resolver[tag]
    except KeyError:
        msg = _("wrong tag {!r} for an XSD global definition/declaration")
        raise XMLSchemaValueError(msg.format(tag)) from None

    lookup_function: Callable[[str], SchemaGlobalType] = getattr(self, method_name)
    return lookup_function(qname)
|
(self, tag: str, qname: str) -> Optional[~T]
|
713,897
|
xmlschema.validators.global_maps
|
lookup_attribute
| null |
def lookup_attribute(self, qname: str) -> XsdAttribute:
    """Looks up a global xs:attribute, building it on first access."""
    try:
        obj = self.attributes[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:attribute {qname!r} not found')

    if isinstance(obj, XsdAttribute):
        return obj  # already built
    return cast(XsdAttribute, self._build_global(obj, qname, self.attributes))
|
(self, qname: str) -> xmlschema.validators.attributes.XsdAttribute
|
713,898
|
xmlschema.validators.global_maps
|
lookup_attribute_group
| null |
def lookup_attribute_group(self, qname: str) -> XsdAttributeGroup:
    """Looks up a global xs:attributeGroup, building it on first access."""
    try:
        obj = self.attribute_groups[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:attributeGroup {qname!r} not found')

    if isinstance(obj, XsdAttributeGroup):
        return obj  # already built
    return cast(XsdAttributeGroup, self._build_global(obj, qname, self.attribute_groups))
|
(self, qname: str) -> xmlschema.validators.attributes.XsdAttributeGroup
|
713,899
|
xmlschema.validators.global_maps
|
lookup_element
| null |
def lookup_element(self, qname: str) -> XsdElement:
    """Looks up a global xs:element, building it on first access."""
    try:
        obj = self.elements[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:element {qname!r} not found')

    if isinstance(obj, XsdElement):
        return obj  # already built
    return cast(XsdElement, self._build_global(obj, qname, self.elements))
|
(self, qname: str) -> xmlschema.validators.elements.XsdElement
|
713,900
|
xmlschema.validators.global_maps
|
lookup_group
| null |
def lookup_group(self, qname: str) -> XsdGroup:
    """Looks up a global xs:group, building it on first access."""
    try:
        obj = self.groups[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:group {qname!r} not found')

    if isinstance(obj, XsdGroup):
        return obj  # already built
    return cast(XsdGroup, self._build_global(obj, qname, self.groups))
|
(self, qname: str) -> xmlschema.validators.groups.XsdGroup
|
713,901
|
xmlschema.validators.global_maps
|
lookup_notation
| null |
def lookup_notation(self, qname: str) -> XsdNotation:
    """Looks up a global xs:notation, building it on first access."""
    try:
        obj = self.notations[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'xs:notation {qname!r} not found')

    if isinstance(obj, XsdNotation):
        return obj  # already built
    return cast(XsdNotation, self._build_global(obj, qname, self.notations))
|
(self, qname: str) -> xmlschema.validators.notations.XsdNotation
|
713,902
|
xmlschema.validators.global_maps
|
lookup_type
| null |
def lookup_type(self, qname: str) -> BaseXsdType:
    """Looks up a global xs:simpleType/xs:complexType, building it on first access."""
    try:
        obj = self.types[qname]
    except KeyError:
        raise XMLSchemaKeyError(f'global xs:simpleType/xs:complexType {qname!r} not found')

    if isinstance(obj, (XsdSimpleType, XsdComplexType)):
        return obj  # already built
    return cast(BaseXsdType, self._build_global(obj, qname, self.types))
|
(self, qname: str) -> Optional[~T]
|
713,904
|
xmlschema.validators.global_maps
|
register
|
Registers an XMLSchema instance.
|
def register(self, schema: SchemaType) -> None:
    """Registers an XMLSchema instance."""
    target = schema.target_namespace
    try:
        ns_schemas = self.namespaces[target]
    except KeyError:
        # First schema registered for this namespace.
        self.namespaces[target] = [schema]
        return

    if schema in ns_schemas:
        return
    if schema.url is None:
        # only by multi-source init or add_schema() by user initiative
        ns_schemas.append(schema)
    elif not any(schema.url == obj.url and schema.__class__ is obj.__class__
                 for obj in ns_schemas):
        # Skip duplicates of an already registered URL/class pair.
        ns_schemas.append(schema)
|
(self, schema: Optional[~T]) -> NoneType
|
713,906
|
xmlschema.validators.xsdbase
|
XsdType
|
Common base class for XSD types.
|
class XsdType(XsdComponent):
    """Common base class for XSD types."""

    # Defaults for per-instance attributes, overridden during parsing.
    abstract = False
    base_type: Optional[BaseXsdType] = None  # the type this one derives from, if any
    derivation: Optional[str] = None  # 'extension', 'restriction' or None
    _final: Optional[str] = None  # explicit 'final' attribute value, if provided

    @property
    def final(self) -> str:
        # Falls back to the schema's finalDefault when not set on the type.
        return self.schema.final_default if self._final is None else self._final

    @property
    def built(self) -> bool:
        raise NotImplementedError()

    @property
    def content_type_label(self) -> str:
        """The content type classification."""
        raise NotImplementedError()

    @property
    def sequence_type(self) -> str:
        """The XPath sequence type associated with the content."""
        raise NotImplementedError()

    @property
    def root_type(self) -> BaseXsdType:
        """
        The root type of the type definition hierarchy. For an atomic type
        is the primitive type. For a list is the primitive type of the item.
        For a union is the base union type. For a complex type is xs:anyType.
        """
        raise NotImplementedError()

    @property
    def simple_type(self) -> Optional['XsdSimpleType']:
        """
        Property that is the instance itself for a simpleType. For a
        complexType is the instance's *content* if this is a simpleType
        or `None` if the instance's *content* is a model group.
        """
        raise NotImplementedError()

    @property
    def model_group(self) -> Optional['XsdGroup']:
        """
        Property that is `None` for a simpleType. For a complexType is
        the instance's *content* if this is a model group or `None` if
        the instance's *content* is a simpleType.
        """
        return None

    @staticmethod
    def is_simple() -> bool:
        """Returns `True` if the instance is a simpleType, `False` otherwise."""
        raise NotImplementedError()

    @staticmethod
    def is_complex() -> bool:
        """Returns `True` if the instance is a complexType, `False` otherwise."""
        raise NotImplementedError()

    def is_atomic(self) -> bool:
        """Returns `True` if the instance is an atomic simpleType, `False` otherwise."""
        return False

    def is_list(self) -> bool:
        """Returns `True` if the instance is a list simpleType, `False` otherwise."""
        return False

    def is_union(self) -> bool:
        """Returns `True` if the instance is a union simpleType, `False` otherwise."""
        return False

    def is_datetime(self) -> bool:
        """
        Returns `True` if the instance is a datetime/duration XSD builtin-type, `False` otherwise.
        """
        return False

    def is_empty(self) -> bool:
        """Returns `True` if the instance has an empty content, `False` otherwise."""
        raise NotImplementedError()

    def is_emptiable(self) -> bool:
        """Returns `True` if the instance has an emptiable value or content, `False` otherwise."""
        raise NotImplementedError()

    def has_simple_content(self) -> bool:
        """
        Returns `True` if the instance has a simple content, `False` otherwise.
        """
        raise NotImplementedError()

    def has_complex_content(self) -> bool:
        """
        Returns `True` if the instance is a complexType with mixed or element-only
        content, `False` otherwise.
        """
        raise NotImplementedError()

    def has_mixed_content(self) -> bool:
        """
        Returns `True` if the instance is a complexType with mixed content, `False` otherwise.
        """
        raise NotImplementedError()

    def is_element_only(self) -> bool:
        """
        Returns `True` if the instance is a complexType with element-only content,
        `False` otherwise.
        """
        raise NotImplementedError()

    def is_derived(self, other: Union[BaseXsdType, Tuple[ElementType, SchemaType]],
                   derivation: Optional[str] = None) -> bool:
        """
        Returns `True` if the instance is derived from *other*, `False` otherwise.
        The optional argument derivation can be a string containing the words
        'extension' or 'restriction' or both.
        """
        raise NotImplementedError()

    def is_extension(self) -> bool:
        """Returns `True` if the type is derived by extension."""
        return self.derivation == 'extension'

    def is_restriction(self) -> bool:
        """Returns `True` if the type is derived by restriction."""
        return self.derivation == 'restriction'

    def is_blocked(self, xsd_element: 'XsdElement') -> bool:
        """
        Returns `True` if the base type derivation is blocked, `False` otherwise.
        """
        xsd_type = xsd_element.type
        if self is xsd_type:
            return False

        # Blocked derivations are collected from both the element and its type.
        block = f'{xsd_element.block} {xsd_type.block}'.strip()
        if not block:
            return False

        _block = {x for x in block.split() if x in ('extension', 'restriction')}
        return any(self.is_derived(xsd_type, derivation) for derivation in _block)

    def is_dynamic_consistent(self, other: Any) -> bool:
        raise NotImplementedError()

    def is_key(self) -> bool:
        """Returns `True` if the type is xs:ID or is derived from it."""
        return self.name == XSD_ID or self.is_derived(self.maps.types[XSD_ID])

    def is_qname(self) -> bool:
        """Returns `True` if the type is xs:QName or is derived from it."""
        return self.name == XSD_QNAME or self.is_derived(self.maps.types[XSD_QNAME])

    def is_notation(self) -> bool:
        """Returns `True` if the type is xs:NOTATION or is derived from it."""
        return self.name == XSD_NOTATION_TYPE or self.is_derived(self.maps.types[XSD_NOTATION_TYPE])

    def is_decimal(self) -> bool:
        """Returns `True` if the type is xs:decimal or is derived from it."""
        return self.name == XSD_DECIMAL or self.is_derived(self.maps.types[XSD_DECIMAL])

    def text_decode(self, text: str) -> Any:
        raise NotImplementedError()
|
(elem: Optional[~T], schema: Optional[~T], parent: Optional[ForwardRef('XsdComponent')] = None, name: Optional[str] = None) -> None
|
713,922
|
xmlschema.validators.xsdbase
|
has_complex_content
|
Returns `True` if the instance is a complexType with mixed or element-only
content, `False` otherwise.
|
def has_complex_content(self) -> bool:
    """
    Tells whether the instance is a complexType with mixed or element-only
    content. The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,923
|
xmlschema.validators.xsdbase
|
has_mixed_content
|
Returns `True` if the instance is a complexType with mixed content, `False` otherwise.
|
def has_mixed_content(self) -> bool:
    """
    Tells whether the instance is a complexType with mixed content.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,924
|
xmlschema.validators.xsdbase
|
has_simple_content
|
Returns `True` if the instance has a simple content, `False` otherwise.
|
def has_simple_content(self) -> bool:
    """
    Tells whether the instance has a simple content.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,925
|
xmlschema.validators.xsdbase
|
is_atomic
|
Returns `True` if the instance is an atomic simpleType, `False` otherwise.
|
def is_atomic(self) -> bool:
    """Tells whether the instance is an atomic simpleType (base default: `False`)."""
    return False
|
(self) -> bool
|
713,926
|
xmlschema.validators.xsdbase
|
is_blocked
|
Returns `True` if the base type derivation is blocked, `False` otherwise.
|
def is_blocked(self, xsd_element: 'XsdElement') -> bool:
    """
    Returns `True` if the base type derivation is blocked, `False` otherwise.
    """
    xsd_type = xsd_element.type
    if self is xsd_type:
        return False

    # Collect blocked derivations declared on the element and on its type.
    blocked = f'{xsd_element.block} {xsd_type.block}'.strip()
    if not blocked:
        return False

    for derivation in ('extension', 'restriction'):
        if derivation in blocked.split() and self.is_derived(xsd_type, derivation):
            return True
    return False
|
(self, xsd_element: 'XsdElement') -> bool
|
713,927
|
xmlschema.validators.xsdbase
|
is_complex
|
Returns `True` if the instance is a complexType, `False` otherwise.
|
@staticmethod
def is_complex() -> bool:
"""Returns `True` if the instance is a complexType, `False` otherwise."""
raise NotImplementedError()
|
() -> bool
|
713,928
|
xmlschema.validators.xsdbase
|
is_datetime
|
Returns `True` if the instance is a datetime/duration XSD builtin-type, `False` otherwise.
|
def is_datetime(self) -> bool:
    """
    Tells whether the instance is a datetime/duration XSD builtin-type
    (base default: `False`).
    """
    return False
|
(self) -> bool
|
713,929
|
xmlschema.validators.xsdbase
|
is_decimal
| null |
def is_decimal(self) -> bool:
    """Returns `True` if the type is xs:decimal or is derived from it."""
    if self.name == XSD_DECIMAL:
        return True
    return self.is_derived(self.maps.types[XSD_DECIMAL])
|
(self) -> bool
|
713,930
|
xmlschema.validators.xsdbase
|
is_derived
|
Returns `True` if the instance is derived from *other*, `False` otherwise.
The optional argument derivation can be a string containing the words
'extension' or 'restriction' or both.
|
def is_derived(self, other: Union[BaseXsdType, Tuple[ElementType, SchemaType]],
               derivation: Optional[str] = None) -> bool:
    """
    Tells whether the instance is derived from *other*. The optional
    *derivation* argument may contain the words 'extension' and/or
    'restriction' to restrict the kinds of derivation accepted.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self, other: Union[~T, NoneType, Tuple[Optional[~T], Optional[~T]]], derivation: Optional[str] = None) -> bool
|
713,931
|
xmlschema.validators.xsdbase
|
is_dynamic_consistent
| null |
def is_dynamic_consistent(self, other: Any) -> bool:
    """
    Checks dynamic consistency against another component.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self, other: Any) -> bool
|
713,932
|
xmlschema.validators.xsdbase
|
is_element_only
|
Returns `True` if the instance is a complexType with element-only content,
`False` otherwise.
|
def is_element_only(self) -> bool:
    """
    Tells whether the instance is a complexType with element-only content.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,933
|
xmlschema.validators.xsdbase
|
is_emptiable
|
Returns `True` if the instance has an emptiable value or content, `False` otherwise.
|
def is_emptiable(self) -> bool:
    """
    Tells whether the instance has an emptiable value or content.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,934
|
xmlschema.validators.xsdbase
|
is_empty
|
Returns `True` if the instance has an empty content, `False` otherwise.
|
def is_empty(self) -> bool:
    """
    Tells whether the instance has an empty content.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self) -> bool
|
713,935
|
xmlschema.validators.xsdbase
|
is_extension
| null |
def is_extension(self) -> bool:
    """Returns `True` if the type is derived by extension."""
    return self.derivation == 'extension'
|
(self) -> bool
|
713,937
|
xmlschema.validators.xsdbase
|
is_key
| null |
def is_key(self) -> bool:
    """Returns `True` if the type is xs:ID or is derived from it."""
    if self.name == XSD_ID:
        return True
    return self.is_derived(self.maps.types[XSD_ID])
|
(self) -> bool
|
713,938
|
xmlschema.validators.xsdbase
|
is_list
|
Returns `True` if the instance is a list simpleType, `False` otherwise.
|
def is_list(self) -> bool:
    """Tells whether the instance is a list simpleType (base default: `False`)."""
    return False
|
(self) -> bool
|
713,940
|
xmlschema.validators.xsdbase
|
is_notation
| null |
def is_notation(self) -> bool:
    """Returns `True` if the type is xs:NOTATION or is derived from it."""
    if self.name == XSD_NOTATION_TYPE:
        return True
    return self.is_derived(self.maps.types[XSD_NOTATION_TYPE])
|
(self) -> bool
|
713,942
|
xmlschema.validators.xsdbase
|
is_qname
| null |
def is_qname(self) -> bool:
    """Returns `True` if the type is xs:QName or is derived from it."""
    if self.name == XSD_QNAME:
        return True
    return self.is_derived(self.maps.types[XSD_QNAME])
|
(self) -> bool
|
713,943
|
xmlschema.validators.xsdbase
|
is_restriction
| null |
def is_restriction(self) -> bool:
    """Returns `True` if the type is derived by restriction."""
    return self.derivation == 'restriction'
|
(self) -> bool
|
713,944
|
xmlschema.validators.xsdbase
|
is_simple
|
Returns `True` if the instance is a simpleType, `False` otherwise.
|
@staticmethod
def is_simple() -> bool:
"""Returns `True` if the instance is a simpleType, `False` otherwise."""
raise NotImplementedError()
|
() -> bool
|
713,945
|
xmlschema.validators.xsdbase
|
is_union
|
Returns `True` if the instance is a union simpleType, `False` otherwise.
|
def is_union(self) -> bool:
    """Tells whether the instance is a union simpleType (base default: `False`)."""
    return False
|
(self) -> bool
|
713,950
|
xmlschema.validators.xsdbase
|
text_decode
| null |
def text_decode(self, text: str) -> Any:
    """
    Decodes a text value according to the type.
    The base implementation always raises: concrete classes override it.
    """
    raise NotImplementedError()
|
(self, text: str) -> Any
|
713,957
|
xmlschema.exports
|
download_schemas
|
Download one or more schemas from a URL and save them in a target directory. All the
referred locations in schema sources are downloaded and stored in the target directory.
:param url: The URL of the schema to download, usually a remote one.
:param target: the target directory to save the schema.
:param save_remote: if to save remote schemas, defaults to `True`.
:param save_locations: for default save a LOCATION_MAP dictionary to a `__init__.py`, that can be imported in your code to provide a *uri_mapper* argument for build the schema instance. Provide `False` to skip the package file creation in the target directory.
:param modify: provide `True` to modify original schemas, defaults to `False`.
:param defuse: when to defuse XML data before loading, defaults to `'remote'`.
:param timeout: the timeout in seconds for the connection attempt in case of remote data.
:param exclude_locations: provide a list of locations to skip.
:param loglevel: for setting a different logging level for schema downloads call.
:return: a dictionary containing the map of modified locations.
|
@property
def schema_locations(self) -> Set[str]:
    """Extract schema locations from XSD resource tree."""
    locations: Set[str] = set()
    for child in self.resource.root:
        # Only composition/import elements carry a schemaLocation attribute.
        if child.tag not in (XSD_IMPORT, XSD_INCLUDE, XSD_REDEFINE, XSD_OVERRIDE):
            continue
        schema_location = child.get('schemaLocation', '').strip()
        if schema_location:
            locations.add(schema_location)
    return locations
|
(url: str, target: Union[str, pathlib.Path], save_remote: bool = True, save_locations: bool = True, modify: bool = False, defuse: str = 'remote', timeout: int = 300, exclude_locations: Optional[List[str]] = None, loglevel: Union[str, int, NoneType] = None) -> Dict[str, str]
|
713,958
|
elementpath.etree
|
etree_tostring
|
Serialize an Element tree to a string.
:param elem: the Element instance.
:param namespaces: is an optional mapping from namespace prefix to URI. Provided namespaces are registered before serialization. Ignored if the provided *elem* argument is an lxml Element instance.
:param indent: the base line indentation.
:param max_lines: if truncate serialization after a number of lines (default: do not truncate).
:param spaces_for_tab: number of spaces for replacing tab characters. For default tabs are replaced with 4 spaces, provide `None` to keep tab characters.
:param xml_declaration: if set to `True` inserts the XML declaration at the head.
:param encoding: if "unicode" (the default) the output is a string, otherwise it’s binary.
:param method: is either "xml" (the default), "html" or "text".
:return: a Unicode string.
|
def etree_tostring(elem: ElementProtocol,
                   namespaces: Optional[MutableMapping[str, str]] = None,
                   indent: str = '',
                   max_lines: Optional[int] = None,
                   spaces_for_tab: Optional[int] = 4,
                   xml_declaration: Optional[bool] = None,
                   encoding: str = 'unicode',
                   method: str = 'xml') -> Union[str, bytes]:
    """
    Serialize an Element tree to a string.

    :param elem: the Element instance.
    :param namespaces: is an optional mapping from namespace prefix to URI. \
    Provided namespaces are registered before serialization. Ignored if the \
    provided *elem* argument is an lxml Element instance.
    :param indent: the base line indentation.
    :param max_lines: if truncate serialization after a number of lines \
    (default: do not truncate).
    :param spaces_for_tab: number of spaces for replacing tab characters. For \
    default tabs are replaced with 4 spaces, provide `None` to keep tab characters.
    :param xml_declaration: if set to `True` inserts the XML declaration at the head.
    :param encoding: if "unicode" (the default) the output is a string, \
    otherwise it's binary.
    :param method: is either "xml" (the default), "html" or "text".
    :return: a Unicode string.
    """
    def reindent(line: str) -> str:
        # Re-bases a line's indentation on *indent*; relies on the enclosing
        # scope's min_indent/start variables being set before it's called.
        if not line:
            return line
        elif line.startswith(min_indent):
            return line[start:] if start >= 0 else indent[start:] + line
        else:
            return indent + line

    # Select the serializer module matching the element's implementation:
    # pure-Python ElementTree, C-accelerated ElementTree, or lxml (detected
    # by the presence of the 'nsmap' attribute).
    etree_module: Any
    if not is_etree_element(elem):
        raise TypeError(f"{elem!r} is not an Element")
    elif isinstance(elem, PyElementTree.Element):
        etree_module = PyElementTree
    elif not hasattr(elem, 'nsmap'):
        etree_module = ElementTree
    else:
        etree_module = importlib.import_module('lxml.etree')

    if namespaces and not hasattr(elem, 'nsmap'):
        default_namespace = namespaces.get('')
        for prefix, uri in namespaces.items():
            # Skip auto-generated 'nsN' prefixes to avoid clobbering them.
            if prefix and not re.match(r'ns\d+$', prefix):
                etree_module.register_namespace(prefix, uri)
                if uri == default_namespace:
                    default_namespace = None

        if default_namespace:
            etree_module.register_namespace('', default_namespace)

    xml_text = etree_module.tostring(elem, encoding=encoding, method=method)
    if isinstance(xml_text, bytes):
        xml_text = xml_text.decode('utf-8')

    if spaces_for_tab is not None:
        xml_text = xml_text.replace('\t', ' ' * spaces_for_tab)

    # Normalize the presence/absence of the XML declaration line.
    if xml_text.startswith('<?xml '):
        if xml_declaration is False:
            lines = xml_text.splitlines()[1:]
        else:
            lines = xml_text.splitlines()
    elif xml_declaration and encoding.lower() != 'unicode':
        lines = ['<?xml version="1.0" encoding="{}"?>'.format(encoding)]
        lines.extend(xml_text.splitlines())
    else:
        lines = xml_text.splitlines()

    # Clear ending empty lines
    while lines and not lines[-1].strip():
        lines.pop(-1)

    # Fast path: no re-indentation or truncation requested.
    if not lines or method == 'text' or (not indent and not max_lines):
        if encoding == 'unicode':
            return '\n'.join(lines)
        return '\n'.join(lines).encode(encoding)

    # Compute the minimum existing indentation so that reindent() can
    # re-base every line on the requested *indent*.
    last_indent = ' ' * min(k for k in range(len(lines[-1])) if lines[-1][k] != ' ')
    if len(lines) > 2:
        try:
            child_indent = ' ' * min(
                k for line in lines[1:-1] for k in range(len(line)) if line[k] != ' '
            )
        except ValueError:
            child_indent = ''  # all middle lines are empty

        min_indent = min(child_indent, last_indent)
    else:
        min_indent = child_indent = last_indent

    start = len(min_indent) - len(indent)

    # Truncate, keeping the closing line and marking the cut with '...'.
    if max_lines is not None and len(lines) > max_lines + 2:
        lines = lines[:max_lines] + [child_indent + '...'] * 2 + lines[-1:]

    if encoding == 'unicode':
        return '\n'.join(reindent(line) for line in lines)
    return '\n'.join(reindent(line) for line in lines).encode(encoding)
|
(elem: elementpath.protocols.ElementProtocol, namespaces: Optional[MutableMapping[str, str]] = None, indent: str = '', max_lines: Optional[int] = None, spaces_for_tab: Optional[int] = 4, xml_declaration: Optional[bool] = None, encoding: str = 'unicode', method: str = 'xml') -> Union[str, bytes]
|
713,961
|
xmlschema.resources
|
fetch_namespaces
|
Fetches namespaces information from the XML data source. The argument *source*
can be a string containing the XML document or file path or an url or a file-like
object or an ElementTree instance or an Element instance. A dictionary with
namespace mappings is returned.
|
def fetch_namespaces(source: XMLSourceType,
                     base_url: Optional[str] = None,
                     allow: str = 'all',
                     defuse: str = 'remote',
                     timeout: int = 30,
                     root_only: bool = False) -> NamespacesType:
    """
    Fetches namespaces information from the XML data source. The argument *source*
    can be a string containing the XML document or file path or an url or a file-like
    object or an ElementTree instance or an Element instance. A dictionary with
    namespace mappings is returned.
    """
    # A lazy resource is enough: only namespace declarations are read.
    xml_resource = XMLResource(source, base_url, allow, defuse, timeout, lazy=True)
    return xml_resource.get_namespaces(root_only=root_only)
|
(source: Optional[~T], base_url: Optional[str] = None, allow: str = 'all', defuse: str = 'remote', timeout: int = 30, root_only: bool = False) -> Optional[~T]
|
713,962
|
xmlschema.resources
|
fetch_resource
|
Fetches a resource by trying to access it. If the resource is accessible
returns its normalized URL, otherwise raises an `urllib.error.URLError`.
:param location: a URL or a file path.
:param base_url: reference base URL for normalizing local and relative URLs.
:param timeout: the timeout in seconds for the connection attempt in case of remote data.
:return: a normalized URL.
|
def fetch_resource(location: str, base_url: Optional[str] = None, timeout: int = 30) -> str:
    """
    Fetches a resource by trying to access it. If the resource is accessible
    returns its normalized URL, otherwise raises an `urllib.error.URLError`.

    :param location: a URL or a file path.
    :param base_url: reference base URL for normalizing local and relative URLs.
    :param timeout: the timeout in seconds for the connection attempt in case of remote data.
    :return: a normalized URL.
    """
    if not location:
        raise XMLSchemaValueError("the 'location' argument must contain a not empty string")

    url = normalize_url(location, base_url)
    try:
        with urlopen(url, timeout=timeout):
            return url
    except URLError:
        fallback_url = normalize_url(location)
        if fallback_url == url:
            raise
        # Retry ignoring the base URL.
        with urlopen(fallback_url, timeout=timeout):
            return fallback_url
|
(location: str, base_url: Optional[str] = None, timeout: int = 30) -> str
|
713,963
|
xmlschema.resources
|
fetch_schema
|
Like :meth:`fetch_schema_locations` but returns only the URL of a loadable XSD
schema from location hints fetched from the source or provided by argument.
|
def fetch_schema(source: Union['XMLResource', XMLSourceType],
                 locations: Optional[LocationsType] = None,
                 base_url: Optional[str] = None,
                 allow: str = 'all',
                 defuse: str = 'remote',
                 timeout: int = 30,
                 uri_mapper: Optional[UriMapperType] = None,
                 root_only: bool = True) -> str:
    """
    Like :meth:`fetch_schema_locations` but returns only the URL of a loadable XSD
    schema from location hints fetched from the source or provided by argument.
    """
    url, _ = fetch_schema_locations(source, locations, base_url, allow,
                                    defuse, timeout, uri_mapper, root_only)
    return url
|
(source: Union[xmlschema.resources.XMLResource, ~T, NoneType], locations: Optional[~T] = None, base_url: Optional[str] = None, allow: str = 'all', defuse: str = 'remote', timeout: int = 30, uri_mapper: Optional[~T] = None, root_only: bool = True) -> str
|
713,964
|
xmlschema.resources
|
fetch_schema_locations
|
Fetches schema location hints from an XML data source and a list of location hints.
If an accessible schema location is not found raises a ValueError.
:param source: can be an :class:`XMLResource` instance, a file-like object, a path to a file or a URI of a resource, an Element instance, an ElementTree instance, or a string containing the XML data. If the passed argument is not an :class:`XMLResource` instance a new one is built using this and *defuse*, *timeout* and *lazy* arguments.
:param locations: a dictionary or dictionary items with additional schema location hints.
:param base_url: the same argument of the :class:`XMLResource`.
:param allow: the same argument of the :class:`XMLResource`, applied to location hints only.
:param defuse: the same argument of the :class:`XMLResource`.
:param timeout: the same argument of the :class:`XMLResource` but with a reduced default.
:param uri_mapper: an optional argument for building the schema from location hints.
:param root_only: if `True` extracts from the XML source only the location hints of the root element.
:return: A 2-tuple with the URL referring to the first reachable schema resource and a list of dictionary items with normalized location hints.
|
def fetch_schema_locations(source: Union['XMLResource', XMLSourceType],
                           locations: Optional[LocationsType] = None,
                           base_url: Optional[str] = None,
                           allow: str = 'all',
                           defuse: str = 'remote',
                           timeout: int = 30,
                           uri_mapper: Optional[UriMapperType] = None,
                           root_only: bool = True) -> Tuple[str, NormalizedLocationsType]:
    """
    Fetch schema location hints from an XML data source, optionally merged with
    additional hints provided by argument, and resolve the first reachable XSD
    resource. Raises a ValueError if no accessible schema location is found.

    :param source: an :class:`XMLResource` instance or any XML source accepted by \
    its constructor (a file-like object, a file path, a URI, an Element, an \
    ElementTree or a string containing XML data). A non-:class:`XMLResource` \
    argument is wrapped using the *defuse* and *timeout* arguments with lazy mode.
    :param locations: a dictionary or dictionary items with extra schema location hints.
    :param base_url: the same argument of the :class:`XMLResource`.
    :param allow: the same argument of the :class:`XMLResource`, applied only to \
    location hints.
    :param defuse: the same argument of the :class:`XMLResource`.
    :param timeout: the same argument of the :class:`XMLResource`, with a reduced default.
    :param uri_mapper: an optional URI mapper for building the schema from location hints.
    :param root_only: if `True` only the location hints of the root element are \
    extracted from the XML source.
    :return: a 2-tuple with the URL of the first reachable schema resource and \
    a list of couples with normalized location hints.
    """
    if isinstance(source, XMLResource):
        resource = source
    else:
        resource = XMLResource(source, base_url, defuse=defuse, timeout=timeout, lazy=True)

    locations = resource.get_locations(locations, root_only=root_only)
    if not locations:
        raise XMLSchemaValueError("provided arguments don't contain any schema location hint")

    namespace = resource.namespace

    # Try the hints bound to the document's namespace first.
    def same_namespace_first(item: Tuple[str, str]) -> bool:
        return item[0] != namespace

    for ns, location in sorted(locations, key=same_namespace_first):
        try:
            candidate = XMLResource(location, base_url, allow, defuse, timeout,
                                    lazy=True, uri_mapper=uri_mapper)
        except (XMLResourceError, URLError, ElementTree.ParseError):
            continue  # unreachable or unparsable hint: try the next one
        if candidate.namespace == XSD_NAMESPACE and candidate.url:
            return candidate.url, locations

    raise XMLSchemaValueError("not found a schema for provided XML source")
|
(source: Union[xmlschema.resources.XMLResource, ~T, NoneType], locations: Optional[~T] = None, base_url: Optional[str] = None, allow: str = 'all', defuse: str = 'remote', timeout: int = 30, uri_mapper: Optional[~T] = None, root_only: bool = True) -> Tuple[str, Optional[~T]]
|
713,965
|
xmlschema.documents
|
from_json
|
Deserialize JSON data to an XML Element.
:param source: can be a string or a :meth:`read()` supporting file-like object containing the JSON document.
:param schema: an :class:`XMLSchema10` or an :class:`XMLSchema11` instance.
:param cls: class to use for building the schema instance (for default uses :class:`XMLSchema10`).
:param path: is an optional XPath expression for selecting the element of the schema that matches the data that has to be encoded. For default the first global element of the schema is used.
:param validation: the XSD validation mode. Can be 'strict', 'lax' or 'skip'.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param use_defaults: whether to use default values for filling missing data.
:param converter: an :class:`XMLSchemaConverter` subclass or instance to use for the encoding.
:param unordered: a flag for explicitly activating unordered encoding mode for content model data. This mode uses content models for a reordered-by-model iteration of the child elements.
:param json_options: a dictionary with options for the JSON deserializer.
:param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_encode` and options for converter.
:return: An element tree's Element instance. If ``validation='lax'`` keyword argument is provided the validation errors are collected and returned coupled in a tuple with the Element instance.
:raises: :exc:`XMLSchemaValidationError` if the object is not encodable by the schema, or also if it's invalid when ``validation='strict'`` is provided.
|
def from_json(source: Union[str, bytes, IO[str]],
              schema: Optional[Union[XMLSchemaBase, SchemaSourceType]] = None,
              cls: Optional[Type[XMLSchemaBase]] = None,
              path: Optional[str] = None,
              validation: str = 'strict',
              namespaces: Optional[NamespacesType] = None,
              use_defaults: bool = True,
              converter: Optional[ConverterType] = None,
              unordered: bool = False,
              json_options: Optional[Dict[str, Any]] = None,
              **kwargs: Any) -> EncodeType[ElementType]:
    """
    Deserialize JSON data to an XML Element.

    :param source: a string or a :meth:`read()` supporting file-like object \
    containing the JSON document.
    :param schema: an :class:`XMLSchema10` or an :class:`XMLSchema11` instance.
    :param cls: class to use for building the schema instance (for default uses \
    :class:`XMLSchema10`).
    :param path: an optional XPath expression selecting the schema element that \
    matches the data to be encoded. For default the first global element of the \
    schema is used.
    :param validation: the XSD validation mode. Can be 'strict', 'lax' or 'skip'.
    :param namespaces: an optional mapping from namespace prefix to URI.
    :param use_defaults: whether to use default values for filling missing data.
    :param converter: an :class:`XMLSchemaConverter` subclass or instance to use \
    for the encoding.
    :param unordered: a flag for explicitly activating unordered encoding mode \
    for content model data, using content models for a reordered-by-model \
    iteration of the child elements.
    :param json_options: a dictionary with options for the JSON deserializer.
    :param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_encode` \
    and options for converter.
    :return: an element tree's Element instance. If ``validation='lax'`` is \
    provided the validation errors are collected and returned coupled in a \
    tuple with the Element instance.
    :raises: :exc:`XMLSchemaValidationError` if the object is not encodable by \
    the schema, or also if it's invalid when ``validation='strict'`` is provided.
    """
    options = {} if json_options is None else json_options

    # json.loads handles in-memory text/bytes, json.load handles file-like objects.
    if isinstance(source, (str, bytes)):
        obj = json.loads(source, **options)
    else:
        obj = json.load(source, **options)

    return to_etree(obj,
                    schema=schema,
                    cls=cls,
                    path=path,
                    validation=validation,
                    namespaces=namespaces,
                    use_defaults=use_defaults,
                    converter=converter,
                    unordered=unordered,
                    **kwargs)
|
(source: Union[str, bytes, IO[str]], schema: Union[xmlschema.validators.schemas.XMLSchemaBase, ~T, NoneType] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, validation: str = 'strict', namespaces: Optional[~T] = None, use_defaults: bool = True, converter: Optional[~T] = None, unordered: bool = False, json_options: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Optional[~T]
|
713,967
|
xmlschema.documents
|
is_valid
|
Like :meth:`validate` except that it does not raise an exception but returns ``True`` if
the XML document is valid, ``False`` if it's invalid.
|
def is_valid(xml_document: Union[XMLSourceType, XMLResource],
             schema: Optional[XMLSchemaBase] = None,
             cls: Optional[Type[XMLSchemaBase]] = None,
             path: Optional[str] = None,
             schema_path: Optional[str] = None,
             use_defaults: bool = True,
             namespaces: Optional[NamespacesType] = None,
             locations: Optional[LocationsType] = None,
             base_url: Optional[str] = None,
             defuse: str = 'remote',
             timeout: int = 300,
             lazy: LazyType = False,
             thin_lazy: bool = True,
             uri_mapper: Optional[UriMapperType] = None,
             use_location_hints: bool = True) -> bool:
    """
    Like :meth:`validate` except that it does not raise an exception but returns
    ``True`` if the XML document is valid, ``False`` if it's invalid.
    """
    resource, schema_instance = get_context(
        xml_document, schema, cls, locations, base_url, defuse,
        timeout, lazy, thin_lazy, uri_mapper, use_location_hints
    )
    return schema_instance.is_valid(
        resource, path, schema_path, use_defaults, namespaces,
        use_location_hints=use_location_hints
    )
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: Optional[~T] = None, locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True) -> bool
|
713,968
|
xmlschema.documents
|
iter_decode
|
Creates an iterator for decoding an XML source to a data structure. For default
the document is validated during the decoding phase and if it's invalid then one
or more :exc:`XMLSchemaValidationError` instances are yielded before the decoded data.
:param xml_document: can be an :class:`XMLResource` instance, a file-like object a path to a file or a URI of a resource or an Element instance or an ElementTree instance or a string containing the XML data. If the passed argument is not an :class:`XMLResource` instance a new one is built using this and *defuse*, *timeout* and *lazy* arguments.
:param schema: can be a schema instance or a file-like object or a file path or a URL of a resource or a string containing the schema.
:param cls: class to use for building the schema instance (for default uses :class:`XMLSchema10`).
:param path: is an optional XPath expression that matches the elements of the XML data that have to be decoded. If not provided the XML root element is used.
:param validation: defines the XSD validation mode to use for decode, can be 'strict', 'lax' or 'skip'.
:param locations: additional schema location hints, in case a schema instance has to be built.
:param base_url: is an optional custom base URL for remapping relative locations, for default uses the directory where the XSD or alternatively the XML document is located.
:param defuse: an optional argument for building the schema and the :class:`XMLResource` instance.
:param timeout: an optional argument for building the schema and the :class:`XMLResource` instance.
:param lazy: an optional argument for building the :class:`XMLResource` instance.
:param thin_lazy: an optional argument for building the :class:`XMLResource` instance.
:param uri_mapper: an optional argument for building the schema from location hints.
:param use_location_hints: for default, in case a schema instance has to be built, uses also schema locations hints provided within XML data. Set this option to `False` to ignore these schema location hints.
:param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_decode` as keyword arguments.
:raises: :exc:`XMLSchemaValidationError` if the XML document is invalid and ``validation='strict'`` is provided.
|
def iter_decode(xml_document: Union[XMLSourceType, XMLResource],
                schema: Optional[XMLSchemaBase] = None,
                cls: Optional[Type[XMLSchemaBase]] = None,
                path: Optional[str] = None,
                validation: str = 'lax',
                locations: Optional[LocationsType] = None,
                base_url: Optional[str] = None,
                defuse: str = 'remote',
                timeout: int = 300,
                lazy: LazyType = False,
                thin_lazy: bool = True,
                uri_mapper: Optional[UriMapperType] = None,
                use_location_hints: bool = True,
                **kwargs: Any) -> Iterator[Union[Any, XMLSchemaValidationError]]:
    """
    Creates an iterator for decoding an XML source to a data structure. For
    default the document is validated during the decoding phase and if it's
    invalid then one or more :exc:`XMLSchemaValidationError` instances are
    yielded before the decoded data.

    :param xml_document: an :class:`XMLResource` instance or any XML source \
    accepted by its constructor (a file-like object, a file path, a URI, an \
    Element, an ElementTree or a string containing XML data). A \
    non-:class:`XMLResource` argument is wrapped using the *defuse*, *timeout* \
    and *lazy* arguments.
    :param schema: a schema instance or a file-like object or a file path or \
    a URL of a resource or a string containing the schema.
    :param cls: class to use for building the schema instance (for default uses \
    :class:`XMLSchema10`).
    :param path: an optional XPath expression matching the elements of the XML \
    data to be decoded. If not provided the XML root element is used.
    :param validation: the XSD validation mode to use for decode, can be \
    'strict', 'lax' or 'skip'.
    :param locations: additional schema location hints, in case a schema \
    instance has to be built.
    :param base_url: an optional custom base URL for remapping relative \
    locations, for default uses the directory where the XSD or alternatively \
    the XML document is located.
    :param defuse: an optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param timeout: an optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param lazy: an optional argument for building the :class:`XMLResource` instance.
    :param thin_lazy: an optional argument for building the :class:`XMLResource` instance.
    :param uri_mapper: an optional argument for building the schema from location hints.
    :param use_location_hints: for default, in case a schema instance has to be \
    built, uses also schema locations hints provided within XML data. Set this \
    option to `False` to ignore these schema location hints.
    :param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_decode` \
    as keyword arguments.
    :raises: :exc:`XMLSchemaValidationError` if the XML document is invalid and \
    ``validation='strict'`` is provided.
    """
    resource, schema_instance = get_context(
        xml_document, schema, cls, locations, base_url, defuse,
        timeout, lazy, thin_lazy, uri_mapper, use_location_hints
    )
    yield from schema_instance.iter_decode(
        resource, path=path, validation=validation,
        use_location_hints=use_location_hints, **kwargs
    )
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, validation: str = 'lax', locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True, **kwargs: Any) -> Iterator[Union[Any, xmlschema.validators.exceptions.XMLSchemaValidationError]]
|
713,969
|
xmlschema.documents
|
iter_errors
|
Creates an iterator for the errors generated by the validation of an XML document.
Takes the same arguments of the function :meth:`validate`.
|
def iter_errors(xml_document: Union[XMLSourceType, XMLResource],
                schema: Optional[XMLSchemaBase] = None,
                cls: Optional[Type[XMLSchemaBase]] = None,
                path: Optional[str] = None,
                schema_path: Optional[str] = None,
                use_defaults: bool = True,
                namespaces: Optional[NamespacesType] = None,
                locations: Optional[LocationsType] = None,
                base_url: Optional[str] = None,
                defuse: str = 'remote',
                timeout: int = 300,
                lazy: LazyType = False,
                thin_lazy: bool = True,
                uri_mapper: Optional[UriMapperType] = None,
                use_location_hints: bool = True) -> Iterator[XMLSchemaValidationError]:
    """
    Creates an iterator for the errors generated by the validation of an XML
    document. Takes the same arguments of the function :meth:`validate`.
    """
    resource, schema_instance = get_context(
        xml_document, schema, cls, locations, base_url, defuse,
        timeout, lazy, thin_lazy, uri_mapper, use_location_hints
    )
    return schema_instance.iter_errors(
        resource, path, schema_path, use_defaults, namespaces,
        use_location_hints=use_location_hints
    )
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: Optional[~T] = None, locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True) -> Iterator[xmlschema.validators.exceptions.XMLSchemaValidationError]
|
713,974
|
xmlschema.locations
|
normalize_locations
|
Returns a list of normalized locations. The locations are normalized using
the base URL of the instance.
:param locations: a dictionary or a list of couples containing namespace location hints.
:param base_url: the reference base URL used to construct the normalized URL from the argument.
:param keep_relative: if set to `True` keeps relative file paths, which would not be strictly conformant to URL format specification.
:return: a list of couples containing normalized namespace location hints.
|
def normalize_locations(locations: LocationsType,
                        base_url: Optional[str] = None,
                        keep_relative: bool = False) -> NormalizedLocationsType:
    """
    Returns a list of normalized locations. The locations are normalized using
    the base URL of the instance.

    :param locations: a dictionary or a list of couples containing namespace \
    location hints.
    :param base_url: the reference base URL used to construct the normalized \
    URL from the argument.
    :param keep_relative: if set to `True` keeps relative file paths, which \
    would not be strictly conformant to URL format specification.
    :return: a list of couples containing normalized namespace location hints.
    """
    normalized: NormalizedLocationsType = []

    if isinstance(locations, MutableMapping):
        # A mapping value may be a single location or a list of locations.
        for ns, value in locations.items():
            urls = value if isinstance(value, list) else [value]
            for url in urls:
                normalized.append((ns, normalize_url(url, base_url, keep_relative)))
    else:
        # Already an iterable of (namespace, url) couples.
        for ns, url in locations:
            normalized.append((ns, normalize_url(url, base_url, keep_relative)))

    return normalized
|
(locations: Optional[~T], base_url: Optional[str] = None, keep_relative: bool = False) -> Optional[~T]
|
713,975
|
xmlschema.locations
|
normalize_url
|
Returns a normalized URL eventually joining it to a base URL if it's a relative path.
Path names are converted to 'file' scheme URLs and unsafe characters are encoded.
Query and fragments parts are kept only for non-local URLs
:param url: a relative or absolute URL.
:param base_url: a reference base URL.
:param keep_relative: if set to `True` keeps relative file paths, which would not be strictly conformant to specification (RFC 8089), because *urlopen()* doesn't accept a simple pathname.
:param method: method used to encode query and fragment parts. If set to `html` the whitespaces are replaced with `+` characters.
:return: a normalized URL string.
|
def normalize_url(url: str, base_url: Optional[str] = None,
                  keep_relative: bool = False, method: str = 'xml') -> str:
    """
    Returns a normalized URL, eventually joining it to a base URL if it's a
    relative path. Path names are converted to 'file' scheme URLs and unsafe
    characters are encoded. Query and fragment parts are kept only for
    non-local URLs.

    :param url: a relative or absolute URL.
    :param base_url: a reference base URL.
    :param keep_relative: if set to `True` keeps relative file paths, which \
    would not be strictly conformant to specification (RFC 8089), because \
    *urlopen()* doesn't accept a simple pathname.
    :param method: method used to encode query and fragment parts. If set to \
    `html` the whitespaces are replaced with `+` characters.
    :return: a normalized URL string.
    """
    parts = urlsplit(url)

    # Non-local schemes (http, https, ftp, ...) are only percent-encoded.
    if not is_local_scheme(parts.scheme):
        return encode_url(parts.geturl(), method)

    path = LocationPath.from_uri(url)
    if path.is_absolute():
        return path.normalize().as_uri()

    if base_url is not None:
        base_parts = urlsplit(base_url)
        base_path = LocationPath.from_uri(base_url)

        if is_local_scheme(base_parts.scheme):
            path = base_path.joinpath(path)
        elif not parts.scheme:
            # Relative path against a remote base: rebuild the full URL.
            joined = base_path.joinpath(path).normalize().as_posix()
            rebuilt = urlunsplit((
                base_parts.scheme,
                base_parts.netloc,
                joined,
                parts.query,
                parts.fragment
            ))
            return encode_url(rebuilt, method)

    if path.is_absolute() or keep_relative:
        return path.normalize().as_uri()

    # Anchor a remaining relative path to the current working directory.
    return LocationPath(os.getcwd()).joinpath(path).normalize().as_uri()
|
(url: str, base_url: Optional[str] = None, keep_relative: bool = False, method: str = 'xml') -> str
|
713,977
|
xmlschema.documents
|
to_dict
|
Decodes an XML document to a Python's nested dictionary. Takes the same arguments
of the function :meth:`iter_decode`, but *validation* mode defaults to 'strict'.
:return: an object containing the decoded data. If ``validation='lax'`` is provided validation errors are collected and returned in a tuple with the decoded data.
:raises: :exc:`XMLSchemaValidationError` if the XML document is invalid and ``validation='strict'`` is provided.
|
def to_dict(xml_document: Union[XMLSourceType, XMLResource],
            schema: Optional[XMLSchemaBase] = None,
            cls: Optional[Type[XMLSchemaBase]] = None,
            path: Optional[str] = None,
            validation: str = 'strict',
            locations: Optional[LocationsType] = None,
            base_url: Optional[str] = None,
            defuse: str = 'remote',
            timeout: int = 300,
            lazy: LazyType = False,
            thin_lazy: bool = True,
            uri_mapper: Optional[UriMapperType] = None,
            use_location_hints: bool = True,
            **kwargs: Any) -> DecodeType[Any]:
    """
    Decodes an XML document to a Python's nested dictionary. Takes the same
    arguments of the function :meth:`iter_decode`, but *validation* mode
    defaults to 'strict'.

    :return: an object containing the decoded data. If ``validation='lax'`` is \
    provided validation errors are collected and returned in a tuple with the \
    decoded data.
    :raises: :exc:`XMLSchemaValidationError` if the XML document is invalid and \
    ``validation='strict'`` is provided.
    """
    resource, schema_instance = get_context(
        xml_document, schema, cls, locations, base_url, defuse,
        timeout, lazy, thin_lazy, uri_mapper, use_location_hints
    )
    return schema_instance.decode(
        resource, path=path, validation=validation,
        use_location_hints=use_location_hints, **kwargs
    )
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, validation: str = 'strict', locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True, **kwargs: Any) -> Optional[Any]
|
713,978
|
xmlschema.documents
|
to_etree
|
Encodes a data structure/object to an ElementTree's Element.
:param obj: the Python object that has to be encoded to XML data.
:param schema: can be a schema instance or a file-like object or a file path or a URL of a resource or a string containing the schema. If not provided a dummy schema is used.
:param cls: class to use for building the schema instance (for default uses :class:`XMLSchema10`).
:param path: is an optional XPath expression for selecting the element of the schema that matches the data that has to be encoded. For default the first global element of the schema is used.
:param validation: the XSD validation mode. Can be 'strict', 'lax' or 'skip'.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param use_defaults: whether to use default values for filling missing data.
:param converter: an :class:`XMLSchemaConverter` subclass or instance to use for the encoding.
:param unordered: a flag for explicitly activating unordered encoding mode for content model data. This mode uses content models for a reordered-by-model iteration of the child elements.
:param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_encode` and options for the converter.
:return: An element tree's Element instance. If ``validation='lax'`` keyword argument is provided the validation errors are collected and returned coupled in a tuple with the Element instance.
:raises: :exc:`XMLSchemaValidationError` if the object is not encodable by the schema, or also if it's invalid when ``validation='strict'`` is provided.
|
def to_etree(obj: Any,
             schema: Optional[Union[XMLSchemaBase, SchemaSourceType]] = None,
             cls: Optional[Type[XMLSchemaBase]] = None,
             path: Optional[str] = None,
             validation: str = 'strict',
             namespaces: Optional[NamespacesType] = None,
             use_defaults: bool = True,
             converter: Optional[ConverterType] = None,
             unordered: bool = False,
             **kwargs: Any) -> EncodeType[ElementType]:
    """
    Encodes a data structure/object to an ElementTree's Element.

    :param obj: the Python object that has to be encoded to XML data.
    :param schema: a schema instance or a file-like object or a file path or \
    a URL of a resource or a string containing the schema. If not provided a \
    dummy schema is used.
    :param cls: class to use for building the schema instance (for default uses \
    :class:`XMLSchema10`).
    :param path: an optional XPath expression selecting the schema element that \
    matches the data to be encoded. For default the first global element of the \
    schema is used.
    :param validation: the XSD validation mode. Can be 'strict', 'lax' or 'skip'.
    :param namespaces: an optional mapping from namespace prefix to URI.
    :param use_defaults: whether to use default values for filling missing data.
    :param converter: an :class:`XMLSchemaConverter` subclass or instance to use \
    for the encoding.
    :param unordered: a flag for explicitly activating unordered encoding mode \
    for content model data, using content models for a reordered-by-model \
    iteration of the child elements.
    :param kwargs: other optional arguments of :meth:`XMLSchemaBase.iter_encode` \
    and options for the converter.
    :return: an element tree's Element instance. If ``validation='lax'`` keyword \
    argument is provided the validation errors are collected and returned \
    coupled in a tuple with the Element instance.
    :raises: :exc:`XMLSchemaValidationError` if the object is not encodable by \
    the schema, or also if it's invalid when ``validation='strict'`` is provided.
    """
    if cls is None:
        cls = XMLSchema10
    elif not issubclass(cls, XMLSchemaBase):
        raise XMLSchemaTypeError("invalid schema class %r" % cls)

    if isinstance(schema, XMLSchemaBase):
        _schema = schema
    elif schema is not None:
        _schema = cls(schema)
    else:
        # No schema provided: derive a dummy schema from the path argument.
        if not path:
            raise XMLSchemaTypeError("without schema a path is required "
                                     "for building a dummy schema")

        nsmap = {'xsd': XSD_NAMESPACE, 'xs': XSD_NAMESPACE} if namespaces is None \
            else namespaces
        tag = get_extended_qname(path, nsmap)
        if ':' in tag and not tag.startswith('{'):
            raise XMLSchemaTypeError("without schema the path must be "
                                     "mappable to a local or extended name")

        if tag == XSD_SCHEMA:
            assert cls.meta_schema is not None
            _schema = cls.meta_schema
        else:
            _schema = get_dummy_schema(tag, cls)

    return _schema.encode(
        obj=obj,
        path=path,
        validation=validation,
        namespaces=namespaces,
        use_defaults=use_defaults,
        converter=converter,
        unordered=unordered,
        **kwargs
    )
|
(obj: Any, schema: Union[xmlschema.validators.schemas.XMLSchemaBase, ~T, NoneType] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, validation: str = 'strict', namespaces: Optional[~T] = None, use_defaults: bool = True, converter: Optional[~T] = None, unordered: bool = False, **kwargs: Any) -> Optional[~T]
|
713,979
|
xmlschema.documents
|
to_json
|
Serialize an XML document to JSON. For default the XML data is validated during
the decoding phase. Raises an :exc:`XMLSchemaValidationError` if the XML document
is not validated against the schema.
:param xml_document: can be an :class:`XMLResource` instance, a file-like object a path to a file or a URI of a resource or an Element instance or an ElementTree instance or a string containing the XML data. If the passed argument is not an :class:`XMLResource` instance a new one is built using this and *defuse*, *timeout* and *lazy* arguments.
:param fp: can be a :meth:`write()` supporting file-like object.
:param schema: can be a schema instance or a file-like object or a file path or a URL of a resource or a string containing the schema.
:param cls: schema class to use for building the instance (for default uses :class:`XMLSchema10`).
:param path: is an optional XPath expression that matches the elements of the XML data that have to be decoded. If not provided the XML root element is used.
:param validation: defines the XSD validation mode to use for decode, can be 'strict', 'lax' or 'skip'.
:param locations: additional schema location hints, in case the schema instance has to be built.
:param base_url: is an optional custom base URL for remapping relative locations, for default uses the directory where the XSD or alternatively the XML document is located.
:param defuse: an optional argument for building the schema and the :class:`XMLResource` instance.
:param timeout: an optional argument for building the schema and the :class:`XMLResource` instance.
:param uri_mapper: an optional argument for building the schema from location hints.
:param lazy: an optional argument for building the :class:`XMLResource` instance.
:param thin_lazy: an optional argument for building the :class:`XMLResource` instance.
:param use_location_hints: for default, in case a schema instance has to be built, uses also schema locations hints provided within XML data. Set this option to `False` to ignore these schema location hints.
:param json_options: a dictionary with options for the JSON serializer.
:param kwargs: optional arguments of :meth:`XMLSchemaBase.iter_decode` as keyword arguments to variate the decoding process.
:return: a string containing the JSON data if *fp* is `None`, otherwise doesn't return anything. If ``validation='lax'`` keyword argument is provided the validation errors are collected and returned, eventually coupled in a tuple with the JSON data.
:raises: :exc:`XMLSchemaValidationError` if the object is not decodable by the XSD component, or also if it's invalid when ``validation='strict'`` is provided.
|
def to_json(xml_document: Union[XMLSourceType, XMLResource],
            fp: Optional[IO[str]] = None,
            schema: Optional[XMLSchemaBase] = None,
            cls: Optional[Type[XMLSchemaBase]] = None,
            path: Optional[str] = None,
            validation: str = 'strict',
            locations: Optional[LocationsType] = None,
            base_url: Optional[str] = None,
            defuse: str = 'remote',
            timeout: int = 300,
            lazy: LazyType = False,
            thin_lazy: bool = True,
            uri_mapper: Optional[UriMapperType] = None,
            use_location_hints: bool = True,
            json_options: Optional[Dict[str, Any]] = None,
            **kwargs: Any) -> JsonDecodeType:
    """
    Serialize an XML document to JSON. For default the XML data is validated during
    the decoding phase. Raises an :exc:`XMLSchemaValidationError` if the XML document
    is not validated against the schema.
    :param xml_document: can be an :class:`XMLResource` instance, a file-like object a path \
    to a file or a URI of a resource or an Element instance or an ElementTree instance or \
    a string containing the XML data. If the passed argument is not an :class:`XMLResource` \
    instance a new one is built using this and *defuse*, *timeout* and *lazy* arguments.
    :param fp: can be a :meth:`write()` supporting file-like object.
    :param schema: can be a schema instance or a file-like object or a file path or a URL \
    of a resource or a string containing the schema.
    :param cls: schema class to use for building the instance (for default uses \
    :class:`XMLSchema10`).
    :param path: is an optional XPath expression that matches the elements of the XML \
    data that have to be decoded. If not provided the XML root element is used.
    :param validation: defines the XSD validation mode to use for decode, can be \
    'strict', 'lax' or 'skip'.
    :param locations: additional schema location hints, in case the schema instance \
    has to be built.
    :param base_url: is an optional custom base URL for remapping relative locations, for \
    default uses the directory where the XSD or alternatively the XML document is located.
    :param defuse: an optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param timeout: an optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param uri_mapper: an optional argument for building the schema from location hints.
    :param lazy: an optional argument for building the :class:`XMLResource` instance.
    :param thin_lazy: an optional argument for building the :class:`XMLResource` instance.
    :param use_location_hints: for default, in case a schema instance has \
    to be built, uses also schema locations hints provided within XML data. \
    Set this option to `False` to ignore these schema location hints.
    :param json_options: a dictionary with options for the JSON serializer.
    :param kwargs: optional arguments of :meth:`XMLSchemaBase.iter_decode` as keyword arguments \
    to variate the decoding process.
    :return: a string containing the JSON data if *fp* is `None`, otherwise doesn't \
    return anything. If ``validation='lax'`` keyword argument is provided the validation \
    errors are collected and returned, eventually coupled in a tuple with the JSON data.
    :raises: :exc:`XMLSchemaValidationError` if the object is not decodable by \
    the XSD component, or also if it's invalid when ``validation='strict'`` is provided.
    """
    # With 'skip' validation no real schema is needed: the flag is passed down so
    # get_context can fall back to a placeholder schema instead of failing when
    # none can be located (presumably — confirm against get_context's signature).
    dummy_schema = validation == 'skip'
    source, _schema = get_context(xml_document, schema, cls, locations, base_url,
                                  defuse, timeout, lazy, thin_lazy, uri_mapper,
                                  use_location_hints, dummy_schema)
    if json_options is None:
        json_options = {}
    # JSON has no Decimal type: default decoded xs:decimal values to float so
    # json.dump/json.dumps can serialize them. Callers may still override this.
    if 'decimal_type' not in kwargs:
        kwargs['decimal_type'] = float

    errors: List[XMLSchemaValidationError] = []

    # For a lazy source without a selecting path, decoding is interleaved with
    # serialization: a custom JSONEncoder collects validation errors into
    # `errors` while the document is being written out.
    if path is None and source.is_lazy() and 'cls' not in json_options:
        json_options['cls'] = get_lazy_json_encoder(errors)

    obj = _schema.decode(source, path=path, validation=validation,
                         use_location_hints=use_location_hints, **kwargs)
    if isinstance(obj, tuple):
        # 'lax' validation mode: decode() returned (data, errors).
        errors.extend(obj[1])
        if fp is not None:
            json.dump(obj[0], fp, **json_options)
            return tuple(errors)
        else:
            result = json.dumps(obj[0], **json_options)
            return result, tuple(errors)
    elif fp is not None:
        json.dump(obj, fp, **json_options)
        # `errors` may have been filled by the lazy encoder during json.dump.
        return None if not errors else tuple(errors)
    else:
        result = json.dumps(obj, **json_options)
        return result if not errors else (result, tuple(errors))
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], fp: Optional[IO[str]] = None, schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, validation: str = 'strict', locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True, json_options: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Optional[~T]
|
713,981
|
xmlschema.documents
|
validate
|
Validates an XML document against a schema instance. This function builds an
:class:`XMLSchema` object for validating the XML document. Raises an
:exc:`XMLSchemaValidationError` if the XML document is not validated against
the schema.
:param xml_document: can be an :class:`XMLResource` instance, a file-like object a path to a file or a URI of a resource or an Element instance or an ElementTree instance or a string containing the XML data. If the passed argument is not an :class:`XMLResource` instance a new one is built using this and *defuse*, *timeout* and *lazy* arguments.
:param schema: can be a schema instance or a file-like object or a file path or a URL of a resource or a string containing the schema.
:param cls: class to use for building the schema instance (for default :class:`XMLSchema10` is used).
:param path: is an optional XPath expression that matches the elements of the XML data that have to be decoded. If not provided the XML root element is used.
:param schema_path: an XPath expression to select the XSD element to use for decoding. If not provided the *path* argument or the *source* root tag are used.
:param use_defaults: defines when to use element and attribute defaults for filling missing required values.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param locations: additional schema location hints, used if a schema instance has to be built.
:param base_url: is an optional custom base URL for remapping relative locations, for default uses the directory where the XSD or alternatively the XML document is located.
:param defuse: an optional argument for building the schema and the :class:`XMLResource` instance.
:param timeout: an optional argument for building the schema and the :class:`XMLResource` instance.
:param lazy: an optional argument for building the :class:`XMLResource` instance.
:param thin_lazy: an optional argument for building the :class:`XMLResource` instance.
:param uri_mapper: an optional argument for building the schema from location hints.
:param use_location_hints: for default, in case a schema instance has to be built, uses also schema locations hints provided within XML data. Set this option to `False` to ignore these schema location hints.
|
def validate(xml_document: Union[XMLSourceType, XMLResource],
             schema: Optional[XMLSchemaBase] = None,
             cls: Optional[Type[XMLSchemaBase]] = None,
             path: Optional[str] = None,
             schema_path: Optional[str] = None,
             use_defaults: bool = True,
             namespaces: Optional[NamespacesType] = None,
             locations: Optional[LocationsType] = None,
             base_url: Optional[str] = None,
             defuse: str = 'remote',
             timeout: int = 300,
             lazy: LazyType = False,
             thin_lazy: bool = True,
             uri_mapper: Optional[UriMapperType] = None,
             use_location_hints: bool = True) -> None:
    """
    Validate an XML document against a schema, building an :class:`XMLSchema`
    instance on the fly if one is not provided. Raises an
    :exc:`XMLSchemaValidationError` if the XML document does not conform to
    the schema.
    :param xml_document: an :class:`XMLResource` instance, a file-like object, a path \
    to a file, a URI of a resource, an Element or ElementTree instance, or a string \
    containing the XML data. Anything that is not already an :class:`XMLResource` is \
    wrapped in one using the *defuse*, *timeout* and *lazy* arguments.
    :param schema: a schema instance, a file-like object, a file path, a URL of a \
    resource or a string containing the schema.
    :param cls: class used to build the schema instance (defaults to \
    :class:`XMLSchema10`).
    :param path: optional XPath expression selecting the elements of the XML data \
    to validate. The XML root element is used when not provided.
    :param schema_path: an XPath expression selecting the XSD element to use for \
    validation. When not provided, the *path* argument or the *source* root tag is used.
    :param use_defaults: whether element and attribute defaults fill in missing \
    required values.
    :param namespaces: optional mapping from namespace prefix to URI.
    :param locations: additional schema location hints, used when a schema instance \
    has to be built.
    :param base_url: optional custom base URL for remapping relative locations; by \
    default the directory of the XSD, or alternatively of the XML document, is used.
    :param defuse: optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param timeout: optional argument for building the schema and the \
    :class:`XMLResource` instance.
    :param lazy: optional argument for building the :class:`XMLResource` instance.
    :param thin_lazy: optional argument for building the :class:`XMLResource` instance.
    :param uri_mapper: optional argument for building the schema from location hints.
    :param use_location_hints: by default, when a schema instance has to be built, \
    schema location hints provided within the XML data are also used. Set to `False` \
    to ignore them.
    """
    # Resolve the XML source and the (possibly freshly built) schema, then
    # delegate the actual validation to the schema instance.
    resource, _schema = get_context(
        xml_document, schema, cls, locations, base_url, defuse,
        timeout, lazy, thin_lazy, uri_mapper, use_location_hints
    )
    _schema.validate(resource, path, schema_path, use_defaults, namespaces,
                     use_location_hints=use_location_hints)
|
(xml_document: Union[~T, NoneType, xmlschema.resources.XMLResource], schema: Optional[xmlschema.validators.schemas.XMLSchemaBase] = None, cls: Optional[Type[xmlschema.validators.schemas.XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: Optional[~T] = None, locations: Optional[~T] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: Optional[~T] = False, thin_lazy: bool = True, uri_mapper: Optional[~T] = None, use_location_hints: bool = True) -> NoneType
|
713,984
|
keybert._model
|
KeyBERT
|
A minimal method for keyword extraction with BERT
The keyword extraction is done by finding the sub-phrases in
a document that are the most similar to the document itself.
First, document embeddings are extracted with BERT to get a
document-level representation. Then, word embeddings are extracted
for N-gram words/phrases. Finally, we use cosine similarity to find the
words/phrases that are the most similar to the document.
The most similar words could then be identified as the words that
best describe the entire document.
<div class="excalidraw">
--8<-- "docs/images/pipeline.svg"
</div>
|
class KeyBERT:
    """
    A minimal method for keyword extraction with BERT.

    The keyword extraction is done by finding the sub-phrases in
    a document that are the most similar to the document itself.
    First, document embeddings are extracted with BERT to get a
    document-level representation. Then, word embeddings are extracted
    for N-gram words/phrases. Finally, we use cosine similarity to find the
    words/phrases that are the most similar to the document.
    The most similar words could then be identified as the words that
    best describe the entire document.
    <div class="excalidraw">
    --8<-- "docs/images/pipeline.svg"
    </div>
    """
    def __init__(self, model="all-MiniLM-L6-v2", llm: BaseLLM = None):
        """KeyBERT initialization

        Arguments:
            model: Use a custom embedding model.
                   The following backends are currently supported:
                   * SentenceTransformers
                   * 🤗 Transformers
                   * Flair
                   * Spacy
                   * Gensim
                   * USE (TF-Hub)
                   You can also pass in a string that points to one of the following
                   sentence-transformers models:
                   * https://www.sbert.net/docs/pretrained_models.html
            llm: An optional LLM backend used to fine-tune the extracted keywords.
        """
        self.model = select_backend(model)
        # A raw LLM backend gets wrapped in KeyLLM; anything else (None, or an
        # object that is presumably already a KeyLLM) is stored as-is.
        if isinstance(llm, BaseLLM):
            self.llm = KeyLLM(llm)
        else:
            self.llm = llm
    def extract_keywords(
        self,
        docs: Union[str, List[str]],
        candidates: List[str] = None,
        keyphrase_ngram_range: Tuple[int, int] = (1, 1),
        stop_words: Union[str, List[str]] = "english",
        top_n: int = 5,
        min_df: int = 1,
        use_maxsum: bool = False,
        use_mmr: bool = False,
        diversity: float = 0.5,
        nr_candidates: int = 20,
        vectorizer: CountVectorizer = None,
        highlight: bool = False,
        seed_keywords: Union[List[str], List[List[str]]] = None,
        doc_embeddings: np.array = None,
        word_embeddings: np.array = None,
        threshold: float = None
    ) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
        """Extract keywords and/or keyphrases

        To get the biggest speed-up, make sure to pass multiple documents
        at once instead of iterating over a single document.

        Arguments:
            docs: The document(s) for which to extract keywords/keyphrases
            candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
                        NOTE: This is not used if you passed a `vectorizer`.
            keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
                                   NOTE: This is not used if you passed a `vectorizer`.
            stop_words: Stopwords to remove from the document.
                        NOTE: This is not used if you passed a `vectorizer`.
            top_n: Return the top n keywords/keyphrases
            min_df: Minimum document frequency of a word across all documents
                    if keywords for multiple documents need to be extracted.
                    NOTE: This is not used if you passed a `vectorizer`.
            use_maxsum: Whether to use Max Sum Distance for the selection
                        of keywords/keyphrases.
            use_mmr: Whether to use Maximal Marginal Relevance (MMR) for the
                     selection of keywords/keyphrases.
            diversity: The diversity of the results between 0 and 1 if `use_mmr`
                       is set to True.
            nr_candidates: The number of candidates to consider if `use_maxsum` is
                           set to True.
            vectorizer: Pass in your own `CountVectorizer` from
                        `sklearn.feature_extraction.text.CountVectorizer`
            highlight: Whether to print the document and highlight its keywords/keyphrases.
                       NOTE: This does not work if multiple documents are passed.
            seed_keywords: Seed keywords that may guide the extraction of keywords by
                           steering the similarities towards the seeded keywords.
                           NOTE: when multiple documents are passed,
                           `seed_keywords` functions in either of the two ways below:
                           - globally: when a flat list of str is passed, keywords are shared by all documents,
                           - locally: when a nested list of str is passed, keywords differ among documents.
            doc_embeddings: The embeddings of each document.
            word_embeddings: The embeddings of each potential keyword/keyphrase
                             across the vocabulary of the set of input documents.
                             NOTE: The `word_embeddings` should be generated through
                             `.extract_embeddings` as the order of these embeddings depend
                             on the vectorizer that was used to generate its vocabulary.
            threshold: Minimum similarity value used by the LLM fine-tuning step.

        Returns:
            keywords: The top n keywords for a document with their respective distances
                      to the input document.

        Usage:

        To extract keywords from a single document:

        ```python
        from keybert import KeyBERT

        kw_model = KeyBERT()
        keywords = kw_model.extract_keywords(doc)
        ```

        To extract keywords from multiple documents, which is typically quite a bit faster:

        ```python
        from keybert import KeyBERT

        kw_model = KeyBERT()
        keywords = kw_model.extract_keywords(docs)
        ```
        """
        # Check for a single, empty document
        if isinstance(docs, str):
            if docs:
                docs = [docs]
            else:
                return []
        # Extract potential words using a vectorizer / tokenizer
        if vectorizer:
            count = vectorizer.fit(docs)
        else:
            try:
                count = CountVectorizer(
                    ngram_range=keyphrase_ngram_range,
                    stop_words=stop_words,
                    min_df=min_df,
                    vocabulary=candidates,
                ).fit(docs)
            except ValueError:
                # No vocabulary could be built (e.g. every token is a stop word).
                return []
        # Scikit-Learn Deprecation: get_feature_names is deprecated in 1.0
        # and will be removed in 1.2. Please use get_feature_names_out instead.
        if version.parse(sklearn_version) >= version.parse("1.0.0"):
            words = count.get_feature_names_out()
        else:
            words = count.get_feature_names()
        # Document-term count matrix; the nonzero columns of a row are that
        # document's candidate keywords/keyphrases.
        df = count.transform(docs)
        # Check if the right number of word embeddings are generated compared with the vectorizer
        if word_embeddings is not None:
            if word_embeddings.shape[0] != len(words):
                raise ValueError("Make sure that the `word_embeddings` are generated from the function "
                                 "`.extract_embeddings`. \nMoreover, the `candidates`, `keyphrase_ngram_range`,"
                                 "`stop_words`, and `min_df` parameters need to have the same values in both "
                                 "`.extract_embeddings` and `.extract_keywords`.")
        # Extract embeddings
        if doc_embeddings is None:
            doc_embeddings = self.model.embed(docs)
        if word_embeddings is None:
            word_embeddings = self.model.embed(words)
        # Guided KeyBERT either local (keywords shared among documents) or global (keywords per document)
        if seed_keywords is not None:
            if isinstance(seed_keywords[0], str):
                # Global seeding: one mean seed embedding shared by all documents.
                seed_embeddings = self.model.embed(seed_keywords).mean(axis=0, keepdims=True)
            elif len(docs) != len(seed_keywords):
                raise ValueError("The length of docs must match the length of seed_keywords")
            else:
                # Local seeding: one mean seed embedding per document.
                seed_embeddings = np.vstack([
                    self.model.embed(keywords).mean(axis=0, keepdims=True)
                    for keywords in seed_keywords
                ])
            # Blend document and seed embeddings with a fixed 3:1 weighting.
            doc_embeddings = ((doc_embeddings * 3 + seed_embeddings) / 4)
        # Find keywords
        all_keywords = []
        for index, _ in enumerate(docs):
            try:
                # Select embeddings
                # NOTE(review): the inner `candidates` and `index` rebind the
                # parameter / loop variable of the same names — works, but the
                # outer values are shadowed from here on.
                candidate_indices = df[index].nonzero()[1]
                candidates = [words[index] for index in candidate_indices]
                candidate_embeddings = word_embeddings[candidate_indices]
                doc_embedding = doc_embeddings[index].reshape(1, -1)
                # Maximal Marginal Relevance (MMR)
                if use_mmr:
                    keywords = mmr(
                        doc_embedding,
                        candidate_embeddings,
                        candidates,
                        top_n,
                        diversity,
                    )
                # Max Sum Distance
                elif use_maxsum:
                    keywords = max_sum_distance(
                        doc_embedding,
                        candidate_embeddings,
                        candidates,
                        top_n,
                        nr_candidates,
                    )
                # Cosine-based keyword extraction
                else:
                    distances = cosine_similarity(doc_embedding, candidate_embeddings)
                    # Take the top_n highest similarities, best first.
                    keywords = [
                        (candidates[index], round(float(distances[0][index]), 4))
                        for index in distances.argsort()[0][-top_n:]
                    ][::-1]
                all_keywords.append(keywords)
            # Capturing empty keywords
            except ValueError:
                all_keywords.append([])
        # Highlight keywords in the document
        if len(all_keywords) == 1:
            if highlight:
                highlight_document(docs[0], all_keywords[0], count)
            # Single document: unwrap the outer list for convenience.
            all_keywords = all_keywords[0]
        # Fine-tune keywords using an LLM
        if self.llm is not None:
            import torch

            doc_embeddings = torch.from_numpy(doc_embeddings).float()
            if torch.cuda.is_available():
                doc_embeddings = doc_embeddings.to("cuda")
            # NOTE(review): assumes at least one keyword list was extracted;
            # `all_keywords[0]` raises IndexError when it is empty — confirm
            # callers never reach this branch with empty results.
            if isinstance(all_keywords[0], tuple):
                candidate_keywords = [[keyword[0] for keyword in all_keywords]]
            else:
                candidate_keywords = [[keyword[0] for keyword in keywords] for keywords in all_keywords]
            keywords = self.llm.extract_keywords(
                docs,
                embeddings=doc_embeddings,
                candidate_keywords=candidate_keywords,
                threshold=threshold
            )
            return keywords
        return all_keywords
    def extract_embeddings(
        self,
        docs: Union[str, List[str]],
        candidates: List[str] = None,
        keyphrase_ngram_range: Tuple[int, int] = (1, 1),
        stop_words: Union[str, List[str]] = "english",
        min_df: int = 1,
        vectorizer: CountVectorizer = None
    ) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
        """Extract document and word embeddings for the input documents and the
        generated candidate keywords/keyphrases respectively.

        Note that all potential keywords/keyphrases are not returned but only their
        word embeddings. This means that the values of `candidates`, `keyphrase_ngram_range`,
        `stop_words`, and `min_df` need to be the same between using `.extract_embeddings` and
        `.extract_keywords`.

        Arguments:
            docs: The document(s) for which to extract keywords/keyphrases
            candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
                        NOTE: This is not used if you passed a `vectorizer`.
            keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
                                   NOTE: This is not used if you passed a `vectorizer`.
            stop_words: Stopwords to remove from the document.
                        NOTE: This is not used if you passed a `vectorizer`.
            min_df: Minimum document frequency of a word across all documents
                    if keywords for multiple documents need to be extracted.
                    NOTE: This is not used if you passed a `vectorizer`.
            vectorizer: Pass in your own `CountVectorizer` from
                        `sklearn.feature_extraction.text.CountVectorizer`

        Returns:
            doc_embeddings: The embeddings of each document.
            word_embeddings: The embeddings of each potential keyword/keyphrase
                             across the vocabulary of the set of input documents.
                             NOTE: The `word_embeddings` should be generated through
                             `.extract_embeddings` as the order of these embeddings depend
                             on the vectorizer that was used to generate its vocabulary.

        Usage:

        To generate the word and document embeddings from a set of documents:

        ```python
        from keybert import KeyBERT

        kw_model = KeyBERT()
        doc_embeddings, word_embeddings = kw_model.extract_embeddings(docs)
        ```

        You can then use these embeddings and pass them to `.extract_keywords` to speed up tuning the model:

        ```python
        keywords = kw_model.extract_keywords(docs, doc_embeddings=doc_embeddings, word_embeddings=word_embeddings)
        ```
        """
        # Check for a single, empty document
        if isinstance(docs, str):
            if docs:
                docs = [docs]
            else:
                return []
        # Extract potential words using a vectorizer / tokenizer
        if vectorizer:
            count = vectorizer.fit(docs)
        else:
            try:
                count = CountVectorizer(
                    ngram_range=keyphrase_ngram_range,
                    stop_words=stop_words,
                    min_df=min_df,
                    vocabulary=candidates,
                ).fit(docs)
            except ValueError:
                # No vocabulary could be built (e.g. every token is a stop word).
                return []
        # Scikit-Learn Deprecation: get_feature_names is deprecated in 1.0
        # and will be removed in 1.2. Please use get_feature_names_out instead.
        if version.parse(sklearn_version) >= version.parse("1.0.0"):
            words = count.get_feature_names_out()
        else:
            words = count.get_feature_names()
        doc_embeddings = self.model.embed(docs)
        word_embeddings = self.model.embed(words)
        return doc_embeddings, word_embeddings
|
(model='all-MiniLM-L6-v2', llm: keybert.llm._base.BaseLLM = None)
|
713,985
|
keybert._model
|
__init__
|
KeyBERT initialization
Arguments:
model: Use a custom embedding model.
The following backends are currently supported:
* SentenceTransformers
* 🤗 Transformers
* Flair
* Spacy
* Gensim
* USE (TF-Hub)
You can also pass in a string that points to one of the following
sentence-transformers models:
* https://www.sbert.net/docs/pretrained_models.html
|
def __init__(self, model="all-MiniLM-L6-v2", llm: BaseLLM = None):
    """Initialize a KeyBERT model.

    Arguments:
        model: Use a custom embedding model.
               The following backends are currently supported:
               * SentenceTransformers
               * 🤗 Transformers
               * Flair
               * Spacy
               * Gensim
               * USE (TF-Hub)
               You can also pass in a string that points to one of the following
               sentence-transformers models:
               * https://www.sbert.net/docs/pretrained_models.html
        llm: An optional LLM backend; a raw `BaseLLM` is wrapped in `KeyLLM`.
    """
    self.model = select_backend(model)
    # Wrap a raw LLM backend in KeyLLM; pass through None (or anything
    # already wrapped) unchanged.
    self.llm = KeyLLM(llm) if isinstance(llm, BaseLLM) else llm
|
(self, model='all-MiniLM-L6-v2', llm: Optional[keybert.llm._base.BaseLLM] = None)
|
713,986
|
keybert._model
|
extract_embeddings
|
Extract document and word embeddings for the input documents and the
generated candidate keywords/keyphrases respectively.
Note that all potential keywords/keyphrases are not returned but only their
word embeddings. This means that the values of `candidates`, `keyphrase_ngram_range`,
`stop_words`, and `min_df` need to be the same between using `.extract_embeddings` and
`.extract_keywords`.
Arguments:
docs: The document(s) for which to extract keywords/keyphrases
candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
NOTE: This is not used if you passed a `vectorizer`.
keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
NOTE: This is not used if you passed a `vectorizer`.
stop_words: Stopwords to remove from the document.
NOTE: This is not used if you passed a `vectorizer`.
min_df: Minimum document frequency of a word across all documents
if keywords for multiple documents need to be extracted.
NOTE: This is not used if you passed a `vectorizer`.
vectorizer: Pass in your own `CountVectorizer` from
`sklearn.feature_extraction.text.CountVectorizer`
Returns:
doc_embeddings: The embeddings of each document.
    word_embeddings: The embeddings of each potential keyword/keyphrase
                     across the vocabulary of the set of input documents.
NOTE: The `word_embeddings` should be generated through
`.extract_embeddings` as the order of these embeddings depend
on the vectorizer that was used to generate its vocabulary.
Usage:
To generate the word and document embeddings from a set of documents:
```python
from keybert import KeyBERT
kw_model = KeyBERT()
doc_embeddings, word_embeddings = kw_model.extract_embeddings(docs)
```
You can then use these embeddings and pass them to `.extract_keywords` to speed up tuning the model:
```python
keywords = kw_model.extract_keywords(docs, doc_embeddings=doc_embeddings, word_embeddings=word_embeddings)
```
|
def extract_embeddings(
    self,
    docs: Union[str, List[str]],
    candidates: List[str] = None,
    keyphrase_ngram_range: Tuple[int, int] = (1, 1),
    stop_words: Union[str, List[str]] = "english",
    min_df: int = 1,
    vectorizer: CountVectorizer = None
) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
    """Extract document and word embeddings for the input documents and for the
    candidate keywords/keyphrases generated from them.

    Only the embeddings of the potential keywords/keyphrases are returned, not the
    keywords themselves. Therefore, `candidates`, `keyphrase_ngram_range`,
    `stop_words`, and `min_df` must have the same values here and in the later
    call to `.extract_keywords`.

    Arguments:
        docs: The document(s) for which to extract keywords/keyphrases
        candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
                    NOTE: This is not used if you passed a `vectorizer`.
        keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
                               NOTE: This is not used if you passed a `vectorizer`.
        stop_words: Stopwords to remove from the document.
                    NOTE: This is not used if you passed a `vectorizer`.
        min_df: Minimum document frequency of a word across all documents
                if keywords for multiple documents need to be extracted.
                NOTE: This is not used if you passed a `vectorizer`.
        vectorizer: Pass in your own `CountVectorizer` from
                    `sklearn.feature_extraction.text.CountVectorizer`

    Returns:
        doc_embeddings: The embeddings of each document.
        word_embeddings: The embeddings of each potential keyword/keyphrase
                         across the vocabulary of the set of input documents.
                         NOTE: The `word_embeddings` should be generated through
                         `.extract_embeddings` as the order of these embeddings
                         depends on the vectorizer that was used to generate its
                         vocabulary.

    Usage:

    To generate the word and document embeddings from a set of documents:

    ```python
    from keybert import KeyBERT

    kw_model = KeyBERT()
    doc_embeddings, word_embeddings = kw_model.extract_embeddings(docs)
    ```

    You can then pass these embeddings to `.extract_keywords` to speed up tuning the model:

    ```python
    keywords = kw_model.extract_keywords(docs, doc_embeddings=doc_embeddings, word_embeddings=word_embeddings)
    ```
    """
    # Normalize a single document into a list; an empty string yields no work.
    if isinstance(docs, str):
        if not docs:
            return []
        docs = [docs]
    # Build the candidate vocabulary, either from a user-supplied vectorizer
    # or from a freshly configured CountVectorizer.
    if vectorizer:
        fitted = vectorizer.fit(docs)
    else:
        try:
            fitted = CountVectorizer(
                ngram_range=keyphrase_ngram_range,
                stop_words=stop_words,
                min_df=min_df,
                vocabulary=candidates,
            ).fit(docs)
        except ValueError:
            # No vocabulary could be built (e.g. every token is a stop word).
            return []
    # `get_feature_names` was deprecated in scikit-learn 1.0 and removed in 1.2;
    # use `get_feature_names_out` on new versions.
    if version.parse(sklearn_version) >= version.parse("1.0.0"):
        vocabulary = fitted.get_feature_names_out()
    else:
        vocabulary = fitted.get_feature_names()
    # Embed documents and vocabulary with the same backend so they live in a
    # shared vector space.
    return self.model.embed(docs), self.model.embed(vocabulary)
|
(self, docs: Union[str, List[str]], candidates: Optional[List[str]] = None, keyphrase_ngram_range: Tuple[int, int] = (1, 1), stop_words: Union[str, List[str]] = 'english', min_df: int = 1, vectorizer: Optional[sklearn.feature_extraction.text.CountVectorizer] = None) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]
|
713,987
|
keybert._model
|
extract_keywords
|
Extract keywords and/or keyphrases
To get the biggest speed-up, make sure to pass multiple documents
at once instead of iterating over a single document.
Arguments:
docs: The document(s) for which to extract keywords/keyphrases
candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
NOTE: This is not used if you passed a `vectorizer`.
keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
NOTE: This is not used if you passed a `vectorizer`.
stop_words: Stopwords to remove from the document.
NOTE: This is not used if you passed a `vectorizer`.
top_n: Return the top n keywords/keyphrases
min_df: Minimum document frequency of a word across all documents
if keywords for multiple documents need to be extracted.
NOTE: This is not used if you passed a `vectorizer`.
use_maxsum: Whether to use Max Sum Distance for the selection
of keywords/keyphrases.
use_mmr: Whether to use Maximal Marginal Relevance (MMR) for the
selection of keywords/keyphrases.
diversity: The diversity of the results between 0 and 1 if `use_mmr`
is set to True.
nr_candidates: The number of candidates to consider if `use_maxsum` is
set to True.
vectorizer: Pass in your own `CountVectorizer` from
`sklearn.feature_extraction.text.CountVectorizer`
highlight: Whether to print the document and highlight its keywords/keyphrases.
NOTE: This does not work if multiple documents are passed.
seed_keywords: Seed keywords that may guide the extraction of keywords by
steering the similarities towards the seeded keywords.
NOTE: when multiple documents are passed,
                      `seed_keywords` functions in either of the two ways below:
                      - globally: when a flat list of str is passed, keywords are shared by all documents,
                      - locally: when a nested list of str is passed, keywords differ among documents.
doc_embeddings: The embeddings of each document.
word_embeddings: The embeddings of each potential keyword/keyphrase across
across the vocabulary of the set of input documents.
NOTE: The `word_embeddings` should be generated through
`.extract_embeddings` as the order of these embeddings depend
on the vectorizer that was used to generate its vocabulary.
Returns:
keywords: The top n keywords for a document with their respective distances
to the input document.
Usage:
To extract keywords from a single document:
```python
from keybert import KeyBERT
kw_model = KeyBERT()
keywords = kw_model.extract_keywords(doc)
```
To extract keywords from multiple documents, which is typically quite a bit faster:
```python
from keybert import KeyBERT
kw_model = KeyBERT()
keywords = kw_model.extract_keywords(docs)
```
|
def extract_keywords(
    self,
    docs: Union[str, List[str]],
    candidates: List[str] = None,
    keyphrase_ngram_range: Tuple[int, int] = (1, 1),
    stop_words: Union[str, List[str]] = "english",
    top_n: int = 5,
    min_df: int = 1,
    use_maxsum: bool = False,
    use_mmr: bool = False,
    diversity: float = 0.5,
    nr_candidates: int = 20,
    vectorizer: CountVectorizer = None,
    highlight: bool = False,
    seed_keywords: Union[List[str], List[List[str]]] = None,
    doc_embeddings: np.ndarray = None,
    word_embeddings: np.ndarray = None,
    threshold: float = None
) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
    """Extract keywords and/or keyphrases

    To get the biggest speed-up, make sure to pass multiple documents
    at once instead of iterating over a single document.

    Arguments:
        docs: The document(s) for which to extract keywords/keyphrases
        candidates: Candidate keywords/keyphrases to use instead of extracting them from the document(s)
                    NOTE: This is not used if you passed a `vectorizer`.
        keyphrase_ngram_range: Length, in words, of the extracted keywords/keyphrases.
                               NOTE: This is not used if you passed a `vectorizer`.
        stop_words: Stopwords to remove from the document.
                    NOTE: This is not used if you passed a `vectorizer`.
        top_n: Return the top n keywords/keyphrases
        min_df: Minimum document frequency of a word across all documents
                if keywords for multiple documents need to be extracted.
                NOTE: This is not used if you passed a `vectorizer`.
        use_maxsum: Whether to use Max Sum Distance for the selection
                    of keywords/keyphrases.
        use_mmr: Whether to use Maximal Marginal Relevance (MMR) for the
                 selection of keywords/keyphrases.
        diversity: The diversity of the results between 0 and 1 if `use_mmr`
                   is set to True.
        nr_candidates: The number of candidates to consider if `use_maxsum` is
                       set to True.
        vectorizer: Pass in your own `CountVectorizer` from
                    `sklearn.feature_extraction.text.CountVectorizer`
        highlight: Whether to print the document and highlight its keywords/keyphrases.
                   NOTE: This does not work if multiple documents are passed.
        seed_keywords: Seed keywords that may guide the extraction of keywords by
                       steering the similarities towards the seeded keywords.
                       NOTE: when multiple documents are passed,
                       `seed_keywords` functions in either of the two ways below:
                       - globally: when a flat list of str is passed, keywords are shared by all documents,
                       - locally: when a nested list of str is passed, keywords differs among documents.
        doc_embeddings: The embeddings of each document.
        word_embeddings: The embeddings of each potential keyword/keyphrase
                         across the vocabulary of the set of input documents.
                         NOTE: The `word_embeddings` should be generated through
                         `.extract_embeddings` as the order of these embeddings depend
                         on the vectorizer that was used to generate its vocabulary.
        threshold: Only used when an LLM is attached to this model; forwarded to
                   the LLM's `extract_keywords` to group similar documents before
                   fine-tuning the candidate keywords.

    Returns:
        keywords: The top n keywords for a document with their respective distances
        to the input document.

    Usage:

    To extract keywords from a single document:

    ```python
    from keybert import KeyBERT

    kw_model = KeyBERT()
    keywords = kw_model.extract_keywords(doc)
    ```

    To extract keywords from multiple documents, which is typically quite a bit faster:

    ```python
    from keybert import KeyBERT

    kw_model = KeyBERT()
    keywords = kw_model.extract_keywords(docs)
    ```
    """
    # Check for a single, empty document
    if isinstance(docs, str):
        if docs:
            docs = [docs]
        else:
            return []

    # Extract potential words using a vectorizer / tokenizer
    if vectorizer:
        count = vectorizer.fit(docs)
    else:
        try:
            count = CountVectorizer(
                ngram_range=keyphrase_ngram_range,
                stop_words=stop_words,
                min_df=min_df,
                vocabulary=candidates,
            ).fit(docs)
        except ValueError:
            # no usable vocabulary could be built (e.g. only stopwords)
            return []

    # Scikit-Learn Deprecation: get_feature_names is deprecated in 1.0
    # and will be removed in 1.2. Please use get_feature_names_out instead.
    if version.parse(sklearn_version) >= version.parse("1.0.0"):
        words = count.get_feature_names_out()
    else:
        words = count.get_feature_names()
    df = count.transform(docs)

    # Check if the right number of word embeddings are generated compared with the vectorizer
    if word_embeddings is not None:
        if word_embeddings.shape[0] != len(words):
            raise ValueError("Make sure that the `word_embeddings` are generated from the function "
                             "`.extract_embeddings`. \nMoreover, the `candidates`, `keyphrase_ngram_range`,"
                             "`stop_words`, and `min_df` parameters need to have the same values in both "
                             "`.extract_embeddings` and `.extract_keywords`.")

    # Extract embeddings
    if doc_embeddings is None:
        doc_embeddings = self.model.embed(docs)
    if word_embeddings is None:
        word_embeddings = self.model.embed(words)

    # Guided KeyBERT either local (keywords shared among documents) or global (keywords per document)
    if seed_keywords is not None:
        if isinstance(seed_keywords[0], str):
            # flat list: one shared seed embedding for every document
            seed_embeddings = self.model.embed(seed_keywords).mean(axis=0, keepdims=True)
        elif len(docs) != len(seed_keywords):
            raise ValueError("The length of docs must match the length of seed_keywords")
        else:
            # nested list: one averaged seed embedding per document
            seed_embeddings = np.vstack([
                self.model.embed(keywords).mean(axis=0, keepdims=True)
                for keywords in seed_keywords
            ])
        # weighted average: 3 parts document embedding, 1 part seed embedding
        doc_embeddings = ((doc_embeddings * 3 + seed_embeddings) / 4)

    # Find keywords
    all_keywords = []
    for index, _ in enumerate(docs):
        try:
            # Select embeddings of the words that actually occur in this document
            candidate_indices = df[index].nonzero()[1]
            candidates = [words[index] for index in candidate_indices]
            candidate_embeddings = word_embeddings[candidate_indices]
            doc_embedding = doc_embeddings[index].reshape(1, -1)

            # Maximal Marginal Relevance (MMR)
            if use_mmr:
                keywords = mmr(
                    doc_embedding,
                    candidate_embeddings,
                    candidates,
                    top_n,
                    diversity,
                )

            # Max Sum Distance
            elif use_maxsum:
                keywords = max_sum_distance(
                    doc_embedding,
                    candidate_embeddings,
                    candidates,
                    top_n,
                    nr_candidates,
                )

            # Cosine-based keyword extraction
            else:
                distances = cosine_similarity(doc_embedding, candidate_embeddings)
                keywords = [
                    (candidates[index], round(float(distances[0][index]), 4))
                    for index in distances.argsort()[0][-top_n:]
                ][::-1]
            all_keywords.append(keywords)

        # Capturing empty keywords
        except ValueError:
            all_keywords.append([])

    # Highlight keywords in the document
    if len(all_keywords) == 1:
        if highlight:
            highlight_document(docs[0], all_keywords[0], count)
        # a single document returns a flat list instead of a list of lists
        all_keywords = all_keywords[0]

    # Fine-tune keywords using an LLM
    if self.llm is not None:
        import torch

        doc_embeddings = torch.from_numpy(doc_embeddings).float()
        if torch.cuda.is_available():
            doc_embeddings = doc_embeddings.to("cuda")
        if isinstance(all_keywords[0], tuple):
            # single-document case: all_keywords is a flat list of (word, score)
            candidate_keywords = [[keyword[0] for keyword in all_keywords]]
        else:
            candidate_keywords = [[keyword[0] for keyword in keywords] for keywords in all_keywords]
        keywords = self.llm.extract_keywords(
            docs,
            embeddings=doc_embeddings,
            candidate_keywords=candidate_keywords,
            threshold=threshold
        )
        return keywords
    return all_keywords
|
(self, docs: Union[str, List[str]], candidates: Optional[List[str]] = None, keyphrase_ngram_range: Tuple[int, int] = (1, 1), stop_words: Union[str, List[str]] = 'english', top_n: int = 5, min_df: int = 1, use_maxsum: bool = False, use_mmr: bool = False, diversity: float = 0.5, nr_candidates: int = 20, vectorizer: Optional[sklearn.feature_extraction.text.CountVectorizer] = None, highlight: bool = False, seed_keywords: Union[List[str], List[List[str]], NoneType] = None, doc_embeddings: Optional[<built-in function array>] = None, word_embeddings: Optional[<built-in function array>] = None, threshold: Optional[float] = None) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]
|
713,988
|
keybert._llm
|
KeyLLM
|
A minimal method for keyword extraction with Large Language Models (LLM)
The keyword extraction is done by simply asking the LLM to extract a
number of keywords from a single piece of text.
|
class KeyLLM:
    """
    A minimal method for keyword extraction with Large Language Models (LLM)

    The keyword extraction is done by simply asking the LLM to extract a
    number of keywords from a single piece of text.
    """

    def __init__(self, llm):
        """KeyLLM initialization

        Arguments:
            llm: The Large Language Model to use; must expose an
                 `extract_keywords(docs, candidate_keywords)` method.
        """
        self.llm = llm

    def extract_keywords(
        self,
        docs: Union[str, List[str]],
        check_vocab: bool = False,
        candidate_keywords: List[List[str]] = None,
        threshold: float = None,
        embeddings=None
    ) -> Union[List[str], List[List[str]]]:
        """Extract keywords and/or keyphrases

        To get the biggest speed-up, make sure to pass multiple documents
        at once instead of iterating over a single document.

        NOTE: The resulting keywords are expected to be separated by commas so
        any changes to the prompt will have to make sure that the resulting
        keywords are comma-separated.

        Arguments:
            docs: The document(s) for which to extract keywords/keyphrases
            check_vocab: Only return keywords that appear in the documents
                         (substring containment, not exact token matching)
            candidate_keywords: Candidate keywords for each document
            threshold: Minimum similarity for grouping near-duplicate documents;
                       only used when sentence-transformers is available and
                       `embeddings` are provided
            embeddings: Precomputed document embeddings used for the grouping

        Returns:
            keywords: The top n keywords for a document with their respective distances
            to the input document.

        Usage:

        To extract keywords from a single document:

        ```python
        import openai
        from keybert.llm import OpenAI
        from keybert import KeyLLM

        # Create your LLM
        client = openai.OpenAI(api_key=MY_API_KEY)
        llm = OpenAI(client)

        # Load it in KeyLLM
        kw_model = KeyLLM(llm)

        # Extract keywords
        document = "The website mentions that it only takes a couple of days to deliver but I still have not received mine."
        keywords = kw_model.extract_keywords(document)
        ```
        """
        # Check for a single, empty document
        if isinstance(docs, str):
            if docs:
                docs = [docs]
            else:
                return []

        if HAS_SBERT and threshold is not None and embeddings is not None:
            # Find similar documents
            clusters = util.community_detection(embeddings, min_community_size=2, threshold=threshold)
            in_cluster = set([cluster for cluster_set in clusters for cluster in cluster_set])
            out_cluster = set(list(range(len(docs)))).difference(in_cluster)

            # Extract keywords for all documents not in a cluster
            if out_cluster:
                selected_docs = [docs[index] for index in out_cluster]
                if candidate_keywords is not None:
                    selected_keywords = [candidate_keywords[index] for index in out_cluster]
                else:
                    selected_keywords = None
                out_cluster_keywords = self.llm.extract_keywords(
                    selected_docs,
                    selected_keywords,
                )
                out_cluster_keywords = {index: words for words, index in zip(out_cluster_keywords, out_cluster)}

            # Extract keywords for only the first document in a cluster
            if in_cluster:
                selected_docs = [docs[cluster[0]] for cluster in clusters]
                if candidate_keywords is not None:
                    selected_keywords = [candidate_keywords[cluster[0]] for cluster in clusters]
                else:
                    selected_keywords = None
                in_cluster_keywords = self.llm.extract_keywords(
                    selected_docs,
                    selected_keywords
                )
                # every member of a cluster shares its representative's keywords
                in_cluster_keywords = {
                    doc_id: in_cluster_keywords[index]
                    for index, cluster in enumerate(clusters)
                    for doc_id in cluster
                }

            # Update out cluster keywords with in cluster keywords
            if out_cluster:
                if in_cluster:
                    out_cluster_keywords.update(in_cluster_keywords)
                keywords = [out_cluster_keywords[index] for index in range(len(docs))]
            else:
                keywords = [in_cluster_keywords[index] for index in range(len(docs))]
        else:
            # Extract keywords using a Large Language Model (LLM)
            keywords = self.llm.extract_keywords(docs, candidate_keywords)

        # Only keep keywords that appear in the input document
        if check_vocab:
            updated_keywords = []
            for keyword_set, document in zip(keywords, docs):
                updated_keyword_set = []
                for keyword in keyword_set:
                    # substring containment, not exact token matching
                    if keyword in document:
                        updated_keyword_set.append(keyword)
                updated_keywords.append(updated_keyword_set)
            return updated_keywords
        return keywords
|
(llm)
|
713,989
|
keybert._llm
|
__init__
|
KeyLLM initialization
Arguments:
llm: The Large Language Model to use
|
def __init__(self, llm):
    """KeyLLM initialization

    Arguments:
        llm: The Large Language Model to use; must expose an
             `extract_keywords(docs, candidate_keywords)` method.
    """
    self.llm = llm
|
(self, llm)
|
713,990
|
keybert._llm
|
extract_keywords
|
Extract keywords and/or keyphrases
To get the biggest speed-up, make sure to pass multiple documents
at once instead of iterating over a single document.
NOTE: The resulting keywords are expected to be separated by commas so
any changes to the prompt will have to make sure that the resulting
keywords are comma-separated.
Arguments:
docs: The document(s) for which to extract keywords/keyphrases
check_vocab: Only return keywords that appear exactly in the documents
candidate_keywords: Candidate keywords for each document
Returns:
keywords: The top n keywords for a document with their respective distances
to the input document.
Usage:
To extract keywords from a single document:
```python
import openai
from keybert.llm import OpenAI
from keybert import KeyLLM
# Create your LLM
client = openai.OpenAI(api_key=MY_API_KEY)
llm = OpenAI(client)
# Load it in KeyLLM
kw_model = KeyLLM(llm)
# Extract keywords
document = "The website mentions that it only takes a couple of days to deliver but I still have not received mine."
keywords = kw_model.extract_keywords(document)
```
|
def extract_keywords(
    self,
    docs: Union[str, List[str]],
    check_vocab: bool = False,
    candidate_keywords: List[List[str]] = None,
    threshold: float = None,
    embeddings=None
) -> Union[List[str], List[List[str]]]:
    """Extract keywords and/or keyphrases

    To get the biggest speed-up, make sure to pass multiple documents
    at once instead of iterating over a single document.

    NOTE: The resulting keywords are expected to be separated by commas so
    any changes to the prompt will have to make sure that the resulting
    keywords are comma-separated.

    Arguments:
        docs: The document(s) for which to extract keywords/keyphrases
        check_vocab: Only return keywords that appear in the documents
                     (substring containment, not exact token matching)
        candidate_keywords: Candidate keywords for each document
        threshold: Minimum similarity for grouping near-duplicate documents;
                   only used when sentence-transformers is available and
                   `embeddings` are provided
        embeddings: Precomputed document embeddings used for the grouping

    Returns:
        keywords: The top n keywords for a document with their respective distances
        to the input document.

    Usage:

    To extract keywords from a single document:

    ```python
    import openai
    from keybert.llm import OpenAI
    from keybert import KeyLLM

    # Create your LLM
    client = openai.OpenAI(api_key=MY_API_KEY)
    llm = OpenAI(client)

    # Load it in KeyLLM
    kw_model = KeyLLM(llm)

    # Extract keywords
    document = "The website mentions that it only takes a couple of days to deliver but I still have not received mine."
    keywords = kw_model.extract_keywords(document)
    ```
    """
    # Check for a single, empty document
    if isinstance(docs, str):
        if docs:
            docs = [docs]
        else:
            return []

    if HAS_SBERT and threshold is not None and embeddings is not None:
        # Find similar documents
        clusters = util.community_detection(embeddings, min_community_size=2, threshold=threshold)
        in_cluster = set([cluster for cluster_set in clusters for cluster in cluster_set])
        out_cluster = set(list(range(len(docs)))).difference(in_cluster)

        # Extract keywords for all documents not in a cluster
        if out_cluster:
            selected_docs = [docs[index] for index in out_cluster]
            if candidate_keywords is not None:
                selected_keywords = [candidate_keywords[index] for index in out_cluster]
            else:
                selected_keywords = None
            out_cluster_keywords = self.llm.extract_keywords(
                selected_docs,
                selected_keywords,
            )
            out_cluster_keywords = {index: words for words, index in zip(out_cluster_keywords, out_cluster)}

        # Extract keywords for only the first document in a cluster
        if in_cluster:
            selected_docs = [docs[cluster[0]] for cluster in clusters]
            if candidate_keywords is not None:
                selected_keywords = [candidate_keywords[cluster[0]] for cluster in clusters]
            else:
                selected_keywords = None
            in_cluster_keywords = self.llm.extract_keywords(
                selected_docs,
                selected_keywords
            )
            # every member of a cluster shares its representative's keywords
            in_cluster_keywords = {
                doc_id: in_cluster_keywords[index]
                for index, cluster in enumerate(clusters)
                for doc_id in cluster
            }

        # Update out cluster keywords with in cluster keywords
        if out_cluster:
            if in_cluster:
                out_cluster_keywords.update(in_cluster_keywords)
            keywords = [out_cluster_keywords[index] for index in range(len(docs))]
        else:
            keywords = [in_cluster_keywords[index] for index in range(len(docs))]
    else:
        # Extract keywords using a Large Language Model (LLM)
        keywords = self.llm.extract_keywords(docs, candidate_keywords)

    # Only keep keywords that appear in the input document
    if check_vocab:
        updated_keywords = []
        for keyword_set, document in zip(keywords, docs):
            updated_keyword_set = []
            for keyword in keyword_set:
                # substring containment, not exact token matching
                if keyword in document:
                    updated_keyword_set.append(keyword)
            updated_keywords.append(updated_keyword_set)
        return updated_keywords
    return keywords
|
(self, docs: Union[str, List[str]], check_vocab: bool = False, candidate_keywords: Optional[List[List[str]]] = None, threshold: Optional[float] = None, embeddings=None) -> Union[List[str], List[List[str]]]
|
713,999
|
pyjdbc.connect
|
ArgumentOpts
|
Describes a single arguments name and settings
|
class ArgumentOpts:
    """
    Holds the name and validation settings for a single connect argument.
    """

    def __init__(self,
                 name=None,
                 position=None,
                 argtype=None,
                 mandatory=None,
                 requires=None,
                 excludes=None,
                 default=None,
                 description=None,
                 secret=None,
                 choices=None,
                 fn=None):
        if position is not None and not isinstance(position, int):
            raise ValueError('{}: position must be `None` or `int`, got: {}'.format(name, type(position)))
        # an argument that can be given positionally is always required
        if position is not None:
            mandatory = True
        excludes = [] if excludes is None else excludes
        requires = [] if requires is None else requires
        choices = [] if choices is None else choices
        if name is not None and not name.isidentifier():
            raise ValueError('argument `name` is not a valid python identifier name: {}'.format(name))
        if fn is not None and not callable(fn):
            raise ValueError('{}: fn must be `None` or a callable, got: {}'.format(name, type(fn)))
        if default is not None and mandatory:
            raise ValueError('{}: [mandatory=True] cannot be set when [default={}] is set'.format(name, default))
        self._name = name
        self._position = position
        self._argtype = argtype
        self._mandatory = mandatory
        self._requires = requires
        self._excludes = excludes
        self._default = default
        self._description = description
        self._secret = secret
        self._choices = choices
        self._fn = fn

    @property
    def name(self):
        """Argument name exactly as it would appear in **kwargs."""
        return self._name

    @name.setter
    def name(self, keyword):
        self._name = keyword

    @property
    def position(self):
        """Index of this argument in the *args list, or None for keyword-only."""
        return self._position

    @property
    def argtype(self):
        """Expected type of the argument value, or None when unchecked."""
        return self._argtype

    @property
    def requires(self):
        """Names of arguments that must also be supplied alongside this one."""
        return self._requires

    @property
    def excludes(self):
        """Names of arguments that must not be supplied alongside this one."""
        return self._excludes

    @property
    def mandatory(self):
        """Whether the argument must be supplied by the caller."""
        return self._mandatory

    @mandatory.setter
    def mandatory(self, is_mandatory):
        self._mandatory = is_mandatory

    @property
    def default(self):
        """Fallback value used when the argument is omitted."""
        return self._default

    @property
    def description(self):
        """Human readable description of the argument."""
        return self._description

    @property
    def choices(self):
        """Allowed values for the argument (an empty list means unrestricted)."""
        return self._choices

    @property
    def secret(self):
        """Flag marking the value as sensitive; stored but not interpreted here."""
        return self._secret

    @property
    def fn(self):
        """Optional callable used to convert/normalize the supplied value."""
        return self._fn
(name=None, position=None, argtype=None, mandatory=None, requires=None, excludes=None, default=None, description=None, secret=None, choices=None, fn=None)
|
714,000
|
pyjdbc.connect
|
__init__
| null |
def __init__(self,
             name=None,
             position=None,
             argtype=None,
             mandatory=None,
             requires=None,
             excludes=None,
             default=None,
             description=None,
             secret=None,
             choices=None,
             fn=None):
    """Validate and store the settings for a single connect argument.

    :param name: argument keyword; must be a valid python identifier
    :param position: optional index in *args; positional args are forced mandatory
    :param argtype: expected type of the argument value
    :param mandatory: whether the argument must be supplied
    :param requires: names of arguments that must also be present with this one
    :param excludes: names of arguments that must not be present with this one
    :param default: value used when the argument is omitted (incompatible with mandatory)
    :param description: human readable description of the argument
    :param secret: flag marking the value as sensitive -- stored but not
                   interpreted by this class
    :param choices: allowed values for the argument
    :param fn: optional callable(parser, value) used to convert/normalize the value
    :raises ValueError: for a non-int position, invalid identifier name,
                        non-callable fn, or a default combined with mandatory
    """
    if position is not None and not isinstance(position, int):
        raise ValueError('{}: position must be `None` or `int`, got: {}'.format(name, type(position)))
    # positional arguments are mandatory
    if position is not None:
        mandatory=True
    if excludes is None:
        excludes = []
    if requires is None:
        requires = []
    if choices is None:
        choices = []
    if name is not None and not name.isidentifier():
        raise ValueError('argument `name` is not a valid python identifier name: {}'.format(name))
    if fn is not None and not callable(fn):
        raise ValueError('{}: fn must be `None` or a callable, got: {}'.format(name, type(fn)))
    if default is not None and mandatory:
        raise ValueError('{}: [mandatory=True] cannot be set when [default={}] is set'.format(name, default))
    self._name = name
    self._position = position
    self._argtype = argtype
    self._mandatory = mandatory
    self._requires = requires
    self._excludes = excludes
    self._default = default
    self._description = description
    self._secret = secret
    self._choices = choices
    self._fn = fn
|
(self, name=None, position=None, argtype=None, mandatory=None, requires=None, excludes=None, default=None, description=None, secret=None, choices=None, fn=None)
|
714,001
|
pyjdbc.connect
|
ArgumentParser
|
Parses connection arguments, should be subclassed to implement your own arguments requirements
|
class ArgumentParser:
    """
    Parses connection arguments; subclass to implement your own argument requirements.

    Arguments are declared either declaratively as :class:`ArgumentOpts` class
    attributes (or objects tagged with the decorator attribute), which are
    collected by ``_register_args``, or imperatively via :meth:`add`.
    :meth:`parse` validates the values given at construction time.
    """

    def __init__(self, *args, **kwargs):
        """Store the raw connect arguments for later validation by :meth:`parse`."""
        self._args = args
        self._kwargs = kwargs
        self._named_args = {}      # argument name -> ArgumentOpts
        self._position_args = {}   # position index -> ArgumentOpts

    def _register_args(self):
        """Collect ArgumentOpts declared on the class hierarchy (oldest base first)."""
        # walk the class hierarchy in reverse so a subclass cannot silently
        # redefine an argument -- a NameError is raised instead
        for cls in reversed(type(self).mro()):
            for obj_name, obj in cls.__dict__.items():
                arg = getattr(obj, DECORATOR_ATTRIBUTE, None)
                if isinstance(obj, ArgumentOpts):
                    arg = obj
                if not isinstance(arg, ArgumentOpts):
                    continue
                # if the argument has no name, the attribute that defines it is the name
                if not arg.name:
                    arg.name = obj_name
                if arg.name in self._named_args:
                    raise NameError('argument is already defined: {}'.format(arg.name))
                self._named_args[arg.name] = arg
        self._register_positional()

    def _register_positional(self):
        """Index positional arguments and ensure positions are unique and sequential."""
        for arg in self._named_args.values():
            if arg.position is None:
                continue
            # reject a different argument claiming an already-used position
            # (re-registering the same ArgumentOpts object stays idempotent)
            existing = self._position_args.get(arg.position)
            if existing is not None and existing is not arg:
                raise ValueError('duplicate positional argument at position {}: {}'.format(
                    arg.position, arg.name))
            self._position_args[arg.position] = arg
        if self._position_args:
            pmax = max(self._position_args)
            inclusive = set(range(0, pmax + 1))
            non_sequential = set(self._position_args) ^ inclusive
            if non_sequential:
                raise ValueError('positional arguments are not sequential, '
                                 'these positions are missing: {}'.format(list(non_sequential)))

    def _parse_args(self, *args, **kwargs):
        """Validate *args/**kwargs and return a dict of argument-name -> final value.

        Processing order: positional mapping, unknown/duplicate keyword checks,
        mandatory check, type check, requires/excludes, defaults, per-argument
        `fn` conversion, choices validation.
        """
        # TODO test for non-existent fields in requires, and excludes
        if self._position_args:
            num_positional_args = max(self._position_args.keys()) + 1
        else:
            num_positional_args = 0
        if len(args) > num_positional_args:
            raise ValueError('too many positional arguments, accepts at most: {}'.format(num_positional_args))
        # map argument-name to value for *args
        pos_values = {self._position_args[idx].name: val for idx, val in enumerate(args)}
        keyword_values = {}
        for name, value in kwargs.items():
            if name not in self._named_args:
                valid_keywords = '\n'.join(self._named_args)
                raise ValueError('invalid keyword argument: "{}", valid names:\n{}'.format(name, valid_keywords))
            if name in pos_values:
                raise ValueError('argument repeated by position and keyword: "{}"'.format(name))
            keyword_values[name] = value
        keyword_values.update(pos_values)
        # check for missing mandatory arguments
        mandatory = [arg.name for arg in self._named_args.values() if arg.mandatory and not arg.default]
        missing = set(mandatory) - set(keyword_values)
        if missing:
            missing_str = '\n'.join(missing)
            raise ValueError('required arguments missing:\n{}\n'
                             'note that all positional arguments are mandatory'.format(
                                 textwrap.indent(missing_str, ' ' * 4)))
        # check for valid types
        for name, value in keyword_values.items():
            arg = self._named_args[name]
            if arg.argtype is None or value is None:
                continue
            # a CLASS object passed as the value is accepted when it subclasses argtype
            try:
                is_subclass = issubclass(value, arg.argtype)
            except TypeError:
                is_subclass = False
            # skip the type check when a conversion function is defined -- the
            # function's return value is type-checked further below instead.
            # (previously the check was skipped whenever argtype itself was a
            # class, which disabled it for the common case such as argtype=int)
            if arg.fn is None and not isinstance(value, arg.argtype) and not is_subclass:
                raise TypeError('argument type invalid: "{}" expected {}, got: {}'.format(
                    name,
                    arg.argtype,
                    type(value)
                ))
        # check for includes/excludes
        for name in keyword_values:
            requires = self._named_args[name].requires
            excludes = self._named_args[name].excludes
            if requires:
                missing_required = set(requires) - set(keyword_values.keys())
                if missing_required:
                    missing_required = ', '.join(missing_required)
                    raise ValueError('argument: "{}" requires these args to ALSO be set: {}'.format(
                        name, missing_required))
            if excludes:
                present_excludes = set(excludes) & set(keyword_values.keys())
                if present_excludes:
                    present_excludes = ', '.join(present_excludes)
                    raise ValueError('argument: "{}" requires these args to NOT be set: {}'.format(
                        name, present_excludes))
        # apply defaults
        for name, arg in self._named_args.items():
            if arg.default is not None and keyword_values.get(name) is None:
                keyword_values[name] = arg.default
        # apply conversion functions
        for name in keyword_values:
            arg = self._named_args[name]
            current_value = keyword_values[name]
            # don't compute functions for "null" values
            if current_value is None or arg.fn is None:
                continue
            new_value = arg.fn(self, current_value)
            if arg.argtype is not None and not isinstance(new_value, arg.argtype):
                raise TypeError('argument function returned type invalid: "{}" expected {}, got: {}\n'.format(
                    name,
                    arg.argtype,
                    type(new_value)
                ))
            keyword_values[name] = new_value
        # validate restricted choices
        for name, value in keyword_values.items():
            arg = self._named_args[name]
            if not arg.choices:
                continue
            if value not in arg.choices:
                raise ValueError('argument "{}" value "{}" invalid, must be one of: ({})'.format(
                    name,
                    value,
                    ', '.join(arg.choices)
                ))
        return keyword_values

    def add(self,
            name,
            position=None,
            argtype=None,
            mandatory=None,
            requires=None,
            excludes=None,
            default=None,
            description=None,
            secret=None,
            choices=None,
            fn=None):
        """
        Add an argument to the parser
        :param name: the name of the argument, any kwarg with this name will be accepted
        :param position: (optional) if you want to support this variable as a positional argument
        :param argtype: (optional) the data type of the argument. `isinstance` will be used to test for this type
        :param mandatory: (optional) indicates is this is a required argument
        :param requires: (optional) a sequence of other fields by `name` that are required if this field is set
        :param excludes: (optional) a sequence of other fields by `name` that cannot be set if this field is defined
        :param default: (optional) defines a default value
        :param description: (optional) the description of the field
        :param secret: (optional) marks the value as sensitive
        :param choices: (optional) restricts the value to this sequence of allowed values
        :param fn: (optional) callable(parser, value) used to convert/normalize the value
        :return:
        """
        if not isinstance(name, str):
            raise TypeError('name must be string, got: {}'.format(type(name)))
        if name in self._named_args:
            raise NameError('argument is already defined: {}'.format(name))
        self._named_args[name] = ArgumentOpts(name=name,
                                              position=position,
                                              argtype=argtype,
                                              mandatory=mandatory,
                                              requires=requires,
                                              excludes=excludes,
                                              default=default,
                                              description=description,
                                              secret=secret,
                                              choices=choices,
                                              fn=fn)

    def parse(self):
        """
        Register declared arguments, validate the stored call values and wrap them.
        :return: connection arguments keyed by argument name
        :rtype: ConnectArguments
        """
        self._register_args()
        if not self._named_args:
            raise ValueError('the current parser: "{}" has no configured arguments\n'
                             'Any arguments passed to this parser will be ignored.'
                             'You may have forgot to configure a custom parser'.format(
                                 self.__class__.__name__
                             ))
        args_dict = self._parse_args(*self._args, **self._kwargs)
        return ConnectArguments(args_dict)

    def __str__(self):
        """Return a one-argument-per-line summary of the configured arguments.

        The previous implementation returned None, which made ``str(parser)``
        raise ``TypeError: __str__ returned non-string``.
        """
        entries = []
        for name, arg in self._named_args.items():
            parts = [name]
            if arg.position is not None:
                parts.append('position={}'.format(arg.position))
            if arg.mandatory:
                parts.append('mandatory')
            if arg.default is not None:
                parts.append('default={!r}'.format(arg.default))
            entries.append(' '.join(parts))
        return '{}:\n{}'.format(type(self).__name__,
                                textwrap.indent('\n'.join(entries), ' ' * 4))
|
(*args, **kwargs)
|
714,002
|
pyjdbc.connect
|
__init__
| null |
def __init__(self, *args, **kwargs):
    """Store the raw connect arguments for later validation by ``parse()``.

    :param args: positional connection argument values
    :param kwargs: keyword connection argument values
    """
    self._args = args
    self._kwargs = kwargs
    # argument name -> ArgumentOpts, filled by _register_args() / add()
    self._named_args = {}
    # position index -> ArgumentOpts, filled by _register_positional()
    self._position_args = {}
|
(self, *args, **kwargs)
|
714,003
|
pyjdbc.connect
|
__str__
| null |
def __str__(self):
    """Return a one-argument-per-line summary of the configured arguments.

    The previous implementation returned None, which made ``str(parser)``
    raise ``TypeError: __str__ returned non-string``.
    """
    entries = []
    for name, arg in self._named_args.items():
        parts = [name]
        if arg.position is not None:
            parts.append('position={}'.format(arg.position))
        if arg.mandatory:
            parts.append('mandatory')
        if arg.default is not None:
            parts.append('default={!r}'.format(arg.default))
        entries.append(' '.join(parts))
    return '{}:\n{}'.format(type(self).__name__,
                            textwrap.indent('\n'.join(entries), ' ' * 4))
|
(self)
|
714,004
|
pyjdbc.connect
|
_parse_args
| null |
def _parse_args(self, *args, **kwargs):
    """Validate *args/**kwargs against the registered arguments and return a
    dict of argument-name -> final value.

    Processing order: positional mapping, unknown/duplicate keyword checks,
    mandatory check, type check, requires/excludes, defaults, per-argument
    `fn` conversion, choices validation.
    """
    # TODO test for non-existent fields in requires, and excludes
    if self._position_args:
        num_positional_args = max(self._position_args.keys()) + 1
    else:
        num_positional_args = 0
    if len(args) > num_positional_args:
        raise ValueError('too many positional arguments, accepts at most: {}'.format(num_positional_args))
    # map argument-name to value for *args
    pos_values = {self._position_args[idx].name: val for idx, val in enumerate(args)}
    keyword_values = {}
    for name, value in kwargs.items():
        if name not in self._named_args:
            valid_keywords = '\n'.join(self._named_args)
            raise ValueError('invalid keyword argument: "{}", valid names:\n{}'.format(name, valid_keywords))
        if name in pos_values:
            raise ValueError('argument repeated by position and keyword: "{}"'.format(name))
        keyword_values[name] = value
    keyword_values.update(pos_values)
    # check for missing mandatory arguments
    mandatory = [arg.name for arg in self._named_args.values() if arg.mandatory and not arg.default]
    missing = set(mandatory) - set(keyword_values)
    if missing:
        missing_str = '\n'.join(missing)
        raise ValueError('required arguments missing:\n{}\n'
                         'note that all positional arguments are mandatory'.format(
                             textwrap.indent(missing_str, ' '*4)))
    # check for valid types:
    for name, value in keyword_values.items():
        arg = self._named_args[name]
        if arg.argtype is None or value is None:
            continue
        # if argtype is a CLASS object test for subclass
        try:
            is_subclass = issubclass(value, arg.argtype)
        except TypeError:
            is_subclass = False
        # check the argument type, unless there is a function defined
        # NOTE(review): when `arg.argtype` IS a class, `inspect.isclass()` is
        # true and this whole check is skipped -- likely a bug; also the error
        # message prints type(arg.argtype) (always `<class 'type'>`) rather
        # than the expected type itself. Verify intent before relying on it.
        if not inspect.isclass(arg.argtype) and not isinstance(value, arg.argtype) and not is_subclass:
            raise TypeError('argument type invalid: "{}" expected {}, got: {}'.format(
                name,
                type(arg.argtype),
                type(value)
            ))
    # check for includes/excludes
    for name in keyword_values:
        requires = self._named_args[name].requires
        excludes = self._named_args[name].excludes
        if requires:
            missing_required = set(requires) - set(keyword_values.keys())
            if missing_required:
                missing_required = ', '.join(missing_required)
                raise ValueError('argument: "{}" requires these args to ALSO be set: {}'.format(
                    name, missing_required))
        if excludes:
            present_excludes = set(excludes) & set(keyword_values.keys())
            if present_excludes:
                present_excludes = ', '.join(present_excludes)
                raise ValueError('argument: "{}" requires these args to NOT be set: {}'.format(
                    name, present_excludes))
    # apply defaults
    for name, arg in self._named_args.items():
        if arg.default is not None and keyword_values.get(name) is None:
            keyword_values[name] = arg.default
    # apply functions
    for name in keyword_values:
        arg = self._named_args[name]
        current_value = keyword_values[name]
        # don't compute functions for "null" values
        if current_value is None or arg.fn is None:
            continue
        # call the decorated function
        new_value = arg.fn(self, current_value)
        if arg.argtype is not None:
            # NOTE(review): `not arg.fn is None` is always true at this point
            # (fn was checked above), so the condition reduces to the
            # isinstance test alone.
            if not isinstance(new_value, arg.argtype) and not arg.fn is None:
                raise TypeError('argument function returned type invalid: "{}" expected {}, got: {}\n'.format(
                    name,
                    type(arg.argtype),
                    type(new_value)
                ))
        keyword_values[name] = new_value
    # validate restricted choices
    for name, value in keyword_values.items():
        arg = self._named_args[name]
        if not arg.choices:
            continue
        if value not in arg.choices:
            raise ValueError('argument "{}" value "{}" invalid, must be one of: ({})'.format(
                name,
                value,
                ', '.join(arg.choices)
            ))
    return keyword_values
|
(self, *args, **kwargs)
|
714,005
|
pyjdbc.connect
|
_register_args
| null |
def _register_args(self):
    """Collect ArgumentOpts instances declared on the class hierarchy.

    Walks the MRO from the oldest base class to the most derived one; a
    subclass redefining an existing argument raises NameError. Both bare
    ArgumentOpts class attributes and objects tagged with the decorator
    attribute are recognized. Finishes by indexing positional arguments.
    """
    # walk the class hierarchy in reverse
    for cls in reversed(type(self).mro()):
        for obj_name, obj in cls.__dict__.items():
            arg = getattr(obj, DECORATOR_ATTRIBUTE, None)
            if isinstance(obj, ArgumentOpts):
                arg = obj
            if not isinstance(arg, ArgumentOpts):
                continue
            # if the argument has no name, the property that defines it is the name
            if not arg.name:
                arg.name = obj_name
            if arg.name in self._named_args:
                raise NameError('argument is already defined: {}'.format(arg.name))
            self._named_args[arg.name] = arg
    self._register_positional()
|
(self)
|
714,006
|
pyjdbc.connect
|
_register_positional
| null |
def _register_positional(self):
    """Index positional arguments by position and validate the positions.

    Raises ValueError when the declared positions do not form the contiguous
    range 0..max.
    """
    # NOTE(review): despite the comment below, duplicate positions are not
    # rejected here -- a later argument silently overwrites an earlier one
    # at the same index; confirm whether that is intended.
    # ensure there are not duplicate positions
    for arg in self._named_args.values():
        if arg.position is None:
            continue
        self._position_args[arg.position] = arg
    if self._position_args:
        pmax = max(self._position_args)
        inclusive = set(range(0, pmax+1))
        # symmetric difference exposes any gap in 0..pmax
        non_sequential = set(self._position_args) ^ inclusive
        if non_sequential:
            raise ValueError('positional arguments are not sequential, '
                             'these positions are missing: {}'.format(list(non_sequential)))
|
(self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.