| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q271100 | WikiText.external_links | test | def external_links(self) -> List['ExternalLink']:
"""Return a list of found external link objects.
Note:
Templates adjacent to external links are considered part of the
link. In reality, this depends on the contents of the template:
>>> WikiText(
... 'http://example.com{{dead link}}'
...).external_links[0].url
'http://example.com{{dead link}}'
>>> WikiText(
... '[http://example.com{{space template}} text]'
...).external_links[0].url
'http://example.com{{space template}}'
"""
external_links = [] # type: List['ExternalLink']
external_links_append = external_links.append
type_to_spans = self._type_to_spans
lststr = self._lststr
ss, se = self._span
spans = type_to_spans.setdefault('ExternalLink', [])
if not spans:
# All the added spans will be new.
spans_append = spans.append
for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
s, e = m.span()
span = [ss + s, ss + e]
spans_append(span)
external_links_append(
ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
return external_links
# There are already some ExternalLink spans. Use the already existing
# ones when the detected span is one of those.
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
s, e = m.span()
span = s, e = [s + ss, e + ss]
old_span = span_tuple_to_span_get((s, e))
if old_span is None:
insort(spans, span)
else:
span = old_span
external_links_append(
ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
return external_links | python | {
"resource": ""
} |
q271101 | WikiText.sections | test | def sections(self) -> List['Section']:
"""Return a list of section in current wikitext.
The first section will always be the lead section, even if it is an
empty string.
"""
sections = [] # type: List['Section']
sections_append = sections.append
type_to_spans = self._type_to_spans
lststr = self._lststr
ss, se = _span = self._span
type_spans = type_to_spans.setdefault('Section', [])
full_match = SECTIONS_FULLMATCH(self._shadow)
section_spans = full_match.spans('section')
levels = [len(eq) for eq in full_match.captures('equals')]
if not type_spans:
# All spans are new
spans_append = type_spans.append
for current_index, (current_level, (s, e)) in enumerate(
zip(levels, section_spans), 1
):
# Add text of the current_section to any parent section.
# Note that section 0 is not a parent for any subsection.
for section_index, section_level in enumerate(
levels[current_index:], current_index
):
if current_level and section_level > current_level:
e = section_spans[section_index][1]
else:
break
span = [ss + s, ss + e]
spans_append(span)
sections_append(
Section(lststr, type_to_spans, span, 'Section'))
return sections
# Some spans already exist. Instead of appending new spans,
# reuse them when the detected span already exists.
span_tuple_to_span = {(s[0], s[1]): s for s in type_spans}.get
for current_index, (current_level, (s, e)) in enumerate(
zip(levels, section_spans), 1
):
# Add text of the current_section to any parent section.
# Note that section 0 is not a parent for any subsection.
for section_index, section_level in enumerate(
levels[current_index:], current_index
):
if current_level and section_level > current_level:
e = section_spans[section_index][1]
else:
break
s, e = ss + s, ss + e
old_span = span_tuple_to_span((s, e))
if old_span is None:
span = [s, e]
insort(type_spans, span)
else:
span = old_span
sections_append(Section(lststr, type_to_spans, span, 'Section'))
return sections | python | {
"resource": ""
} |
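A minimal usage sketch for the `sections` property above, assuming the package is importable as `wikitextparser` (the import name is not shown in this excerpt):

```python
import wikitextparser as wtp

parsed = wtp.parse("lead text\n== History ==\nbody\n")
for section in parsed.sections:
    # Section 0 is always the lead section, even when it is empty.
    print(repr(section.title), len(section.contents))
```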
q271102 | WikiText.tables | test | def tables(self) -> List['Table']:
"""Return a list of found table objects."""
tables = [] # type: List['Table']
tables_append = tables.append
type_to_spans = self._type_to_spans
lststr = self._lststr
shadow = self._shadow[:]
ss, se = self._span
spans = type_to_spans.setdefault('Table', [])
if not spans:
# All the added spans will be new.
m = True # type: Any
while m:
m = False
for m in TABLE_FINDITER(shadow):
ms, me = m.span()
# Ignore leading whitespace using len(m[1]).
span = [ss + ms + len(m[1]), ss + me]
spans.append(span)
tables_append(Table(lststr, type_to_spans, span, 'Table'))
shadow[ms:me] = b'_' * (me - ms)
return tables
# Some spans already exist. Try to reuse an existing span
# before appending a new one.
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
m = True
while m:
m = False
for m in TABLE_FINDITER(shadow):
ms, me = m.span()
# Ignore leading whitespace using len(m[1]).
s, e = ss + ms + len(m[1]), ss + me
old_span = span_tuple_to_span_get((s, e))
if old_span is None:
span = [s, e]
insort(spans, span)
else:
span = old_span
tables_append(Table(lststr, type_to_spans, span, 'Table'))
shadow[ms:me] = b'_' * (me - ms)
return tables | python | {
"resource": ""
} |
q271103 | WikiText.lists | test | def lists(self, pattern: str = None) -> List['WikiList']:
r"""Return a list of WikiList objects.
:param pattern: The starting pattern for list items.
Return all types of lists (ol, ul, and dl) if pattern is None.
If pattern is not None, it will be passed to the regex engine,
remember to escape the `*` character. Examples:
- `\#` means top-level ordered lists
- `\#\*` means unordered lists inside an ordered one
- Currently definition lists are not well supported, but you
can use `[:;]` as their pattern.
Tips and tricks:
Be careful when using the following patterns as they will
probably cause malfunction in the `sublists` method of the
resultant List. (However don't worry about them if you are
not going to use the `sublists` method.)
- Use `\*+` as a pattern and nested unordered lists will be
treated as flat.
- Use `\*\s*` as the pattern to strip the leading whitespace from the `items` of the list.
Although the pattern parameter is optional, specifying it
can improve performance.
"""
lists = []
lists_append = lists.append
lststr = self._lststr
type_to_spans = self._type_to_spans
spans = type_to_spans.setdefault('WikiList', [])
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
shadow, ss = self._lists_shadow_ss
for pattern in \
(r'\#', r'\*', '[:;]') if pattern is None else (pattern,):
for m in finditer(
LIST_PATTERN_FORMAT.replace(b'{pattern}', pattern.encode()),
shadow, MULTILINE
):
ms, me = m.span()
s, e = ss + ms, ss + me
old_span = span_tuple_to_span_get((s, e))
if old_span is None:
span = [s, e]
insort(spans, span)
else:
span = old_span
lists_append(WikiList(
lststr, pattern, m, type_to_spans, span, 'WikiList'))
return lists | python | {
"resource": ""
} |
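A short sketch of the `pattern` parameter, assuming the `wikitextparser` import name (newer releases rename this method to `get_lists`):

```python
import wikitextparser as wtp

parsed = wtp.parse("# first\n# second\n")
ordered = parsed.lists(pattern=r"\#")  # top-level ordered lists only
print(ordered[0].items)                # expected: [' first', ' second']
```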
q271104 | WikiText.tags | test | def tags(self, name=None) -> List['Tag']:
"""Return all tags with the given name."""
lststr = self._lststr
type_to_spans = self._type_to_spans
if name:
if name in _tag_extensions:
string = lststr[0]
return [
Tag(lststr, type_to_spans, span, 'ExtensionTag')
for span in type_to_spans['ExtensionTag']
if string.startswith('<' + name, span[0])]
tags = [] # type: List['Tag']
else:
# No name was given; add all extension tags before using the shadow.
tags = [
Tag(lststr, type_to_spans, span, 'ExtensionTag')
for span in type_to_spans['ExtensionTag']]
tags_append = tags.append
# Get the left-most start tag, match it to right-most end tag
# and so on.
ss = self._span[0]
shadow = self._shadow
if name:
# There is a name but it is not in TAG_EXTENSIONS.
reversed_start_matches = reversed([m for m in regex_compile(
START_TAG_PATTERN.replace(
rb'{name}', rb'(?P<name>' + name.encode() + rb')')
).finditer(shadow)])
end_search = regex_compile(END_TAG_PATTERN.replace(
b'{name}', name.encode())).search
else:
reversed_start_matches = reversed(
[m for m in START_TAG_FINDITER(shadow)])
shadow_copy = shadow[:]
spans = type_to_spans.setdefault('Tag', [])
span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
spans_append = spans.append
for start_match in reversed_start_matches:
if start_match['self_closing']:
# Don't look for the end tag
s, e = start_match.span()
span = [ss + s, ss + e]
else:
# look for the end-tag
if name:
# the end_search is already available
# noinspection PyUnboundLocalVariable
end_match = end_search(shadow_copy, start_match.end())
else:
# build end_search according to start tag name
end_match = search(
END_TAG_PATTERN.replace(
b'{name}', start_match['name']),
shadow_copy)
if end_match:
s, e = end_match.span()
shadow_copy[s:e] = b'_' * (e - s)
span = [ss + start_match.start(), ss + e]
else:
# Assume start-only tag.
s, e = start_match.span()
span = [ss + s, ss + e]
old_span = span_tuple_to_span_get((span[0], span[1]))
if old_span is None:
spans_append(span)
else:
span = old_span
tags_append(Tag(lststr, type_to_spans, span, 'Tag'))
return sorted(tags, key=attrgetter('_span')) | python | {
"resource": ""
} |
q271105 | SubWikiText._subspans | test | def _subspans(self, _type: str) -> Generator[int, None, None]:
"""Yield all the sub-span indices excluding self._span."""
ss, se = self._span
spans = self._type_to_spans[_type]
# Do not yield self._span by bisecting for s < ss.
# The second bisect is an optimization and should be on [se + 1],
# but empty spans are not desired thus [se] is used.
b = bisect(spans, [ss])
for span in spans[b:bisect(spans, [se], b)]:
if span[1] <= se:
yield span | python | {
"resource": ""
} |
q271106 | SubWikiText.ancestors | test | def ancestors(self, type_: Optional[str] = None) -> List['WikiText']:
"""Return the ancestors of the current node.
:param type_: the type of the desired ancestors as a string.
Currently the following types are supported: {Template,
ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}.
The default is None and means all the ancestors of any type above.
"""
if type_ is None:
types = SPAN_PARSER_TYPES
else:
types = type_,
lststr = self._lststr
type_to_spans = self._type_to_spans
ss, se = self._span
ancestors = []
ancestors_append = ancestors.append
for type_ in types:
cls = globals()[type_]
spans = type_to_spans[type_]
for span in spans[:bisect(spans, [ss])]:
if se < span[1]:
ancestors_append(cls(lststr, type_to_spans, span, type_))
return sorted(ancestors, key=lambda i: ss - i._span[0]) | python | {
"resource": ""
} |
q271107 | SubWikiText.parent | test | def parent(self, type_: Optional[str] = None) -> Optional['WikiText']:
"""Return the parent node of the current object.
:param type_: the type of the desired parent object.
Currently the following types are supported: {Template,
ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}.
The default is None and means the first parent, of any type above.
:return: parent WikiText object or None if no parent with the desired
`type_` is found.
"""
ancestors = self.ancestors(type_)
if ancestors:
return ancestors[0]
return None | python | {
"resource": ""
} |
q271108 | mode | test | def mode(list_: List[T]) -> T:
"""Return the most common item in the list.
Return the first one if there is more than one most common item.
Example:
>>> mode([1,1,2,2,])
1
>>> mode([1,2,2])
2
>>> mode([])
Traceback (most recent call last):
...
ValueError: max() arg is an empty sequence
"""
return max(set(list_), key=list_.count) | python | {
"resource": ""
} |
q271109 | get_arg | test | def get_arg(name: str, args: Iterable[Argument]) -> Optional[Argument]:
"""Return the first argument in the args that has the given name.
Return None if no such argument is found.
As the computation of self.arguments is a little costly, this
function was created so that other methods that have already computed
the arguments can use it instead of calling self.get_arg directly.
"""
for arg in args:
if arg.name.strip(WS) == name.strip(WS):
return arg
return None | python | {
"resource": ""
} |
q271110 | Template.normal_name | test | def normal_name(
self,
rm_namespaces=('Template',),
capital_links=False,
_code: str = None,
*,
code: str = None,
capitalize=False
) -> str:
"""Return normal form of self.name.
- Remove comments.
- Remove language code.
- Remove the namespace ("template:" or any of `rm_namespaces`).
- Use space instead of underscore.
- Remove consecutive spaces.
- Use uppercase for the first letter if `capitalize`.
- Remove #anchor.
:param rm_namespaces: is used to provide additional localized
namespaces for the template namespace. They will be removed from
the result. Default is ('Template',).
:param capitalize: If True, convert the first letter of the
template's name to a capital letter. See
[[mw:Manual:$wgCapitalLinks]] for more info.
:param code: is the language code.
:param capital_links: deprecated.
:param _code: deprecated.
Example:
>>> Template(
... '{{ eN : tEmPlAtE : <!-- c --> t_1 # b | a }}'
... ).normal_name(code='en')
'T 1'
"""
if capital_links:
warn('`capital_links` argument is deprecated,'
' use `capitalize` instead', DeprecationWarning)
capitalize = capital_links
if _code:
warn('`_code` argument is deprecated,'
' use `code` instead', DeprecationWarning)
code = _code
# Remove comments
name = COMMENT_SUB('', self.name).strip(WS)
# Remove code
if code:
head, sep, tail = name.partition(':')
if not head and sep:
name = tail.strip(' ')
head, sep, tail = name.partition(':')
if code.lower() == head.strip(' ').lower():
name = tail.strip(' ')
# Remove namespace
head, sep, tail = name.partition(':')
if not head and sep:
name = tail.strip(' ')
head, sep, tail = name.partition(':')
if head:
ns = head.strip(' ').lower()
for namespace in rm_namespaces:
if namespace.lower() == ns:
name = tail.strip(' ')
break
# Use space instead of underscore
name = name.replace('_', ' ')
if capitalize:
# Use uppercase for the first letter
n0 = name[0]
if n0.islower():
name = n0.upper() + name[1:]
# Remove #anchor
name, sep, tail = name.partition('#')
return ' '.join(name.split()) | python | {
"resource": ""
} |
q271111 | Template.rm_first_of_dup_args | test | def rm_first_of_dup_args(self) -> None:
"""Eliminate duplicate arguments by removing the first occurrences.
Remove the first occurrences of duplicate arguments, regardless of
their value. The result of the rendered wikitext should remain the same.
Warning: Some meaningful data may be removed from wikitext.
Also see `rm_dup_args_safe` function.
"""
names = set() # type: set
for a in reversed(self.arguments):
name = a.name.strip(WS)
if name in names:
del a[:len(a.string)]
else:
names.add(name) | python | {
"resource": ""
} |
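A small illustration of the method above, assuming the `wikitextparser` import name; only the last occurrence of the duplicated `a` survives, matching what MediaWiki would render:

```python
import wikitextparser as wtp

template = wtp.parse("{{t|a=1|b=2|a=3}}").templates[0]
template.rm_first_of_dup_args()
print(template.string)  # expected: '{{t|b=2|a=3}}'
```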
q271112 | Template.rm_dup_args_safe | test | def rm_dup_args_safe(self, tag: str = None) -> None:
"""Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
Warning: Although this is considered safe and no meaningful data
is removed from the wikitext, the result of the rendered wikitext
may actually change if the second arg is empty and removed while
the first one had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
"""
name_to_lastarg_vals = {}  # type: Dict[str, Tuple[Argument, List[str]]]
# Removing positional args affects their name. By reversing the list
# we avoid encountering those kind of args.
for arg in reversed(self.arguments):
name = arg.name.strip(WS)
if arg.positional:
# It's not OK to strip whitespace in positional arguments.
val = arg.value
else:
# The value of keyword arguments is automatically stripped by MW.
val = arg.value.strip(WS)
if name in name_to_lastarg_vals:
# This is a duplicate argument.
if not val:
# This duplicate argument is empty. It's safe to remove it.
del arg[0:len(arg.string)]
else:
# Try to remove any of the detected duplicates of this
# that are empty or their value equals to this one.
lastarg, dup_vals = name_to_lastarg_vals[name]
if val in dup_vals:
del arg[0:len(arg.string)]
elif '' in dup_vals:
# This happens only if the last occurrence of name has
# been an empty string; other empty values will
# be removed as they are seen.
# In other words index of the empty argument in
# dup_vals is always 0.
del lastarg[0:len(lastarg.string)]
dup_vals.pop(0)
else:
# It was not possible to remove any of the duplicates.
dup_vals.append(val)
if tag:
arg.value += tag
else:
name_to_lastarg_vals[name] = (arg, [val]) | python | {
"resource": ""
} |
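A hedged sketch of the safe variant: only duplicates that are empty or carry the same value are removed (import name assumed as above):

```python
import wikitextparser as wtp

template = wtp.parse("{{t|a=1|a=|a=1}}").templates[0]
template.rm_dup_args_safe()
print(template.string)  # expected: '{{t|a=1}}'
```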
q271113 | Template.set_arg | test | def set_arg(
self, name: str,
value: str,
positional: bool = None,
before: str = None,
after: str = None,
preserve_spacing: bool = True
) -> None:
"""Set the value for `name` argument. Add it if it doesn't exist.
- Use `positional`, `before` and `after` keyword arguments only when
adding a new argument.
- If `before` is given, ignore `after`.
- If neither `before` nor `after` are given and it's needed to add a
new argument, then append the new argument to the end.
- If `positional` is True, try to add the given value as a positional
argument. Ignore `preserve_spacing` if positional is True.
If it's None, do what seems more appropriate.
"""
args = list(reversed(self.arguments))
arg = get_arg(name, args)
# Updating an existing argument.
if arg:
if positional:
arg.positional = positional
if preserve_spacing:
val = arg.value
arg.value = val.replace(val.strip(WS), value)
else:
arg.value = value
return
# Adding a new argument
if not name and positional is None:
positional = True
# Calculate the whitespace needed before arg-name and after arg-value.
if not positional and preserve_spacing and args:
before_names = []
name_lengths = []
before_values = []
after_values = []
for arg in args:
aname = arg.name
name_len = len(aname)
name_lengths.append(name_len)
before_names.append(STARTING_WS_MATCH(aname)[0])
arg_value = arg.value
before_values.append(STARTING_WS_MATCH(arg_value)[0])
after_values.append(ENDING_WS_MATCH(arg_value)[0])
pre_name_ws_mode = mode(before_names)
name_length_mode = mode(name_lengths)
post_value_ws_mode = mode(
[SPACE_AFTER_SEARCH(self.string)[0]] + after_values[1:]
)
pre_value_ws_mode = mode(before_values)
else:
preserve_spacing = False
# Calculate the string that needs to be added to the Template.
if positional:
# Ignore preserve_spacing for positional args.
addstring = '|' + value
else:
if preserve_spacing:
# noinspection PyUnboundLocalVariable
addstring = (
'|' + (pre_name_ws_mode + name.strip(WS)).
ljust(name_length_mode) +
'=' + pre_value_ws_mode + value + post_value_ws_mode
)
else:
addstring = '|' + name + '=' + value
# Place the addstring in the right position.
if before:
arg = get_arg(before, args)
arg.insert(0, addstring)
elif after:
arg = get_arg(after, args)
arg.insert(len(arg.string), addstring)
else:
if args and not positional:
arg = args[0]
arg_string = arg.string
if preserve_spacing:
# Insert after the last argument.
# The addstring needs to be recalculated because we don't
# want to change the whitespace before the final braces.
# noinspection PyUnboundLocalVariable
arg[0:len(arg_string)] = (
arg.string.rstrip(WS) + post_value_ws_mode +
addstring.rstrip(WS) + after_values[0]
)
else:
arg.insert(len(arg_string), addstring)
else:
# The template has no arguments or the new arg is
# positional AND is to be added at the end of the template.
self.insert(-2, addstring) | python | {
"resource": ""
} |
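A usage sketch for `set_arg`, assuming the `wikitextparser` import name: the first call updates an existing argument in place, the second appends a new one:

```python
import wikitextparser as wtp

template = wtp.parse("{{t|a=1}}").templates[0]
template.set_arg("a", "2")   # update the existing argument
template.set_arg("b", "3")   # append a new argument at the end
print(template.string)       # expected: '{{t|a=2|b=3}}'
```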
q271114 | Template.get_arg | test | def get_arg(self, name: str) -> Optional[Argument]:
"""Return the last argument with the given name.
Return None if no argument with that name is found.
"""
return get_arg(name, reversed(self.arguments)) | python | {
"resource": ""
} |
q271115 | Template.has_arg | test | def has_arg(self, name: str, value: str = None) -> bool:
"""Return true if the is an arg named `name`.
Also check equality of values if `value` is provided.
Note: If you just need to get an argument and you want to LBYL, it's
better to call get_arg directly and then check whether the returned value
is None.
"""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
if value:
if arg.positional:
if arg.value == value:
return True
return False
if arg.value.strip(WS) == value.strip(WS):
return True
return False
return True
return False | python | {
"resource": ""
} |
q271116 | Template.del_arg | test | def del_arg(self, name: str) -> None:
"""Delete all arguments with the given then."""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
del arg[:] | python | {
"resource": ""
} |
q271117 | crscode_to_string | test | def crscode_to_string(codetype, code, format):
"""
Lookup crscode on spatialreference.org and return in specified format.
Arguments:
- *codetype*: "epsg", "esri", or "sr-org".
- *code*: The code.
- *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others...
Returns:
- Crs string in the specified format.
"""
link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,format)
result = urllib2.urlopen(link).read()
if not isinstance(result, str):
result = result.decode()
return result | python | {
"resource": ""
} |
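A hedged example of the lookup above, assuming this module is importable as `pycrs.utils`; it requires network access to spatialreference.org:

```python
import pycrs.utils as utils  # assumed import path for this module

proj4 = utils.crscode_to_string("epsg", 4326, "proj4")
print(proj4)  # e.g. '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
```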
q271118 | find | test | def find(ellipsname, crstype, strict=False):
"""
Search for an ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
ellipsname = ellipsname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_") or itemname == 'Ellipsoid':
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if ellipsname == itemname:
return item
except:
pass
else:
return None | python | {
"resource": ""
} |
q271119 | from_url | test | def from_url(url, format=None):
"""
Returns the crs object from a string interpreted as a specified format, located at a given URL.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object.
"""
# first get string from url
string = urllib2.urlopen(url).read()
if PY3 is True:
# decode str into string
string = string.decode('utf-8')
# then determine parser
if format:
# user specified format
format = format.lower().replace(" ", "_")
func = parse.__getattr__("from_%s" % format)
else:
# unknown format
func = parse.from_unknown_text
# then load
crs = func(string)
return crs | python | {
"resource": ""
} |
q271120 | from_file | test | def from_file(filepath):
"""
Returns the crs object from a file, with the format determined from the filename extension.
Arguments:
- *filepath*: filepath to be loaded, including extension.
"""
if filepath.endswith(".prj"):
string = open(filepath, "r").read()
return parse.from_unknown_wkt(string)
elif filepath.endswith((".geojson",".json")):
raw = open(filepath).read()
geoj = json.loads(raw)
if "crs" in geoj:
crsinfo = geoj["crs"]
if crsinfo["type"] == "name":
string = crsinfo["properties"]["name"]
return parse.from_unknown_text(string)
elif crsinfo["type"] == "link":
url = crsinfo["properties"]["name"]
type = crsinfo["properties"].get("type")
return from_url(url, format=type)
else: raise FormatError("Invalid GeoJSON crs type: must be either 'name' or 'link'")
else:
# assume default wgs84 as per the spec
return parse.from_epsg_code("4326") | python | {
"resource": ""
} |
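A minimal sketch of `from_file`, assuming this module is importable as `pycrs.load`; the parser is chosen from the extension (`.prj` is treated as WKT, `.geojson`/`.json` via the embedded `crs` member):

```python
import pycrs  # assumed package name for this module

crs = pycrs.load.from_file("example.prj")  # hypothetical file path
print(crs.to_proj4())
```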
q271121 | from_epsg_code | test | def from_epsg_code(code):
"""
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("epsg", code, "proj4")
crs = from_proj4(proj4)
return crs | python | {
"resource": ""
} |
q271122 | from_esri_code | test | def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("esri", code, "proj4")
crs = from_proj4(proj4)
return crs | python | {
"resource": ""
} |
q271123 | from_sr_code | test | def from_sr_code(code):
"""
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("sr-org", code, "proj4")
crs = from_proj4(proj4)
return crs | python | {
"resource": ""
} |
q271124 | from_unknown_text | test | def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
return crs | python | {
"resource": ""
} |
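A hedged example of the auto-detection rules above (proj4 strings start with `+`, WKT with `PROJCS[`/`GEOGCS[`, and `EPSG:`/`ESRI:`/`SR-ORG:` prefixes are treated as codes), assuming the module is importable as `pycrs.parse`:

```python
import pycrs  # assumed package name

crs = pycrs.parse.from_unknown_text("+proj=longlat +datum=WGS84 +no_defs")
print(crs.to_proj4())
```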
q271125 | RawVLR.write_to | test | def write_to(self, out):
""" Write the raw header content to the out stream
Parameters:
----------
out : {file object}
The output stream
"""
out.write(bytes(self.header))
out.write(self.record_data) | python | {
"resource": ""
} |
q271126 | RawVLR.read_from | test | def read_from(cls, data_stream):
""" Instantiate a RawVLR by reading the content from the
data stream
Parameters:
----------
data_stream : {file object}
The input stream
Returns
-------
RawVLR
The RawVLR read
"""
raw_vlr = cls()
header = RawVLRHeader.from_stream(data_stream)
raw_vlr.header = header
raw_vlr.record_data = data_stream.read(header.record_length_after_header)
return raw_vlr | python | {
"resource": ""
} |
q271127 | parse_geo_tiff_keys_from_vlrs | test | def parse_geo_tiff_keys_from_vlrs(vlr_list: vlrlist.VLRList) -> List[GeoTiffKey]:
""" Gets the 3 GeoTiff vlrs from the vlr_list and parse them into
a nicer structure
Parameters
----------
vlr_list: pylas.vlrs.vlrlist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
"""
geo_key_dir = vlr_list.get_by_id(
GeoKeyDirectoryVlr.official_user_id(), GeoKeyDirectoryVlr.official_record_ids()
)[0]
geo_doubles = vlr_list.get_by_id(
GeoDoubleParamsVlr.official_user_id(), GeoDoubleParamsVlr.official_record_ids()
)[0]
geo_ascii = vlr_list.get_by_id(
GeoAsciiParamsVlr.official_user_id(), GeoAsciiParamsVlr.official_record_ids()
)[0]
return parse_geo_tiff(geo_key_dir, geo_doubles, geo_ascii) | python | {
"resource": ""
} |
q271128 | parse_geo_tiff | test | def parse_geo_tiff(
key_dir_vlr: GeoKeyDirectoryVlr,
double_vlr: GeoDoubleParamsVlr,
ascii_vlr: GeoAsciiParamsVlr,
) -> List[GeoTiffKey]:
""" Parses the GeoTiff VLRs information into nicer structs
"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset
elif k.tiff_tag_location == 34736:
value = double_vlr.doubles[k.value_offset]
elif k.tiff_tag_location == 34737:
try:
value = ascii_vlr.strings[k.value_offset][k.count :]
except IndexError:
# Maybe I'm just misunderstanding the specification :thinking:
value = ascii_vlr.strings[0][k.value_offset : k.value_offset + k.count]
else:
logger.warning(
"GeoTiffKey with unknown tiff tag location ({})".format(
k.tiff_tag_location
)
)
continue
geotiff_keys.append(GeoTiffKey(k.id, value))
return geotiff_keys | python | {
"resource": ""
} |
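A hedged sketch tying the two helpers together: read a file with `pylas` (the package these modules appear to belong to) and print its GeoTiff keys, if any; `parse_geo_tiff_keys_from_vlrs` raises IndexError when the GeoTiff VLRs are absent:

```python
import pylas

las = pylas.read("pylastests/simple.las")  # path reused from doctests elsewhere in this file
try:
    for key in parse_geo_tiff_keys_from_vlrs(las.vlrs):  # from this module
        # GeoTiffKey is assumed to expose .id and .value
        print(key.id, key.value)
except IndexError:
    print("no GeoTiff VLRs in this file")
```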
q271129 | get_signedness_for_extra_dim | test | def get_signedness_for_extra_dim(type_index):
""" Returns the signedness foe the given type index
Parameters
----------
type_index: int
index of the type as defined in the LAS Specification
Returns
-------
DimensionSignedness,
the enum variant
"""
try:
t = _extra_dims_style_2[type_index]
if "uint" in t:
return DimensionSignedness.UNSIGNED
elif "int" in t:
return DimensionSignedness.SIGNED
else:
return DimensionSignedness.FLOATING
except IndexError:
raise errors.UnknownExtraType(type_index) | python | {
"resource": ""
} |
q271130 | get_id_for_extra_dim_type | test | def get_id_for_extra_dim_type(type_str):
""" Returns the index of the type as defined in the LAS Specification
Parameters
----------
type_str: str
Returns
-------
int
index of the type
"""
try:
return _type_to_extra_dim_id_style_1[type_str]
except KeyError:
try:
return _type_to_extra_dim_id_style_2[type_str]
except KeyError:
raise errors.UnknownExtraType(type_str) | python | {
"resource": ""
} |
q271131 | PointRecord.from_point_record | test | def from_point_record(cls, other_point_record, new_point_format):
""" Construct a new PackedPointRecord from an existing one with the ability to change
to point format while doing so
"""
array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype)
new_record = cls(array, new_point_format)
new_record.copy_fields_from(other_point_record)
return new_record | python | {
"resource": ""
} |
q271132 | PointRecord.copy_fields_from | test | def copy_fields_from(self, other_record):
""" Tries to copy the values of the current dimensions from other_record
"""
for dim_name in self.dimensions_names:
try:
self[dim_name] = other_record[dim_name]
except ValueError:
pass | python | {
"resource": ""
} |
q271133 | PointRecord._append_zeros_if_too_small | test | def _append_zeros_if_too_small(self, value):
""" Appends zeros to the points stored if the value we are trying to
fit is bigger
"""
size_diff = len(value) - len(self.array)
if size_diff:
self.array = np.append(
self.array, np.zeros(size_diff, dtype=self.array.dtype)
) | python | {
"resource": ""
} |
q271134 | PackedPointRecord.all_dimensions_names | test | def all_dimensions_names(self):
""" Returns all the dimensions names, including the names of sub_fields
and their corresponding packed fields
"""
return frozenset(self.array.dtype.names + tuple(self.sub_fields_dict.keys())) | python | {
"resource": ""
} |
q271135 | PackedPointRecord.zeros | test | def zeros(cls, point_format, point_count):
""" Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format: PointFormat
The point format the point record should have
point_count : int
The number of point the point record should have
Returns
-------
PackedPointRecord
"""
data = np.zeros(point_count, point_format.dtype)
return cls(data, point_format) | python | {
"resource": ""
} |
q271136 | PackedPointRecord.from_stream | test | def from_stream(cls, stream, point_format, count):
""" Construct the point record by reading the points from the stream
"""
points_dtype = point_format.dtype
point_data_buffer = bytearray(stream.read(count * points_dtype.itemsize))
try:
data = np.frombuffer(point_data_buffer, dtype=points_dtype, count=count)
except ValueError:
expected_bytes_len = count * points_dtype.itemsize
if len(point_data_buffer) % points_dtype.itemsize != 0:
missing_bytes_len = expected_bytes_len - len(point_data_buffer)
raise_not_enough_bytes_error(
expected_bytes_len,
missing_bytes_len,
len(point_data_buffer),
points_dtype,
)
else:
actual_count = len(point_data_buffer) // points_dtype.itemsize
logger.critical(
"Expected {} points, there are {} ({} missing)".format(
count, actual_count, count - actual_count
)
)
data = np.frombuffer(
point_data_buffer, dtype=points_dtype, count=actual_count
)
return cls(data, point_format) | python | {
"resource": ""
} |
q271137 | PackedPointRecord.from_compressed_buffer | test | def from_compressed_buffer(cls, compressed_buffer, point_format, count, laszip_vlr):
""" Construct the point record by reading and decompressing the points data from
the input buffer
"""
point_dtype = point_format.dtype
uncompressed = decompress_buffer(
compressed_buffer, point_dtype, count, laszip_vlr
)
return cls(uncompressed, point_format) | python | {
"resource": ""
} |
q271138 | LasBase.x | test | def x(self):
""" Returns the scaled x positions of the points as doubles
"""
return scale_dimension(self.X, self.header.x_scale, self.header.x_offset) | python | {
"resource": ""
} |
q271139 | LasBase.y | test | def y(self):
""" Returns the scaled y positions of the points as doubles
"""
return scale_dimension(self.Y, self.header.y_scale, self.header.y_offset) | python | {
"resource": ""
} |
q271140 | LasBase.z | test | def z(self):
""" Returns the scaled z positions of the points as doubles
"""
return scale_dimension(self.Z, self.header.z_scale, self.header.z_offset) | python | {
"resource": ""
} |
q271141 | LasBase.add_extra_dim | test | def add_extra_dim(self, name, type, description=""):
""" Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
"""
name = name.replace(" ", "_")
type_id = extradims.get_id_for_extra_dim_type(type)
extra_byte = ExtraBytesStruct(
data_type=type_id, name=name.encode(), description=description.encode()
)
try:
extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0]
except IndexError:
extra_bytes_vlr = ExtraBytesVlr()
self.vlrs.append(extra_bytes_vlr)
finally:
extra_bytes_vlr.extra_bytes_structs.append(extra_byte)
self.points_data.add_extra_dims([(name, type)]) | python | {
"resource": ""
} |
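A usage sketch for `add_extra_dim`, using the public `pylas.read` entry point shown elsewhere in this excerpt; the output path is hypothetical:

```python
import pylas

las = pylas.read("pylastests/simple.las")
las.add_extra_dim("confidence", "uint8", "per-point confidence score")
las["confidence"][:] = 255          # the new dimension starts zeroed
las.write("simple_with_extra.laz")  # hypothetical path; LAZ output needs a compression backend
```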
q271142 | LasBase.write_to | test | def write_to(self, out_stream, do_compress=False):
""" writes the data to a stream
Parameters
----------
out_stream: file object
the destination stream, implementing the write method
do_compress: bool, optional, default False
Flag to indicate if you want the data to be compressed
"""
self.update_header()
if (
self.vlrs.get("ExtraBytesVlr")
and not self.points_data.extra_dimensions_names
):
logger.error(
"Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, "
"removing the vlr"
)
self.vlrs.extract("ExtraBytesVlr")
if do_compress:
laz_vrl = create_laz_vlr(self.points_data)
self.vlrs.append(known.LasZipVlr(laz_vrl.data()))
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
self.header.point_format_id = uncompressed_id_to_compressed(
self.header.point_format_id
)
self.header.number_of_vlr = len(raw_vlrs)
points_bytes = compress_buffer(
np.frombuffer(self.points_data.array, np.uint8),
laz_vrl.schema,
self.header.offset_to_point_data,
).tobytes()
else:
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.number_of_vlr = len(raw_vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
points_bytes = self.points_data.raw_bytes()
self.header.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.size)
raw_vlrs.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.offset_to_point_data)
out_stream.write(points_bytes) | python | {
"resource": ""
} |
q271143 | LasBase.write_to_file | test | def write_to_file(self, filename, do_compress=None):
""" Writes the las data into a file
Parameters
----------
filename : str
The file where the data should be written.
do_compress: bool, optional, default None
if None the extension of the filename will be used
to determine if the data should be compressed
otherwise the do_compress flag indicates if the data should be compressed
"""
is_ext_laz = filename.split(".")[-1] == "laz"
if is_ext_laz and do_compress is None:
do_compress = True
with open(filename, mode="wb") as out:
self.write_to(out, do_compress=do_compress) | python | {
"resource": ""
} |
q271144 | LasBase.write | test | def write(self, destination, do_compress=None):
""" Writes to a stream or file
When destination is a string, it will be interpreted as the path were the file should be written to,
also if do_compress is None, the compression will be guessed from the file extension:
- .laz -> compressed
- .las -> uncompressed
.. note::
This means that you could do something like:
# Create .laz but not compressed
las.write('out.laz', do_compress=False)
# Create .las but compressed
las.write('out.las', do_compress=True)
While it should not confuse Las/Laz readers, it will confuse humans so avoid doing it
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flags to indicate if you want to compress the data
"""
if isinstance(destination, str):
self.write_to_file(destination, do_compress)
else:
if do_compress is None:
do_compress = False
self.write_to(destination, do_compress=do_compress) | python | {
"resource": ""
} |
q271145 | _build_point_formats_dtypes | test | def _build_point_formats_dtypes(point_format_dimensions, dimensions_dict):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are still packed, and need to be unpacked each time
you want to access them
"""
return {
fmt_id: _point_format_to_dtype(point_fmt, dimensions_dict)
for fmt_id, point_fmt in point_format_dimensions.items()
} | python | {
"resource": ""
} |
q271146 | _build_unpacked_point_formats_dtypes | test | def _build_unpacked_point_formats_dtypes(
point_formats_dimensions, composed_fields_dict, dimensions_dict
):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are unpacked and can be accessed directly
"""
unpacked_dtypes = {}
for fmt_id, dim_names in point_formats_dimensions.items():
composed_dims, dtype = composed_fields_dict[fmt_id], []
for dim_name in dim_names:
if dim_name in composed_dims:
dtype.extend((f.name, f.type) for f in composed_dims[dim_name])
else:
dtype.append(dimensions_dict[dim_name])
unpacked_dtypes[fmt_id] = np.dtype(dtype)
return unpacked_dtypes | python | {
"resource": ""
} |
q271147 | np_dtype_to_point_format | test | def np_dtype_to_point_format(dtype, unpacked=False):
""" Tries to find a matching point format id for the input numpy dtype
To match, the input dtype has to be 100% equal to a point format dtype
so all names & dimension types must match
Parameters:
----------
dtype : numpy.dtype
The input dtype
unpacked : bool, optional
If True, match against the unpacked point format dtypes (the default is False)
Raises
------
errors.IncompatibleDataFormat
If No compatible point format was found
Returns
-------
int
The compatible point format found
"""
all_dtypes = (
ALL_POINT_FORMATS_DTYPE if not unpacked else UNPACKED_POINT_FORMATS_DTYPES
)
for format_id, fmt_dtype in all_dtypes.items():
if fmt_dtype == dtype:
return format_id
else:
raise errors.IncompatibleDataFormat(
"Data type of array is not compatible with any point format (array dtype: {})".format(
dtype
)
) | python | {
"resource": ""
} |
q271148 | min_file_version_for_point_format | test | def min_file_version_for_point_format(point_format_id):
""" Returns the minimum file version that supports the given point_format_id
"""
for version, point_formats in sorted(VERSION_TO_POINT_FMT.items()):
if point_format_id in point_formats:
return version
else:
raise errors.PointFormatNotSupported(point_format_id) | python | {
"resource": ""
} |
q271149 | is_point_fmt_compatible_with_version | test | def is_point_fmt_compatible_with_version(point_format_id, file_version):
""" Returns true if the file version support the point_format_id
"""
try:
return point_format_id in VERSION_TO_POINT_FMT[str(file_version)]
except KeyError:
raise errors.FileVersionNotSupported(file_version) | python | {
"resource": ""
} |
q271150 | VLRList.get | test | def get(self, vlr_type):
""" Returns the list of vlrs of the requested type
Always returns a list even if there is only one VLR of type vlr_type.
>>> import pylas
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get("WktCoordinateSystemVlr")
[]
>>> las.vlrs.get("WktCoordinateSystemVlr")[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get('ExtraBytesVlr')
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get('ExtraBytesVlr')[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
:py:class:`list`
a List of vlrs matching the user_id and records_ids
"""
return [v for v in self.vlrs if v.__class__.__name__ == vlr_type] | python | {
"resource": ""
} |
q271151 | VLRList.extract | test | def extract(self, vlr_type):
""" Returns the list of vlrs of the requested type
The difference with get is that the returned vlrs will be removed from the list
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
list
a List of vlrs matching the user_id and records_ids
"""
kept_vlrs, extracted_vlrs = [], []
for vlr in self.vlrs:
if vlr.__class__.__name__ == vlr_type:
extracted_vlrs.append(vlr)
else:
kept_vlrs.append(vlr)
self.vlrs = kept_vlrs
return extracted_vlrs | python | {
"resource": ""
} |
q271152 | VLRList.read_from | test | def read_from(cls, data_stream, num_to_read):
""" Reads vlrs and parse them if possible from the stream
Parameters
----------
data_stream : io.BytesIO
stream to read from
num_to_read : int
number of vlrs to be read
Returns
-------
pylas.vlrs.vlrlist.VLRList
List of vlrs
"""
vlrlist = cls()
for _ in range(num_to_read):
raw = RawVLR.read_from(data_stream)
try:
vlrlist.append(vlr_factory(raw))
except UnicodeDecodeError:
logger.error("Failed to decode VLR: {}".format(raw))
return vlrlist | python | {
"resource": ""
} |
q271153 | files_have_same_point_format_id | test | def files_have_same_point_format_id(las_files):
""" Returns true if all the files have the same points format id
"""
point_format_found = {las.header.point_format_id for las in las_files}
return len(point_format_found) == 1 | python | {
"resource": ""
} |
q271154 | files_have_same_dtype | test | def files_have_same_dtype(las_files):
""" Returns true if all the files have the same numpy datatype
"""
dtypes = {las.points.dtype for las in las_files}
return len(dtypes) == 1 | python | {
"resource": ""
} |
q271155 | _raise_if_wrong_file_signature | test | def _raise_if_wrong_file_signature(stream):
""" Reads the 4 first bytes of the stream to check that is LASF"""
file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE))
if file_sig != headers.LAS_FILE_SIGNATURE:
raise errors.PylasError(
"File Signature ({}) is not {}".format(file_sig, headers.LAS_FILE_SIGNATURE)
) | python | {
"resource": ""
} |
q271156 | LasReader.read_header | test | def read_header(self):
""" Reads the head of the las file and returns it
"""
self.stream.seek(self.start_pos)
return headers.HeaderFactory().read_from_stream(self.stream) | python | {
"resource": ""
} |
q271157 | LasReader.read_vlrs | test | def read_vlrs(self):
""" Reads and return the vlrs of the file
"""
self.stream.seek(self.start_pos + self.header.size)
return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr) | python | {
"resource": ""
} |
q271158 | LasReader._read_points | test | def _read_points(self, vlrs):
""" private function to handle reading of the points record parts
of the las file.
the header is needed for the point format and number of points
the vlrs are needed to get the potential laszip vlr as well as the extra bytes vlr
"""
try:
extra_dims = vlrs.get("ExtraBytesVlr")[0].type_of_extra_dims()
except IndexError:
extra_dims = None
point_format = PointFormat(self.header.point_format_id, extra_dims=extra_dims)
if self.header.are_points_compressed:
laszip_vlr = vlrs.pop(vlrs.index("LasZipVlr"))
points = self._read_compressed_points_data(laszip_vlr, point_format)
else:
points = record.PackedPointRecord.from_stream(
self.stream, point_format, self.header.point_count
)
return points | python | {
"resource": ""
} |
q271159 | LasReader._read_compressed_points_data | test | def _read_compressed_points_data(self, laszip_vlr, point_format):
""" reads the compressed point record
"""
offset_to_chunk_table = struct.unpack("<q", self.stream.read(8))[0]
size_of_point_data = offset_to_chunk_table - self.stream.tell()
if offset_to_chunk_table <= 0:
logger.warning(
"Strange offset to chunk table: {}, ignoring it..".format(
offset_to_chunk_table
)
)
size_of_point_data = -1 # Read everything
points = record.PackedPointRecord.from_compressed_buffer(
self.stream.read(size_of_point_data),
point_format,
self.header.point_count,
laszip_vlr,
)
return points | python | {
"resource": ""
} |
q271160 | LasReader._read_internal_waveform_packet | test | def _read_internal_waveform_packet(self):
""" reads and returns the waveform vlr header, waveform record
"""
# This is strange, the spec says, waveform data packet is in a EVLR
# but in the 2 samples I have its a VLR
# but also the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)
b = bytearray(self.stream.read(rawvlr.VLR_HEADER_SIZE))
waveform_header = rawvlr.RawVLRHeader.from_buffer(b)
waveform_record = self.stream.read()
logger.debug(
"Read: {} MBytes of waveform_record".format(len(waveform_record) / 10 ** 6)
)
return waveform_header, waveform_record | python | {
"resource": ""
} |
q271161 | LasReader.read_evlrs | test | def read_evlrs(self):
""" Reads the EVLRs of the file, will fail if the file version
does not support evlrs
"""
self.stream.seek(self.start_pos + self.header.start_of_first_evlr)
return evlrs.EVLRList.read_from(self.stream, self.header.number_of_evlr) | python | {
"resource": ""
} |
q271162 | LasReader._warn_if_not_at_expected_pos | test | def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
""" Helper function to warn about unknown bytes found in the file"""
diff = expected_pos - self.stream.tell()
if diff != 0:
logger.warning(
"There are {} bytes between {} and {}".format(diff, end_of, start_of)
) | python | {
"resource": ""
} |
q271163 | open_las | test | def open_las(source, closefd=True):
""" Opens and reads the header of the las content in the source
>>> with open_las('pylastests/simple.las') as f:
... print(f.header.point_format_id)
3
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f, closefd=False) as flas:
... print(flas.header)
<LasHeader(1.2)>
>>> f.closed
False
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f) as flas:
... las = flas.read()
>>> f.closed
True
Parameters
----------
source : str or io.BytesIO
if source is a str it must be a filename
a stream if a file object with the methods read, seek, tell
closefd: bool
Whether the stream/file object shall be closed; this only works
when using open_las in a with statement. An exception is raised if
closefd is False and the source is a filename
Returns
-------
pylas.lasreader.LasReader
"""
if isinstance(source, str):
stream = open(source, mode="rb")
if not closefd:
raise ValueError("Cannot use closefd with filename")
elif isinstance(source, bytes):
stream = io.BytesIO(source)
else:
stream = source
return LasReader(stream, closefd=closefd) | python | {
"resource": ""
} |
q271164 | read_las | test | def read_las(source, closefd=True):
""" Entry point for reading las data in pylas
Reads the whole file into memory.
>>> las = read_las("pylastests/simple.las")
>>> las.classification
array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)
Parameters
----------
source : str or io.BytesIO
The source to read data from
closefd: bool
if True and the source is a stream, the function will close it
after it is done reading
Returns
-------
pylas.lasdatas.base.LasBase
The object you can interact with to get access to the LAS points & VLRs
"""
with open_las(source, closefd=closefd) as reader:
return reader.read() | python | {
"resource": ""
} |
q271165 | create_from_header | test | def create_from_header(header):
""" Creates a File from an existing header,
allocating the array of points according to the provided header.
The input header is copied.
Parameters
----------
header : existing header to be used to create the file
Returns
-------
pylas.lasdatas.base.LasBase
"""
header = copy.copy(header)
header.point_count = 0
points = record.PackedPointRecord.empty(PointFormat(header.point_format_id))
if header.version >= "1.4":
return las14.LasData(header=header, points=points)
return las12.LasData(header=header, points=points) | python | {
"resource": ""
} |
q271166 | create_las | test | def create_las(*, point_format_id=0, file_version=None):
""" Function to create a new empty las data object
.. note::
If you provide both point_format and file_version
an exception will be raised if they are not compatible
>>> las = create_las(point_format_id=6,file_version="1.2")
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
If you provide only the point_format the file_version will automatically
selected for you.
>>> las = create_las(point_format_id=0)
>>> las.header.version == '1.2'
True
>>> las = create_las(point_format_id=6)
>>> las.header.version == '1.4'
True
Parameters
----------
point_format_id: int
The point format you want the created file to have
file_version: str, optional, default=None
The las version you want the created las to have
Returns
-------
pylas.lasdatas.base.LasBase
A new las data object
"""
if file_version is not None:
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
else:
file_version = dims.min_file_version_for_point_format(point_format_id)
header = headers.HeaderFactory.new(file_version)
header.point_format_id = point_format_id
if file_version >= "1.4":
return las14.LasData(header=header)
return las12.LasData(header=header) | python | {
"resource": ""
} |
q271167 | convert | test | def convert(source_las, *, point_format_id=None, file_version=None):
""" Converts a Las from one point format to another
Automatically upgrades the file version if source file version is not compatible with
the new point_format_id
convert to point format 0
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=0)
>>> las.header.point_format_id
0
>>> las.header.version
'1.2'
convert to point format 6, which need version >= 1.4
then convert back to point format 0, version is not downgraded
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=6)
>>> las.header.point_format_id
6
>>> las.header.version
'1.4'
>>> las = convert(las, point_format_id=0)
>>> las.header.version
'1.4'
an exception is raised if the requested point format is not compatible
with the file version
>>> las = read_las('pylastests/simple.las')
>>> convert(las, point_format_id=6, file_version='1.2')
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
Parameters
----------
source_las : pylas.lasdatas.base.LasBase
The source data to be converted
point_format_id : int, optional
The new point format id (the default is None, which won't change the source format id)
file_version : str, optional,
The new file version. None by default which means that the file_version
may be upgraded for compatibility with the new point_format. The file version will not
be downgraded.
Returns
-------
pylas.lasdatas.base.LasBase
"""
if point_format_id is None:
point_format_id = source_las.points_data.point_format.id
if file_version is None:
file_version = max(
source_las.header.version,
dims.min_file_version_for_point_format(point_format_id),
)
else:
file_version = str(file_version)
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
header = headers.HeaderFactory.convert_header(source_las.header, file_version)
header.point_format_id = point_format_id
point_format = PointFormat(
point_format_id, source_las.points_data.point_format.extra_dims
)
points = record.PackedPointRecord.from_point_record(
source_las.points_data, point_format
)
try:
evlrs = source_las.evlrs
except ValueError:
evlrs = []
if file_version >= "1.4":
las = las14.LasData(
header=header, vlrs=source_las.vlrs, points=points, evlrs=evlrs
)
else:
if evlrs:
logger.warning(
"The source contained {} EVLRs,"
" they will be lost as version {} doest not support them".format(
len(evlrs), file_version
)
)
las = las12.LasData(header=header, vlrs=source_las.vlrs, points=points)
return las | python | {
"resource": ""
} |
q271168 | merge_las | test | def merge_las(*las_files):
""" Merges multiple las files into one
merged = merge_las(las_1, las_2)
merged = merge_las([las_1, las_2, las_3])
Parameters
----------
las_files: Iterable of LasData or LasData
Returns
-------
pylas.lasdatas.base.LasBase
The result of the merging
"""
if len(las_files) == 1:
las_files = las_files[0]
if not las_files:
raise ValueError("No files to merge")
if not utils.files_have_same_dtype(las_files):
raise ValueError("All files must have the same point format")
header = las_files[0].header
num_pts_merged = sum(len(las.points) for las in las_files)
# scaled x, y, z have to be set manually
# to be sure to have a good offset in the header
merged = create_from_header(header)
# TODO extra dimensions should be managed better here
for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims:
merged.add_extra_dim(dim_name, dim_type)
merged.points = np.zeros(num_pts_merged, merged.points.dtype)
merged_x = np.zeros(num_pts_merged, np.float64)
merged_y = np.zeros(num_pts_merged, np.float64)
merged_z = np.zeros(num_pts_merged, np.float64)
offset = 0
for i, las in enumerate(las_files, start=1):
slc = slice(offset, offset + len(las.points))
merged.points[slc] = las.points
merged_x[slc] = las.x
merged_y[slc] = las.y
merged_z[slc] = las.z
merged['point_source_id'][slc] = i
offset += len(las.points)
merged.x = merged_x
merged.y = merged_y
merged.z = merged_z
return merged | python | {
"resource": ""
} |
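A hedged sketch of `merge_las` (assuming this module's functions are importable): inputs must share one point format, and `point_source_id` is rewritten to the 1-based index of each source file:

```python
import pylas

las_a = pylas.read("pylastests/simple.las")
las_b = pylas.read("pylastests/simple.las")
merged = merge_las(las_a, las_b)  # or pass a list: merge_las([las_a, las_b])
assert len(merged.points) == len(las_a.points) + len(las_b.points)
```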
q271169 | write_then_read_again | test | def write_then_read_again(las, do_compress=False):
""" writes the given las into memory using BytesIO and
reads it again, returning the newly read file.
Mostly used for testing purposes, without having to write to disk
"""
out = io.BytesIO()
las.write(out, do_compress=do_compress)
out.seek(0)
return read_las(out) | python | {
"resource": ""
} |
q271170 | RawHeader1_1.date | test | def date(self):
""" Returns the creation date stored in the las file
Returns
-------
datetime.date
"""
try:
return datetime.date(self.creation_year, 1, 1) + datetime.timedelta(
self.creation_day_of_year - 1
)
except ValueError:
return None | python | {
"resource": ""
} |
q271171 | RawHeader1_1.date | test | def date(self, date):
""" Returns the date of file creation as a python date object
"""
self.creation_year = date.year
self.creation_day_of_year = date.timetuple().tm_yday | python | {
"resource": ""
} |
q271172 | RawHeader1_1.mins | test | def mins(self):
""" Returns de minimum values of x, y, z as a numpy array
"""
return np.array([self.x_min, self.y_min, self.z_min]) | python | {
"resource": ""
} |
q271173 | RawHeader1_1.mins | test | def mins(self, value):
""" Sets de minimum values of x, y, z as a numpy array
"""
self.x_min, self.y_min, self.z_min = value | python | {
"resource": ""
} |
q271174 | RawHeader1_1.maxs | test | def maxs(self):
""" Returns de maximum values of x, y, z as a numpy array
"""
return np.array([self.x_max, self.y_max, self.z_max]) | python | {
"resource": ""
} |
q271175 | RawHeader1_1.maxs | test | def maxs(self, value):
""" Sets de maximum values of x, y, z as a numpy array
"""
self.x_max, self.y_max, self.z_max = value | python | {
"resource": ""
} |
q271176 | RawHeader1_1.scales | test | def scales(self):
""" Returns the scaling values of x, y, z as a numpy array
"""
return np.array([self.x_scale, self.y_scale, self.z_scale]) | python | {
"resource": ""
} |
q271177 | RawHeader1_1.offsets | test | def offsets(self):
""" Returns the offsets values of x, y, z as a numpy array
"""
return np.array([self.x_offset, self.y_offset, self.z_offset]) | python | {
"resource": ""
} |
q271178 | HeaderFactory.peek_file_version | test | def peek_file_version(cls, stream):
""" seeks to the position of the las version header fields
in the stream and returns it as a str
Parameters
----------
stream io.BytesIO
Returns
-------
str
file version read from the stream
"""
old_pos = stream.tell()
stream.seek(cls._offset_to_major_version)
major = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
minor = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
stream.seek(old_pos)
return "{}.{}".format(major, minor) | python | {
"resource": ""
} |
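peek_file_version restores the stream position, so it can be called before any parsing; despite the docstring, any seekable binary stream works. A sketch with an illustrative path:
with open("example.las", "rb") as stream:  # illustrative path
    version = HeaderFactory.peek_file_version(stream)
print(version)  # e.g. "1.2"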
q271179 | HeaderFactory.convert_header | test | def convert_header(cls, old_header, new_version):
""" Converts a header to a another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
"""
new_header_class = cls.header_class_for_version(new_version)
b = bytearray(old_header)
b += b"\x00" * (ctypes.sizeof(new_header_class) - len(b))
new_header = new_header_class.from_buffer(b)
new_header.version = str(new_version)
return new_header | python | {
"resource": ""
} |
q271180 | unpack | test | def unpack(source_array, mask, dtype=np.uint8):
""" Unpack sub field using its mask
Parameters
----------
source_array : numpy.ndarray
The source array
mask : mask (e.g. 0b00001111)
Mask of the sub field to be extracted from the source array
dtype : numpy dtype, optional
The dtype of the returned array (default: np.uint8)
Returns
-------
numpy.ndarray
The sub field array
"""
lsb = least_significant_bit(mask)
return ((source_array & mask) >> lsb).astype(dtype) | python | {
"resource": ""
} |
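A self-contained sketch of unpack on LAS-style flag bytes (the masks match point format 0 sub fields; the values are illustrative):
import numpy as np
raw = np.array([0b00101011, 0b00010110], dtype=np.uint8)
unpack(raw, 0b00000111)  # return_number     -> array([3, 6], dtype=uint8)
unpack(raw, 0b00111000)  # number_of_returns -> array([5, 2], dtype=uint8)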
q271181 | pack | test | def pack(array, sub_field_array, mask, inplace=False):
""" Packs a sub field's array into another array using a mask
Parameters
----------
array : numpy.ndarray
The array into which the sub field array will be packed
sub_field_array : numpy.ndarray
Sub field array to pack
mask : mask (e.g. 0b00001111)
Mask of the sub field
inplace : bool, optional
If True, array is modified in place; otherwise a new array is returned (default: False)
Raises
------
OverflowError
If the values contained in the sub field array are greater than its mask's number of bits
allows
"""
lsb = least_significant_bit(mask)
max_value = int(mask >> lsb)
if sub_field_array.max() > max_value:
raise OverflowError(
"value ({}) is greater than allowed (max: {})".format(
sub_field_array.max(), max_value
)
)
if inplace:
array[:] = array & ~mask
array[:] = array | ((sub_field_array << lsb) & mask).astype(array.dtype)
else:
array = array & ~mask
return array | ((sub_field_array << lsb) & mask).astype(array.dtype) | python | {
"resource": ""
} |
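The inverse operation; with inplace=False (the default) a new array is returned, and values that do not fit under the mask raise OverflowError:
import numpy as np
flags = np.zeros(2, dtype=np.uint8)
flags = pack(flags, np.array([3, 6], dtype=np.uint8), 0b00000111)
# pack(flags, np.array([9, 0], dtype=np.uint8), 0b00000111) raises OverflowError (mask max is 7)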
q271182 | lost_dimensions | test | def lost_dimensions(point_fmt_in, point_fmt_out):
""" Returns a list of the names of the dimensions that will be lost
when converting from point_fmt_in to point_fmt_out
"""
unpacked_dims_in = PointFormat(point_fmt_in).dtype
unpacked_dims_out = PointFormat(point_fmt_out).dtype
out_dims = unpacked_dims_out.fields
completely_lost = []
for dim_name in unpacked_dims_in.names:
if dim_name not in out_dims:
completely_lost.append(dim_name)
return completely_lost | python | {
"resource": ""
} |
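For example, converting from point format 3 down to format 0 drops GPS time and the colour channels (the exact dimension names are an assumption and depend on the pylas version):
lost_dimensions(3, 0)  # e.g. ['gps_time', 'red', 'green', 'blue']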
q271183 | PointFormat.sub_fields | test | def sub_fields(self):
""" Returns a dict of the sub fields for this point format
Returns
-------
Dict[str, Tuple[str, SubField]]
maps a sub field name to its composed dimension with additional information
"""
sub_fields_dict = {}
for composed_dim_name, sub_fields in self.composed_fields.items():
for sub_field in sub_fields:
sub_fields_dict[sub_field.name] = (composed_dim_name, sub_field)
return sub_fields_dict | python | {
"resource": ""
} |
q271184 | PointFormat.num_extra_bytes | test | def num_extra_bytes(self):
""" Returns the number of extra bytes
"""
return sum(np.dtype(extra_dim[1]).itemsize for extra_dim in self.extra_dims) | python | {
"resource": ""
} |
q271185 | PointFormat.has_waveform_packet | test | def has_waveform_packet(self):
""" Returns True if the point format has waveform packet dimensions
"""
dimensions = set(self.dimension_names)
return all(name in dimensions for name in dims.WAVEFORM_FIELDS_NAMES) | python | {
"resource": ""
} |
q271186 | main | test | def main(port, ip, command, loglevel):
"""Console script for satel_integra."""
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level)
click.echo("Demo of satel_integra library")
if command == "demo":
demo(ip, port) | python | {
"resource": ""
} |
q271187 | checksum | test | def checksum(command):
"""Function to calculate checksum as per Satel manual."""
crc = 0x147A
for b in command:
# rotate (crc 1 bit left)
crc = ((crc << 1) & 0xFFFF) | (crc & 0x8000) >> 15
crc = crc ^ 0xFFFF
crc = (crc + (crc >> 8) + b) & 0xFFFF
return crc | python | {
"resource": ""
} |
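A worked single-byte example; 0x147A is the seed from the Satel manual, and the 16-bit result is split into two bytes when framing:
crc = checksum(bytearray(b'\x00'))  # -> 0xD7E2 for this one-byte command
high, low = crc >> 8, crc & 0xFF    # appended to the frame as 0xD7, 0xE2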
q271188 | print_hex | test | def print_hex(data):
"""Debugging method to print out frames in hex."""
hex_msg = ""
for c in data:
hex_msg += "\\x" + format(c, "02x")
_LOGGER.debug(hex_msg) | python | {
"resource": ""
} |
q271189 | verify_and_strip | test | def verify_and_strip(resp):
"""Verify checksum and strip header and footer of received frame."""
if resp[0:2] != b'\xFE\xFE':
_LOGGER.error("Houston, we got problem:")
print_hex(resp)
raise Exception("Wrong header - got %X%X" % (resp[0], resp[1]))
if resp[-2:] != b'\xFE\x0D':
raise Exception("Wrong footer - got %X%X" % (resp[-2], resp[-1]))
output = resp[2:-2].replace(b'\xFE\xF0', b'\xFE')
c = checksum(bytearray(output[0:-2]))
if (256 * output[-2:-1][0] + output[-1:][0]) != c:
raise Exception("Wrong checksum - got %d expected %d" % (
(256 * output[-2:-1][0] + output[-1:][0]), c))
return output[0:-2] | python | {
"resource": ""
} |
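verify_and_strip is the inverse of generate_query (defined below), which makes for a handy sanity check:
frame = generate_query(b'\x00')
assert verify_and_strip(frame) == b'\x00'  # header, footer and checksum stripped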
q271190 | list_set_bits | test | def list_set_bits(r, expected_length):
"""Return list of positions of bits set to one in given data.
This method is used to read e.g. violated zones. They are marked by ones
on respective bit positions - as per Satel manual.
"""
set_bit_numbers = []
bit_index = 0x1
assert (len(r) == expected_length + 1)
for b in r[1:]:
for i in range(8):
if ((b >> i) & 1) == 1:
set_bit_numbers.append(bit_index)
bit_index += 1
return set_bit_numbers | python | {
"resource": ""
} |
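A sketch of decoding a bitmap payload: the first byte is the command echo, the remaining bytes encode one zone per bit:
resp = bytearray([0x00, 0b00000001, 0b00000010])  # illustrative 2-byte bitmap
list_set_bits(resp, 2)  # -> [1, 10] (bit 0 of byte 1, bit 1 of byte 2)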
q271191 | generate_query | test | def generate_query(command):
"""Add header, checksum and footer to command data."""
data = bytearray(command)
c = checksum(data)
data.append(c >> 8)
data.append(c & 0xFF)
# bytearray.replace returns a new object, so the result must be reassigned
data = data.replace(b'\xFE', b'\xFE\xF0')
data = bytearray.fromhex("FEFE") + data + bytearray.fromhex("FE0D")
return data | python | {
"resource": ""
} |
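With the reassignment fix above, framing a one-byte command yields (checksum as in the worked example earlier):
generate_query(b'\x00')
# -> bytearray(b'\xFE\xFE\x00\xD7\xE2\xFE\x0D')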
q271192 | demo | test | def demo(host, port):
"""Basic demo of the monitoring capabilities."""
# logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
stl = AsyncSatel(host,
port,
loop,
[1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 25, 26, 27, 28, 29, 30],
[8, 9, 10]
)
loop.run_until_complete(stl.connect())
loop.create_task(stl.arm("3333", 1))
loop.create_task(stl.disarm("3333"))
loop.create_task(stl.keep_alive())
loop.create_task(stl.monitor_status())
loop.run_forever()
loop.close() | python | {
"resource": ""
} |
q271193 | AsyncSatel.connect | test | async def connect(self):
"""Make a TCP connection to the alarm system."""
_LOGGER.debug("Connecting...")
try:
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port, loop=self._loop)
_LOGGER.debug("sucess connecting...")
except Exception as e:
_LOGGER.warning(
"Exception during connecting: %s.", e)
self._writer = None
self._reader = None
return False
return True | python | {
"resource": ""
} |
q271194 | AsyncSatel.start_monitoring | test | async def start_monitoring(self):
"""Start monitoring for interesting events."""
data = generate_query(
b'\x7F\x01\xDC\x99\x80\x00\x04\x00\x00\x00\x00\x00\x00')
await self._send_data(data)
resp = await self._read_data()
if resp is None:
_LOGGER.warning("Start monitoring - no data!")
return
if resp[1:2] != b'\xFF':
_LOGGER.warning("Monitoring not accepted.") | python | {
"resource": ""
} |
q271195 | AsyncSatel.disarm | test | async def disarm(self, code, partition_list):
"""Send command to disarm."""
_LOGGER.info("Sending disarm command.")
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
data = generate_query(b'\x84' + code_bytes
+ partition_bytes(partition_list))
await self._send_data(data) | python | {
"resource": ""
} |
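The while-loop pads the user code with 'F' nibbles to 16 hex digits, matching the protocol's 8-byte code field; the padding in isolation:
code = "3333"
code += "F" * (16 - len(code))  # '3333FFFFFFFFFFFF'
bytearray.fromhex(code)         # 8 bytes, as sent on the wire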
q271196 | AsyncSatel.clear_alarm | test | async def clear_alarm(self, code, partition_list):
"""Send command to clear the alarm."""
_LOGGER.info("Sending clear the alarm command.")
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
data = generate_query(b'\x85' + code_bytes
+ partition_bytes(partition_list))
await self._send_data(data) | python | {
"resource": ""
} |
q271197 | AsyncSatel.set_output | test | async def set_output(self, code, output_id, state):
"""Send output turn on command to the alarm."""
"""0x88 outputs on
+ 8 bytes - user code
+ 16/32 bytes - output list
If function is accepted, function result can be
checked by observe the system state """
_LOGGER.debug("Turn on, output: %s, code: %s", output_id, code)
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
mode_command = 0x88 if state else 0x89
data = generate_query(mode_command.to_bytes(1, 'big') +
code_bytes +
output_bytes(output_id))
await self._send_data(data) | python | {
"resource": ""
} |
q271198 | AsyncSatel.keep_alive | test | async def keep_alive(self):
"""A workaround for Satel Integra disconnecting after 25s.
Every interval it sends a benign query to the device, ignoring the
answer - just to keep the connection alive.
"""
while True:
await asyncio.sleep(self._keep_alive_timeout)
if self.closed:
return
# Command to read status of the alarm
data = generate_query(b'\xEE\x01\x01')
await self._send_data(data) | python | {
"resource": ""
} |
q271199 | AsyncSatel.monitor_status | test | async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
"""Start monitoring of the alarm status.
Send a command to the Satel Integra to start sending updates. Read in a
loop and call the respective callbacks when messages are received.
"""
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.") | python | {
"resource": ""
} |