| repo | path | func_name | code | language | sha | url | partition | idx |
|---|---|---|---|---|---|---|---|---|
google/transitfeed | merge.py | HTMLProblemAccumulator._GenerateSummary |

def _GenerateSummary(self):
  """Generate a summary of the warnings and errors.

  Returns:
    The generated HTML as a string.
  """
  items = []
  if self._notices:
    items.append('notices: %d' % self._notice_count)
  if self._dataset_errors:
    items.append('errors: %d' % self._error_count)
  if self._dataset_warnings:
    items.append('warnings: %d' % self._warning_count)
  if items:
    return '<p><span class="fail">%s</span></p>' % '<br>'.join(items)
  else:
return '<p><span class="pass">feeds merged successfully</span></p>' | python | def _GenerateSummary(self):
"""Generate a summary of the warnings and errors.
Returns:
The generated HTML as a string.
"""
items = []
if self._notices:
items.append('notices: %d' % self._notice_count)
if self._dataset_errors:
items.append('errors: %d' % self._error_count)
if self._dataset_warnings:
items.append('warnings: %d' % self._warning_count)
if items:
return '<p><span class="fail">%s</span></p>' % '<br>'.join(items)
else:
return '<p><span class="pass">feeds merged successfully</span></p>' | [
"def",
"_GenerateSummary",
"(",
"self",
")",
":",
"items",
"=",
"[",
"]",
"if",
"self",
".",
"_notices",
":",
"items",
".",
"append",
"(",
"'notices: %d'",
"%",
"self",
".",
"_notice_count",
")",
"if",
"self",
".",
"_dataset_errors",
":",
"items",
".",
... | Generate a summary of the warnings and errors.
Returns:
The generated HTML as a string. | [
"Generate",
"a",
"summary",
"of",
"the",
"warnings",
"and",
"errors",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L246-L263 | train | 220,000 |
google/transitfeed | merge.py | HTMLProblemAccumulator._GenerateNotices |

def _GenerateNotices(self):
  """Generate a summary of any notices.

  Returns:
    The generated HTML as a string.
  """
  items = []
  for e in self._notices:
    d = e.GetDictToFormat()
    if 'url' in d.keys():
      d['url'] = '<a href="%(url)s">%(url)s</a>' % d
    items.append('<li class="notice">%s</li>' %
                 e.FormatProblem(d).replace('\n', '<br>'))
  if items:
    return '<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items)
  else:
    return ''

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L265-L281 | train | 220,001
google/transitfeed | merge.py | DataSetMerger._MergeIdentical |

def _MergeIdentical(self, a, b):
  """Tries to merge two values. The values are required to be identical.

  Args:
    a: The first value.
    b: The second value.

  Returns:
    The trivially merged value.

  Raises:
    MergeError: The values were not identical.
  """
  if a != b:
    raise MergeError("values must be identical ('%s' vs '%s')" %
                     (transitfeed.EncodeUnicode(a),
                      transitfeed.EncodeUnicode(b)))
  return b

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L394-L411 | train | 220,002
google/transitfeed | merge.py | DataSetMerger._MergeIdenticalCaseInsensitive |

def _MergeIdenticalCaseInsensitive(self, a, b):
  """Tries to merge two strings.

  The strings are required to be the same ignoring case. The second string is
  always used as the merged value.

  Args:
    a: The first string.
    b: The second string.

  Returns:
    The merged string. This is equal to the second string.

  Raises:
    MergeError: The strings were not the same ignoring case.
  """
  if a.lower() != b.lower():
    raise MergeError("values must be the same (case insensitive) "
                     "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                         transitfeed.EncodeUnicode(b)))
  return b

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L413-L433 | train | 220,003
google/transitfeed | merge.py | DataSetMerger._MergeOptional |

def _MergeOptional(self, a, b):
  """Tries to merge two values which may be None.

  If both values are not None, they are required to be the same and the
  merge is trivial. If one of the values is None and the other is not None,
  the merge results in the one which is not None. If both are None, the merge
  results in None.

  Args:
    a: The first value.
    b: The second value.

  Returns:
    The merged value.

  Raises:
    MergeError: If both values are not None and are not the same.
  """
  if a and b:
    if a != b:
      raise MergeError("values must be identical if both specified "
                       "('%s' vs '%s')" % (transitfeed.EncodeUnicode(a),
                                           transitfeed.EncodeUnicode(b)))
  return a or b

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L435-L458 | train | 220,004
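Taken together, the merge primitives above form a small ladder of strictness: identical, identical ignoring case, identical-if-both-present. The standalone sketch below mimics their semantics outside the class; the `MergeError` class and lowercase function names here are illustrative stand-ins, not the transitfeed API:

```python
class MergeError(Exception):
  """Stand-in for merge.MergeError, raised when two values cannot merge."""

def merge_identical(a, b):
  # Strictest form: values must compare equal; the second value is returned.
  if a != b:
    raise MergeError("values must be identical (%r vs %r)" % (a, b))
  return b

def merge_optional(a, b):
  # Looser form: a missing value (None or '') acts as a wildcard;
  # two present values must still be equal.
  if a and b and a != b:
    raise MergeError("values must be identical if both specified")
  return a or b

print(merge_identical('10', '10'))    # -> 10
print(merge_optional(None, 'zone1'))  # -> zone1
print(merge_optional('zone1', None))  # -> zone1
```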
google/transitfeed | merge.py | DataSetMerger._MergeSameAgency |

def _MergeSameAgency(self, a_agency_id, b_agency_id):
  """Merge agency ids to the corresponding agency id in the merged schedule.

  Args:
    a_agency_id: an agency id from the old schedule
    b_agency_id: an agency id from the new schedule

  Returns:
    The agency id of the corresponding merged agency.

  Raises:
    MergeError: If a_agency_id and b_agency_id do not correspond to the same
                merged agency.
    KeyError: Either a_agency_id or b_agency_id is not a valid agency id.
  """
  a_agency_id = (a_agency_id or
                 self.feed_merger.a_schedule.GetDefaultAgency().agency_id)
  b_agency_id = (b_agency_id or
                 self.feed_merger.b_schedule.GetDefaultAgency().agency_id)
  a_agency = self.feed_merger.a_schedule.GetAgency(
      a_agency_id)._migrated_entity
  b_agency = self.feed_merger.b_schedule.GetAgency(
      b_agency_id)._migrated_entity
  if a_agency != b_agency:
    raise MergeError('agency must be the same')
  return a_agency.agency_id

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L460-L485 | train | 220,005
google/transitfeed | merge.py | DataSetMerger._SchemedMerge |

def _SchemedMerge(self, scheme, a, b):
  """Tries to merge two entities according to a merge scheme.

  A scheme is specified by a map where the keys are entity attributes and the
  values are merge functions like Merger._MergeIdentical or
  Merger._MergeOptional. The entity is first migrated to the merged schedule.
  Then the attributes are individually merged as specified by the scheme.

  Args:
    scheme: The merge scheme, a map from entity attributes to merge
            functions.
    a: The entity from the old schedule.
    b: The entity from the new schedule.

  Returns:
    The migrated and merged entity.

  Raises:
    MergeError: One of the attributes was not able to be merged.
  """
  migrated = self._Migrate(b, self.feed_merger.b_schedule, False)
  for attr, merger in scheme.items():
    a_attr = getattr(a, attr, None)
    b_attr = getattr(b, attr, None)
    try:
      merged_attr = merger(a_attr, b_attr)
    except MergeError as merge_error:
      raise MergeError("Attribute '%s' could not be merged: %s." % (
          attr, merge_error))
    setattr(migrated, attr, merged_attr)
  return migrated

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L487-L517 | train | 220,006
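The scheme pattern is a table-driven fold over attributes: the per-entity mergers above reduce to picking a merge function for each field. A minimal standalone sketch of the same idea, using plain objects and a copy instead of a migrated transitfeed entity (all names here are illustrative):

```python
import copy

class MergeError(Exception):
  pass

def merge_identical(a, b):
  if a != b:
    raise MergeError("values differ (%r vs %r)" % (a, b))
  return b

def schemed_merge(scheme, a, b):
  # Start from a copy of the new entity, then merge attribute by attribute.
  merged = copy.copy(b)
  for attr, merger in scheme.items():
    setattr(merged, attr, merger(getattr(a, attr, None),
                                 getattr(b, attr, None)))
  return merged

class Fare(object):
  def __init__(self, price, currency):
    self.price, self.currency = price, currency

scheme = {'price': merge_identical, 'currency': merge_identical}
merged = schemed_merge(scheme, Fare('1.50', 'USD'), Fare('1.50', 'USD'))
print(merged.price, merged.currency)  # -> 1.50 USD
```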
google/transitfeed | merge.py | DataSetMerger._MergeSameId |

def _MergeSameId(self):
  """Tries to merge entities based on their ids.

  This tries to merge only the entities from the old and new schedules which
  have the same id. These are added into the merged schedule. Entities which
  do not merge or do not have the same id as another entity in the other
  schedule are simply migrated into the merged schedule.

  This method is less flexible than _MergeDifferentId since it only tries
  to merge entities which have the same id while _MergeDifferentId tries to
  merge everything. However, it is faster and so should be used whenever
  possible.

  This method makes use of various methods like _Merge and _Migrate which
  are not implemented in the abstract DataSetMerger class. These methods
  should be overridden in a subclass to allow _MergeSameId to work with
  different entity types.

  Returns:
    The number of merged entities.
  """
  a_not_merged = []
  b_not_merged = []

  for a in self._GetIter(self.feed_merger.a_schedule):
    try:
      b = self._GetById(self.feed_merger.b_schedule, self._GetId(a))
    except KeyError:
      # there was no entity in B with the same id as a
      a_not_merged.append(a)
      continue
    try:
      self._Add(a, b, self._MergeEntities(a, b))
      self._num_merged += 1
    except MergeError as merge_error:
      a_not_merged.append(a)
      b_not_merged.append(b)
      self._ReportSameIdButNotMerged(self._GetId(a), merge_error)

  for b in self._GetIter(self.feed_merger.b_schedule):
    try:
      a = self._GetById(self.feed_merger.a_schedule, self._GetId(b))
    except KeyError:
      # there was no entity in A with the same id as b
      b_not_merged.append(b)

  # migrate the remaining entities
  for a in a_not_merged:
    newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
    self._Add(a, None, self._Migrate(a, self.feed_merger.a_schedule, newid))
  for b in b_not_merged:
    newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
    self._Add(None, b, self._Migrate(b, self.feed_merger.b_schedule, newid))

  self._num_not_merged_a = len(a_not_merged)
  self._num_not_merged_b = len(b_not_merged)
  return self._num_merged

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L519-L575 | train | 220,007
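The control flow above reduces to: join on id, merge where the join succeeds, and pass everything else through. A dictionary-based sketch of the same algorithm, detached from schedules (the `merge` callback and function name are hypothetical, not the transitfeed API):

```python
def merge_same_id(a_by_id, b_by_id, merge):
  """Join two {id: entity} maps on id; merge matches, pass through the rest.

  `merge` returns the merged entity or raises ValueError when the pair
  cannot be merged. Returns (merged, unmerged_a, unmerged_b).
  """
  merged, unmerged_a, unmerged_b = {}, {}, {}
  for key, a in a_by_id.items():
    if key in b_by_id:
      try:
        merged[key] = merge(a, b_by_id[key])
      except ValueError:
        # same id, but the entities conflict: keep both, unmerged
        unmerged_a[key], unmerged_b[key] = a, b_by_id[key]
    else:
      unmerged_a[key] = a
  for key, b in b_by_id.items():
    if key not in a_by_id:
      unmerged_b[key] = b
  return merged, unmerged_a, unmerged_b
```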
google/transitfeed | merge.py | DataSetMerger._MergeDifferentId |

def _MergeDifferentId(self):
  """Tries to merge all possible combinations of entities.

  This tries to merge every entity in the old schedule with every entity in
  the new schedule. Unlike _MergeSameId, the ids do not need to match.
  However, _MergeDifferentId is much slower than _MergeSameId.

  This method makes use of various methods like _Merge and _Migrate which
  are not implemented in the abstract DataSetMerger class. These methods
  should be overridden in a subclass to allow _MergeDifferentId to work with
  different entity types.

  Returns:
    The number of merged entities.
  """
  # TODO: The same entity from A could merge with multiple from B.
  # This should either generate an error or should be prevented from
  # happening.
  for a in self._GetIter(self.feed_merger.a_schedule):
    for b in self._GetIter(self.feed_merger.b_schedule):
      try:
        self._Add(a, b, self._MergeEntities(a, b))
        self._num_merged += 1
      except MergeError:
        continue

  for a in self._GetIter(self.feed_merger.a_schedule):
    if a not in self.feed_merger.a_merge_map:
      self._num_not_merged_a += 1
      newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
      self._Add(a, None,
                self._Migrate(a, self.feed_merger.a_schedule, newid))
  for b in self._GetIter(self.feed_merger.b_schedule):
    if b not in self.feed_merger.b_merge_map:
      self._num_not_merged_b += 1
      newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
      self._Add(None, b,
                self._Migrate(b, self.feed_merger.b_schedule, newid))
  return self._num_merged

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L618-L657 | train | 220,008
google/transitfeed | merge.py | DataSetMerger._ReportSameIdButNotMerged |

def _ReportSameIdButNotMerged(self, entity_id, reason):
  """Report that two entities have the same id but could not be merged.

  Args:
    entity_id: The id of the entities.
    reason: A string giving a reason why they could not be merged.
  """
  self.feed_merger.problem_reporter.SameIdButNotMerged(self,
                                                       entity_id,
                                                       reason)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L659-L668 | train | 220,009
google/transitfeed | merge.py | DataSetMerger._HasId |

def _HasId(self, schedule, entity_id):
  """Check if the schedule has an entity with the given id.

  Args:
    schedule: The transitfeed.Schedule instance to look in.
    entity_id: The id of the entity.

  Returns:
    True if the schedule has an entity with the id or False if not.
  """
  try:
    self._GetById(schedule, entity_id)
    has = True
  except KeyError:
    has = False
  return has

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L708-L723 | train | 220,010
google/transitfeed | merge.py | AgencyMerger._MergeEntities |

def _MergeEntities(self, a, b):
  """Merges two agencies.

  To be merged, they are required to have the same id, name, url and
  timezone. The remaining language attribute is taken from the new agency.

  Args:
    a: The first agency.
    b: The second agency.

  Returns:
    The merged agency.

  Raises:
    MergeError: The agencies could not be merged.
  """

  def _MergeAgencyId(a_agency_id, b_agency_id):
    """Merge two agency ids.

    The only difference between this and _MergeIdentical() is that the values
    None and '' are regarded as being the same.

    Args:
      a_agency_id: The first agency id.
      b_agency_id: The second agency id.

    Returns:
      The merged agency id.

    Raises:
      MergeError: The agency ids could not be merged.
    """
    a_agency_id = a_agency_id or None
    b_agency_id = b_agency_id or None
    return self._MergeIdentical(a_agency_id, b_agency_id)

  scheme = {'agency_id': _MergeAgencyId,
            'agency_name': self._MergeIdentical,
            'agency_url': self._MergeIdentical,
            'agency_timezone': self._MergeIdentical}
  return self._SchemedMerge(scheme, a, b)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L841-L882 | train | 220,011
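The `x or None` normalization in _MergeAgencyId is what lets an agency id that is '' in one feed and absent in the other still merge cleanly. A two-line illustration (the `normalize` name is ad hoc):

```python
# '' and None both normalize to None, so they compare equal under
# _MergeIdentical; any real id passes through unchanged.
normalize = lambda agency_id: agency_id or None
print(normalize('') == normalize(None))  # -> True
print(normalize('DTA'))                  # -> DTA
```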
google/transitfeed | merge.py | StopMerger._MergeEntities |

def _MergeEntities(self, a, b):
  """Merges two stops.

  For the stops to be merged, they must have:
    - the same stop_id
    - the same stop_name (case insensitive)
    - the same zone_id
    - locations less than largest_stop_distance apart

  The other attributes can have arbitrary changes. The merged attributes are
  taken from the new stop.

  Args:
    a: The first stop.
    b: The second stop.

  Returns:
    The merged stop.

  Raises:
    MergeError: The stops could not be merged.
  """
  distance = transitfeed.ApproximateDistanceBetweenStops(a, b)
  if distance > self.largest_stop_distance:
    raise MergeError("Stops are too far apart: %.1fm "
                     "(largest_stop_distance is %.1fm)." %
                     (distance, self.largest_stop_distance))
  scheme = {'stop_id': self._MergeIdentical,
            'stop_name': self._MergeIdenticalCaseInsensitive,
            'zone_id': self._MergeIdentical,
            'location_type': self._MergeIdentical}
  return self._SchemedMerge(scheme, a, b)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L932-L962 | train | 220,012
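The distance check is the only geometric gate; everything else is scheme-driven. A standalone sketch of the same gate using a haversine great-circle distance; this approximates but is not transitfeed.ApproximateDistanceBetweenStops, and the 10 m threshold here is illustrative rather than a confirmed library default:

```python
import math

def distance_m(lat1, lon1, lat2, lon2):
  # Haversine great-circle distance in meters (mean Earth radius 6371 km).
  lat1, lon1, lat2, lon2 = map(math.radians, (lat1, lon1, lat2, lon2))
  h = (math.sin((lat2 - lat1) / 2) ** 2 +
       math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
  return 2 * 6371000 * math.asin(math.sqrt(h))

LARGEST_STOP_DISTANCE = 10.0  # illustrative threshold, in meters
d = distance_m(36.425288, -117.133162, 36.425305, -117.133162)
print('%.1fm apart -> mergeable: %s' % (d, d <= LARGEST_STOP_DISTANCE))
```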
google/transitfeed | merge.py | StopMerger._UpdateAndMigrateUnmerged |

def _UpdateAndMigrateUnmerged(self, not_merged_stops, zone_map, merge_map,
                              schedule):
  """Correct references in migrated unmerged stops and add to merged_schedule.

  For stops migrated from one of the input feeds to the output feed, update
  the parent_station and zone_id references to point to objects in the output
  feed. Then add the migrated stop to the new schedule.

  Args:
    not_merged_stops: list of stops from one input feed that have not been
                      merged
    zone_map: map from zone_id in the input feed to zone_id in the output feed
    merge_map: map from Stop objects in the input feed to Stop objects in
               the output feed
    schedule: the input Schedule object
  """
  # for the unmerged stops, we use an already mapped zone_id if possible
  # if not, we generate a new one and add it to the map
  for stop, migrated_stop in not_merged_stops:
    if stop.zone_id in zone_map:
      migrated_stop.zone_id = zone_map[stop.zone_id]
    else:
      migrated_stop.zone_id = self.feed_merger.GenerateId(stop.zone_id)
      zone_map[stop.zone_id] = migrated_stop.zone_id
    if stop.parent_station:
      parent_original = schedule.GetStop(stop.parent_station)
      migrated_stop.parent_station = merge_map[parent_original].stop_id
    self.feed_merger.merged_schedule.AddStopObject(migrated_stop)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1014-L1041 | train | 220,013
google/transitfeed | merge.py | ServicePeriodMerger.DisjoinCalendars |

def DisjoinCalendars(self, cutoff):
  """Forces the old and new calendars to be disjoint about a cutoff date.

  This truncates the service periods of the old schedule so that service
  stops one day before the given cutoff date and truncates the new schedule
  so that service only begins on the cutoff date.

  Args:
    cutoff: The cutoff date as a string in YYYYMMDD format. The timezone
            is the same as used in the calendar.txt file.
  """

  def TruncatePeriod(service_period, start, end):
    """Truncate the service period to the range [start, end].

    Args:
      service_period: The service period to truncate.
      start: The start date as a string in YYYYMMDD format.
      end: The end date as a string in YYYYMMDD format.
    """
    service_period.start_date = max(service_period.start_date, start)
    service_period.end_date = min(service_period.end_date, end)
    dates_to_delete = []
    for k in service_period.date_exceptions:
      if (k < start) or (k > end):
        dates_to_delete.append(k)
    for k in dates_to_delete:
      del service_period.date_exceptions[k]

  # find the date one day before cutoff
  year = int(cutoff[:4])
  month = int(cutoff[4:6])
  day = int(cutoff[6:8])
  cutoff_date = datetime.date(year, month, day)
  one_day_delta = datetime.timedelta(days=1)
  before = (cutoff_date - one_day_delta).strftime('%Y%m%d')

  for a in self.feed_merger.a_schedule.GetServicePeriodList():
    TruncatePeriod(a, 0, before)
  for b in self.feed_merger.b_schedule.GetServicePeriodList():
    TruncatePeriod(b, cutoff, '9'*8)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1166-L1206 | train | 220,014
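The cutoff arithmetic is plain datetime work: the old feed's periods end the day before the cutoff, the new feed's begin on it. A quick check of the two boundary values (the sample date is arbitrary):

```python
import datetime

cutoff = '20070601'
cutoff_date = datetime.date(int(cutoff[:4]), int(cutoff[4:6]), int(cutoff[6:8]))
before = (cutoff_date - datetime.timedelta(days=1)).strftime('%Y%m%d')
print(before)   # -> 20070531: last service day kept from the old feed
print('9' * 8)  # -> 99999999: open-ended upper bound for the new feed
```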
google/transitfeed | merge.py | ServicePeriodMerger.CheckDisjointCalendars |

def CheckDisjointCalendars(self):
  """Check whether any old service periods intersect with any new ones.

  This is a rather coarse check based on
  transitfeed.ServicePeriod.GetDateRange.

  Returns:
    True if the calendars are disjoint or False if not.
  """
  # TODO: Do an exact check here.
  a_service_periods = self.feed_merger.a_schedule.GetServicePeriodList()
  b_service_periods = self.feed_merger.b_schedule.GetServicePeriodList()

  for a_service_period in a_service_periods:
    a_start, a_end = a_service_period.GetDateRange()
    for b_service_period in b_service_periods:
      b_start, b_end = b_service_period.GetDateRange()
      overlap_start = max(a_start, b_start)
      overlap_end = min(a_end, b_end)
      if overlap_end >= overlap_start:
        return False
  return True

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1208-L1230 | train | 220,015
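The overlap test is the standard closed-interval intersection: two ranges intersect iff the later start does not pass the earlier end. Because the dates are zero-padded YYYYMMDD strings, plain string comparison orders them correctly:

```python
def ranges_overlap(a_start, a_end, b_start, b_end):
  # Closed intervals [a_start, a_end] and [b_start, b_end] intersect iff
  # max(starts) <= min(ends).
  return max(a_start, b_start) <= min(a_end, b_end)

print(ranges_overlap('20070101', '20070531', '20070601', '20071231'))  # False
print(ranges_overlap('20070101', '20070601', '20070601', '20071231'))  # True
```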
google/transitfeed | merge.py | FareMerger._MergeEntities |

def _MergeEntities(self, a, b):
  """Merges the fares if all the attributes are the same."""
  scheme = {'price': self._MergeIdentical,
            'currency_type': self._MergeIdentical,
            'payment_method': self._MergeIdentical,
            'transfers': self._MergeIdentical,
            'transfer_duration': self._MergeIdentical}
  return self._SchemedMerge(scheme, a, b)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1249-L1256 | train | 220,016
google/transitfeed | merge.py | ShapeMerger._MergeEntities |

def _MergeEntities(self, a, b):
  """Merges the shapes by taking the new shape.

  Args:
    a: The first transitfeed.Shape instance.
    b: The second transitfeed.Shape instance.

  Returns:
    The merged shape.

  Raises:
    MergeError: If the ids are different or if the endpoints are further
                than largest_shape_distance apart.
  """
  if a.shape_id != b.shape_id:
    raise MergeError('shape_id must be the same')

  distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2],
                                                  b.points[0][:2]),
                 ApproximateDistanceBetweenPoints(a.points[-1][:2],
                                                  b.points[-1][:2]))
  if distance > self.largest_shape_distance:
    raise MergeError('The shape endpoints are too far away: %.1fm '
                     '(largest_shape_distance is %.1fm)' %
                     (distance, self.largest_shape_distance))
  return self._Migrate(b, self.feed_merger.b_schedule, False)

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1361-L1387 | train | 220,017
google/transitfeed | merge.py | FareRuleMerger.MergeDataSets |

def MergeDataSets(self):
  """Merge the fare rule datasets.

  The fare rules are first migrated. Merging is done by removing any
  duplicate rules.

  Returns:
    True since fare rules can always be merged.
  """
  rules = set()
  for (schedule, merge_map, zone_map) in ([self.feed_merger.a_schedule,
                                           self.feed_merger.a_merge_map,
                                           self.feed_merger.a_zone_map],
                                          [self.feed_merger.b_schedule,
                                           self.feed_merger.b_merge_map,
                                           self.feed_merger.b_zone_map]):
    for fare in schedule.GetFareAttributeList():
      for fare_rule in fare.GetFareRuleList():
        fare_id = merge_map[
            schedule.GetFareAttribute(fare_rule.fare_id)].fare_id
        route_id = (fare_rule.route_id and
                    merge_map[schedule.GetRoute(fare_rule.route_id)].route_id)
        origin_id = (fare_rule.origin_id and
                     zone_map[fare_rule.origin_id])
        destination_id = (fare_rule.destination_id and
                          zone_map[fare_rule.destination_id])
        contains_id = (fare_rule.contains_id and
                       zone_map[fare_rule.contains_id])
        rules.add((fare_id, route_id, origin_id, destination_id,
                   contains_id))
  for fare_rule_tuple in rules:
    migrated_fare_rule = transitfeed.FareRule(*fare_rule_tuple)
    self.feed_merger.merged_schedule.AddFareRuleObject(migrated_fare_rule)

  if rules:
    self.feed_merger.problem_reporter.FareRulesBroken(self)
  print('Fare Rules: union has %d fare rules' % len(rules))
  return True

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1510-L1547 | train | 220,018
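Deduplicating by adding key tuples to a set is the whole merge strategy here: identical rules from the two feeds collapse to one element. A reduced illustration with plain tuples standing in for migrated fare rules (the sample data is invented):

```python
rules = set()
feeds = (
    [('p', 'route1', None, None, None), ('p', 'route2', None, None, None)],
    [('p', 'route1', None, None, None)],  # duplicates a rule from feed A
)
for feed in feeds:
  for rule in feed:
    # (fare_id, route_id, origin_id, destination_id, contains_id)
    rules.add(rule)
print('union has %d fare rules' % len(rules))  # -> 2
```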
google/transitfeed | merge.py | FeedMerger._FindLargestIdPostfixNumber |

def _FindLargestIdPostfixNumber(self, schedule):
  """Finds the largest integer used as the ending of an id in the schedule.

  Args:
    schedule: The schedule to check.

  Returns:
    The maximum integer used as an ending for an id.
  """
  postfix_number_re = re.compile(r'(\d+)$')

  def ExtractPostfixNumber(entity_id):
    """Try to extract an integer from the end of entity_id.

    If entity_id is None or if there is no integer ending the id, zero is
    returned.

    Args:
      entity_id: An id string or None.

    Returns:
      An integer ending the entity_id or zero.
    """
    if entity_id is None:
      return 0
    match = postfix_number_re.search(entity_id)
    if match is not None:
      return int(match.group(1))
    else:
      return 0

  id_data_sets = {'agency_id': schedule.GetAgencyList(),
                  'stop_id': schedule.GetStopList(),
                  'route_id': schedule.GetRouteList(),
                  'trip_id': schedule.GetTripList(),
                  'service_id': schedule.GetServicePeriodList(),
                  'fare_id': schedule.GetFareAttributeList(),
                  'shape_id': schedule.GetShapeList()}

  max_postfix_number = 0
  for id_name, entity_list in id_data_sets.items():
    for entity in entity_list:
      entity_id = getattr(entity, id_name)
      postfix_number = ExtractPostfixNumber(entity_id)
      max_postfix_number = max(max_postfix_number, postfix_number)
  return max_postfix_number

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1597-L1642 | train | 220,019
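The `(\d+)$` anchor grabs only a trailing run of digits, which is what makes the counter seeding safe. A quick demonstration of the extraction on a few id shapes (sample ids are invented):

```python
import re

postfix_number_re = re.compile(r'(\d+)$')

def extract_postfix_number(entity_id):
  # Mirrors ExtractPostfixNumber above: trailing digits, else 0.
  if entity_id is None:
    return 0
  match = postfix_number_re.search(entity_id)
  return int(match.group(1)) if match else 0

for entity_id in ('route_42', 'trip7x', 'stop_003', None):
  print(entity_id, '->', extract_postfix_number(entity_id))
# route_42 -> 42, trip7x -> 0 (digits not at the end), stop_003 -> 3, None -> 0
```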
google/transitfeed | merge.py | FeedMerger.GenerateId |

def GenerateId(self, entity_id=None):
  """Generate a unique id based on the given id.

  This is done by appending a counter which is then incremented. The
  counter is initialised at the maximum number used as an ending for
  any id in the old and new schedules.

  Args:
    entity_id: The base id string. This is allowed to be None.

  Returns:
    The generated id.
  """
  self._idnum += 1
  if entity_id:
    return '%s_merged_%d' % (entity_id, self._idnum)
  else:
    return 'merged_%d' % self._idnum

python | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1661-L1678 | train | 220,020
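Seeding the counter with the largest existing numeric postfix (from _FindLargestIdPostfixNumber above) guarantees generated ids never collide with ids already in either feed. A reduced sketch of the generator; `IdGenerator` is an illustrative stand-in, not the FeedMerger class itself:

```python
class IdGenerator(object):
  """Minimal stand-in for FeedMerger's id generation."""

  def __init__(self, largest_existing_postfix):
    # Start above every number already used as an id ending in either feed.
    self._idnum = largest_existing_postfix

  def generate_id(self, entity_id=None):
    self._idnum += 1
    if entity_id:
      return '%s_merged_%d' % (entity_id, self._idnum)
    return 'merged_%d' % self._idnum

gen = IdGenerator(42)
print(gen.generate_id('zone1'))  # -> zone1_merged_43
print(gen.generate_id())         # -> merged_44
```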
google/transitfeed | merge.py | FeedMerger.Register |

def Register(self, a, b, migrated_entity):
  """Registers a merge mapping.

  If a and b are both not None, this means that entities a and b were merged
  to produce migrated_entity. If only one of a or b is not None, then it means
  it was not merged but simply migrated.

  The effect of a call to Register is to update a_merge_map and b_merge_map
  according to the merge. Also the private attributes _migrated_entity of a
  and b are set to migrated_entity.

  Args:
    a: The entity from the old feed or None.
    b: The entity from the new feed or None.
    migrated_entity: The migrated entity.
  """
  # There are a few places where code needs to find the corresponding
  # migrated entity of an object without knowing in which original schedule
  # the entity started. With a_merge_map and b_merge_map both have to be
  # checked. Use of the _migrated_entity attribute allows the migrated entity
  # to be directly found without the schedule. The merge maps also require
  # that all objects be hashable. GenericGTFSObject is at the moment, but
  # this is a bug. See comment in transitfeed.GenericGTFSObject.
  if a is not None:
    self.a_merge_map[a] = migrated_entity
    a._migrated_entity = migrated_entity
  if b is not None:
    self.b_merge_map[b] = migrated_entity
b._migrated_entity = migrated_entity | python | def Register(self, a, b, migrated_entity):
"""Registers a merge mapping.
If a and b are both not None, this means that entities a and b were merged
to produce migrated_entity. If only one of a or b is not None, then it means
it was not merged but simply migrated.
The effect of a call to register is to update a_merge_map and b_merge_map
according to the merge. Also the private attributes _migrated_entity of a
and b are set to migrated_entity.
Args:
a: The entity from the old feed or None.
b: The entity from the new feed or None.
migrated_entity: The migrated entity.
"""
# There are a few places where code needs to find the corresponding
# migrated entity of an object without knowing in which original schedule
# the entity started. With a_merge_map and b_merge_map both have to be
# checked. Use of the _migrated_entity attribute allows the migrated entity
# to be directly found without the schedule. The merge maps also require
# that all objects be hashable. GenericGTFSObject is at the moment, but
# this is a bug. See comment in transitfeed.GenericGTFSObject.
if a is not None:
self.a_merge_map[a] = migrated_entity
a._migrated_entity = migrated_entity
if b is not None:
self.b_merge_map[b] = migrated_entity
b._migrated_entity = migrated_entity | [
"def",
"Register",
"(",
"self",
",",
"a",
",",
"b",
",",
"migrated_entity",
")",
":",
"# There are a few places where code needs to find the corresponding",
"# migrated entity of an object without knowing in which original schedule",
"# the entity started. With a_merge_map and b_merge_ma... | Registers a merge mapping.
If a and b are both not None, this means that entities a and b were merged
to produce migrated_entity. If only one of a or b is not None, then it means
it was not merged but simply migrated.
The effect of a call to register is to update a_merge_map and b_merge_map
according to the merge. Also the private attributes _migrated_entity of a
and b are set to migrated_entity.
Args:
a: The entity from the old feed or None.
b: The entity from the new feed or None.
migrated_entity: The migrated entity. | [
"Registers",
"a",
"merge",
"mapping",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1680-L1708 | train | 220,021 |
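The bookkeeping Register() performs, shown with throwaway stand-in entities (class and variable names hypothetical):

class _Entity(object):
    pass

a, b, merged = _Entity(), _Entity(), _Entity()
a_merge_map, b_merge_map = {}, {}

# Mirrors Register(a, b, merged): each merge map points at the migrated
# entity, and each original keeps a direct back-reference to it.
a_merge_map[a] = merged
a._migrated_entity = merged
b_merge_map[b] = merged
b._migrated_entity = merged

assert a._migrated_entity is b._migrated_entity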
google/transitfeed | merge.py | FeedMerger.AddDefaultMergers | def AddDefaultMergers(self):
"""Adds the default DataSetMergers defined in this module."""
self.AddMerger(AgencyMerger(self))
self.AddMerger(StopMerger(self))
self.AddMerger(RouteMerger(self))
self.AddMerger(ServicePeriodMerger(self))
self.AddMerger(FareMerger(self))
self.AddMerger(ShapeMerger(self))
self.AddMerger(TripMerger(self))
self.AddMerger(FareRuleMerger(self)) | python | def AddDefaultMergers(self):
"""Adds the default DataSetMergers defined in this module."""
self.AddMerger(AgencyMerger(self))
self.AddMerger(StopMerger(self))
self.AddMerger(RouteMerger(self))
self.AddMerger(ServicePeriodMerger(self))
self.AddMerger(FareMerger(self))
self.AddMerger(ShapeMerger(self))
self.AddMerger(TripMerger(self))
self.AddMerger(FareRuleMerger(self)) | [
"def",
"AddDefaultMergers",
"(",
"self",
")",
":",
"self",
".",
"AddMerger",
"(",
"AgencyMerger",
"(",
"self",
")",
")",
"self",
".",
"AddMerger",
"(",
"StopMerger",
"(",
"self",
")",
")",
"self",
".",
"AddMerger",
"(",
"RouteMerger",
"(",
"self",
")",
... | Adds the default DataSetMergers defined in this module. | [
"Adds",
"the",
"default",
"DataSetMergers",
"defined",
"in",
"this",
"module",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1718-L1727 | train | 220,022 |
google/transitfeed | merge.py | FeedMerger.GetMerger | def GetMerger(self, cls):
"""Looks for an added DataSetMerger derived from the given class.
Args:
cls: A class derived from DataSetMerger.
Returns:
The matching DataSetMerger instance.
Raises:
LookupError: No matching DataSetMerger has been added.
"""
for merger in self._mergers:
if isinstance(merger, cls):
return merger
raise LookupError('No matching DataSetMerger found') | python | def GetMerger(self, cls):
"""Looks for an added DataSetMerger derived from the given class.
Args:
cls: A class derived from DataSetMerger.
Returns:
The matching DataSetMerger instance.
Raises:
LookupError: No matching DataSetMerger has been added.
"""
for merger in self._mergers:
if isinstance(merger, cls):
return merger
raise LookupError('No matching DataSetMerger found') | [
"def",
"GetMerger",
"(",
"self",
",",
"cls",
")",
":",
"for",
"merger",
"in",
"self",
".",
"_mergers",
":",
"if",
"isinstance",
"(",
"merger",
",",
"cls",
")",
":",
"return",
"merger",
"raise",
"LookupError",
"(",
"'No matching DataSetMerger found'",
")"
] | Looks for an added DataSetMerger derived from the given class.
Args:
cls: A class derived from DataSetMerger.
Returns:
The matching DataSetMerger instance.
Raises:
LookupError: No matching DataSetMerger has been added. | [
"Looks",
"for",
"an",
"added",
"DataSetMerger",
"derived",
"from",
"the",
"given",
"class",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L1729-L1744 | train | 220,023 |
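A sketch of the isinstance-based lookup with stand-in merger classes (names hypothetical); note that asking for a base class returns the first matching subclass instance:

class _DataSetMerger(object):
    pass

class _AgencyMerger(_DataSetMerger):
    pass

class _FeedMergerDemo(object):
    def __init__(self, mergers):
        self._mergers = mergers

    def GetMerger(self, cls):  # same body as the method above
        for merger in self._mergers:
            if isinstance(merger, cls):
                return merger
        raise LookupError('No matching DataSetMerger found')

fm = _FeedMergerDemo([_AgencyMerger()])
print(type(fm.GetMerger(_AgencyMerger)).__name__)   # _AgencyMerger
print(type(fm.GetMerger(_DataSetMerger)).__name__)  # _AgencyMerger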
google/transitfeed | unusual_trip_filter.py | UnusualTripFilter.filter_line | def filter_line(self, route):
"""Mark unusual trips for the given route."""
if self._route_type is not None and self._route_type != route.route_type:
self.info('Skipping route %s due to different route_type value (%s)' %
(route['route_id'], route['route_type']))
return
self.info('Filtering infrequent trips for route %s.' % route.route_id)
trip_count = len(route.trips)
for pattern_id, pattern in route.GetPatternIdTripDict().items():
ratio = float(1.0 * len(pattern) / trip_count)
if not self._force:
if (ratio < self._threshold):
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as unusual (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
ratio))
for trip in pattern:
trip.trip_type = 1 # special
self.info("\t\tsetting trip_type of trip %s as special" %
trip.trip_id)
else:
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as %s (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
('regular', 'unusual')[ratio < self._threshold],
ratio))
for trip in pattern:
trip.trip_type = ('0','1')[ratio < self._threshold]
self.info("\t\tsetting trip_type of trip %s as %s" %
(trip.trip_id,
('regular', 'unusual')[ratio < self._threshold])) | python | def filter_line(self, route):
"""Mark unusual trips for the given route."""
if self._route_type is not None and self._route_type != route.route_type:
self.info('Skipping route %s due to different route_type value (%s)' %
(route['route_id'], route['route_type']))
return
self.info('Filtering infrequent trips for route %s.' % route.route_id)
trip_count = len(route.trips)
for pattern_id, pattern in route.GetPatternIdTripDict().items():
ratio = float(1.0 * len(pattern) / trip_count)
if not self._force:
if (ratio < self._threshold):
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as unusual (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
ratio))
for trip in pattern:
trip.trip_type = 1 # special
self.info("\t\tsetting trip_type of trip %s as special" %
trip.trip_id)
else:
self.info("\t%d trips on route %s with headsign '%s' recognized "
"as %s (ratio %f)" %
(len(pattern),
route['route_short_name'],
pattern[0]['trip_headsign'],
('regular', 'unusual')[ratio < self._threshold],
ratio))
for trip in pattern:
trip.trip_type = ('0','1')[ratio < self._threshold]
self.info("\t\tsetting trip_type of trip %s as %s" %
(trip.trip_id,
('regular', 'unusual')[ratio < self._threshold])) | [
"def",
"filter_line",
"(",
"self",
",",
"route",
")",
":",
"if",
"self",
".",
"_route_type",
"is",
"not",
"None",
"and",
"self",
".",
"_route_type",
"!=",
"route",
".",
"route_type",
":",
"self",
".",
"info",
"(",
"'Skipping route %s due to different route_typ... | Mark unusual trips for the given route. | [
"Mark",
"unusual",
"trips",
"for",
"the",
"given",
"route",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/unusual_trip_filter.py#L56-L90 | train | 220,024 |
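A worked instance of the ratio test above, assuming a route with 40 trips, one pattern covering 3 of them, and a threshold of 0.1 (the threshold value here is hypothetical):

trip_count = 40
pattern_size = 3
threshold = 0.1

ratio = 1.0 * pattern_size / trip_count  # 0.075
print(ratio < threshold)  # True -> those 3 trips get trip_type = 1 (special)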
google/transitfeed | unusual_trip_filter.py | UnusualTripFilter.filter | def filter(self, dataset):
"""Mark unusual trips for all the routes in the dataset."""
self.info('Going to filter infrequent routes in the dataset')
for route in dataset.routes.values():
self.filter_line(route) | python | def filter(self, dataset):
"""Mark unusual trips for all the routes in the dataset."""
self.info('Going to filter infrequent routes in the dataset')
for route in dataset.routes.values():
self.filter_line(route) | [
"def",
"filter",
"(",
"self",
",",
"dataset",
")",
":",
"self",
".",
"info",
"(",
"'Going to filter infrequent routes in the dataset'",
")",
"for",
"route",
"in",
"dataset",
".",
"routes",
".",
"values",
"(",
")",
":",
"self",
".",
"filter_line",
"(",
"route... | Mark unusual trips for all the routes in the dataset. | [
"Mark",
"unusual",
"trips",
"for",
"all",
"the",
"routes",
"in",
"the",
"dataset",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/unusual_trip_filter.py#L92-L96 | train | 220,025 |
google/transitfeed | schedule_viewer.py | StopToTuple | def StopToTuple(stop):
"""Return tuple as expected by javascript function addStopMarkerFromList"""
return (stop.stop_id, stop.stop_name, float(stop.stop_lat),
float(stop.stop_lon), stop.location_type) | python | def StopToTuple(stop):
"""Return tuple as expected by javascript function addStopMarkerFromList"""
return (stop.stop_id, stop.stop_name, float(stop.stop_lat),
float(stop.stop_lon), stop.location_type) | [
"def",
"StopToTuple",
"(",
"stop",
")",
":",
"return",
"(",
"stop",
".",
"stop_id",
",",
"stop",
".",
"stop_name",
",",
"float",
"(",
"stop",
".",
"stop_lat",
")",
",",
"float",
"(",
"stop",
".",
"stop_lon",
")",
",",
"stop",
".",
"location_type",
")... | Return tuple as expected by javascript function addStopMarkerFromList | [
"Return",
"tuple",
"as",
"expected",
"by",
"javascript",
"function",
"addStopMarkerFromList"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L89-L92 | train | 220,026 |
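Quick check of the tuple layout with a hypothetical stop record; lat/lon arrive as strings from the feed, hence the float() casts:

def StopToTuple(stop):  # repeated from above so the demo runs alone
    return (stop.stop_id, stop.stop_name, float(stop.stop_lat),
            float(stop.stop_lon), stop.location_type)

class _Stop(object):
    stop_id, stop_name = 'S1', 'Main St'
    stop_lat, stop_lon = '47.60', '-122.33'
    location_type = 0

print(StopToTuple(_Stop()))  # ('S1', 'Main St', 47.6, -122.33, 0)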
google/transitfeed | schedule_viewer.py | FindDefaultFileDir | def FindDefaultFileDir():
"""Return the path of the directory containing the static files. By default
the directory is called 'files'. The location depends on where setup.py put
it."""
base = FindPy2ExeBase()
if base:
return os.path.join(base, 'schedule_viewer_files')
else:
# For all other distributions 'files' is in the gtfsscheduleviewer
# directory.
base = os.path.dirname(gtfsscheduleviewer.__file__) # Strip __init__.py
return os.path.join(base, 'files') | python | def FindDefaultFileDir():
"""Return the path of the directory containing the static files. By default
the directory is called 'files'. The location depends on where setup.py put
it."""
base = FindPy2ExeBase()
if base:
return os.path.join(base, 'schedule_viewer_files')
else:
# For all other distributions 'files' is in the gtfsscheduleviewer
# directory.
base = os.path.dirname(gtfsscheduleviewer.__file__) # Strip __init__.py
return os.path.join(base, 'files') | [
"def",
"FindDefaultFileDir",
"(",
")",
":",
"base",
"=",
"FindPy2ExeBase",
"(",
")",
"if",
"base",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"'schedule_viewer_files'",
")",
"else",
":",
"# For all other distributions 'files' is in the gtfssc... | Return the path of the directory containing the static files. By default
the directory is called 'files'. The location depends on where setup.py put
it. | [
"Return",
"the",
"path",
"of",
"the",
"directory",
"containing",
"the",
"static",
"files",
".",
"By",
"default",
"the",
"directory",
"is",
"called",
"files",
".",
"The",
"location",
"depends",
"on",
"where",
"setup",
".",
"py",
"put",
"it",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L461-L472 | train | 220,027 |
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_routepatterns | def handle_json_GET_routepatterns(self, params):
"""Given a route_id generate a list of patterns of the route. For each
pattern include some basic information and a few sample trips."""
schedule = self.server.schedule
route = schedule.GetRoute(params.get('route', None))
if not route:
self.send_error(404)
return
time = int(params.get('time', 0))
date = params.get('date', "")
sample_size = 3 # For each pattern return the start time for this many trips
pattern_id_trip_dict = route.GetPatternIdTripDict()
patterns = []
for pattern_id, trips in pattern_id_trip_dict.items():
time_stops = trips[0].GetTimeStops()
if not time_stops:
continue
has_non_zero_trip_type = False
# Collect the trips whose service is active on the requested date
trips_with_service = []
for trip in trips:
service_id = trip.service_id
service_period = schedule.GetServicePeriod(service_id)
if date and not service_period.IsActiveOn(date):
continue
trips_with_service.append(trip)
if trip['trip_type'] and trip['trip_type'] != '0':
has_non_zero_trip_type = True
# We're only interested in the trips that do run on the specified date
trips = trips_with_service
name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
transitfeed.SortListOfTripByTime(trips)
num_trips = len(trips)
if num_trips <= sample_size:
start_sample_index = 0
num_after_sample = 0
else:
# Will return sample_size trips that start after the 'time' param.
# Linear search because I couldn't find a built-in way to do a binary
# search with a custom key.
start_sample_index = len(trips)
for i, trip in enumerate(trips):
if trip.GetStartTime() >= time:
start_sample_index = i
break
num_after_sample = num_trips - (start_sample_index + sample_size)
if num_after_sample < 0:
# Fewer than sample_size trips start after 'time', so return the
# last sample_size trips.
num_after_sample = 0
start_sample_index = num_trips - sample_size
sample = []
for t in trips[start_sample_index:start_sample_index + sample_size]:
sample.append( (t.GetStartTime(), t.trip_id) )
patterns.append((name, pattern_id, start_sample_index, sample,
num_after_sample, (0,1)[has_non_zero_trip_type]))
patterns.sort()
return patterns | python | def handle_json_GET_routepatterns(self, params):
"""Given a route_id generate a list of patterns of the route. For each
pattern include some basic information and a few sample trips."""
schedule = self.server.schedule
route = schedule.GetRoute(params.get('route', None))
if not route:
self.send_error(404)
return
time = int(params.get('time', 0))
date = params.get('date', "")
sample_size = 3 # For each pattern return the start time for this many trips
pattern_id_trip_dict = route.GetPatternIdTripDict()
patterns = []
for pattern_id, trips in pattern_id_trip_dict.items():
time_stops = trips[0].GetTimeStops()
if not time_stops:
continue
has_non_zero_trip_type = False
# Collect the trips whose service is active on the requested date
trips_with_service = []
for trip in trips:
service_id = trip.service_id
service_period = schedule.GetServicePeriod(service_id)
if date and not service_period.IsActiveOn(date):
continue
trips_with_service.append(trip)
if trip['trip_type'] and trip['trip_type'] != '0':
has_non_zero_trip_type = True
# We're only interested in the trips that do run on the specified date
trips = trips_with_service
name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
transitfeed.SortListOfTripByTime(trips)
num_trips = len(trips)
if num_trips <= sample_size:
start_sample_index = 0
num_after_sample = 0
else:
# Will return sample_size trips that start after the 'time' param.
# Linear search because I couldn't find a built-in way to do a binary
# search with a custom key.
start_sample_index = len(trips)
for i, trip in enumerate(trips):
if trip.GetStartTime() >= time:
start_sample_index = i
break
num_after_sample = num_trips - (start_sample_index + sample_size)
if num_after_sample < 0:
# Fewer than sample_size trips start after 'time', so return the
# last sample_size trips.
num_after_sample = 0
start_sample_index = num_trips - sample_size
sample = []
for t in trips[start_sample_index:start_sample_index + sample_size]:
sample.append( (t.GetStartTime(), t.trip_id) )
patterns.append((name, pattern_id, start_sample_index, sample,
num_after_sample, (0,1)[has_non_zero_trip_type]))
patterns.sort()
return patterns | [
"def",
"handle_json_GET_routepatterns",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"route",
"=",
"schedule",
".",
"GetRoute",
"(",
"params",
".",
"get",
"(",
"'route'",
",",
"None",
")",
")",
"if",
"not... | Given a route_id generate a list of patterns of the route. For each
pattern include some basic information and a few sample trips. | [
"Given",
"a",
"route_id",
"generate",
"a",
"list",
"of",
"patterns",
"of",
"the",
"route",
".",
"For",
"each",
"pattern",
"include",
"some",
"basic",
"information",
"and",
"a",
"few",
"sample",
"trips",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L187-L257 | train | 220,028 |
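The sampling-window arithmetic in isolation, on toy start times (values hypothetical; assumes more trips than sample_size, as the code above guarantees on this branch):

trips = [10, 20, 30, 40, 50]  # sorted trip start times
time, sample_size = 35, 3

start = next((i for i, t in enumerate(trips) if t >= time), len(trips))
num_after = len(trips) - (start + sample_size)
if num_after < 0:  # fewer than sample_size trips start after 'time'
    num_after = 0
    start = len(trips) - sample_size

print(trips[start:start + sample_size], num_after)  # [30, 40, 50] 0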
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_wrapper_GET | def handle_json_wrapper_GET(self, handler, parsed_params):
"""Call handler and output the return value in JSON."""
schedule = self.server.schedule
result = handler(parsed_params)
content = ResultEncoder().encode(result)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content) | python | def handle_json_wrapper_GET(self, handler, parsed_params):
"""Call handler and output the return value in JSON."""
schedule = self.server.schedule
result = handler(parsed_params)
content = ResultEncoder().encode(result)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content) | [
"def",
"handle_json_wrapper_GET",
"(",
"self",
",",
"handler",
",",
"parsed_params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"result",
"=",
"handler",
"(",
"parsed_params",
")",
"content",
"=",
"ResultEncoder",
"(",
")",
".",
"enco... | Call handler and output the return value in JSON. | [
"Call",
"handler",
"and",
"output",
"the",
"return",
"value",
"in",
"JSON",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L259-L268 | train | 220,029 |
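The encode-and-send step sketched with the standard json module standing in for ResultEncoder (defined elsewhere in this file; treating it as a json.JSONEncoder variant is an assumption):

import json

def handler(params):  # stand-in for a handle_json_GET_* method
    return [('route_1', '10', 'Downtown Express')]

content = json.dumps(handler({}))  # stand-in for ResultEncoder().encode(...)
print(len(content), content)       # the length feeds the Content-Length header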
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_routes | def handle_json_GET_routes(self, params):
"""Return a list of all routes."""
schedule = self.server.schedule
result = []
for r in schedule.GetRouteList():
result.append( (r.route_id, r.route_short_name, r.route_long_name) )
result.sort(key = lambda x: x[1:3])
return result | python | def handle_json_GET_routes(self, params):
"""Return a list of all routes."""
schedule = self.server.schedule
result = []
for r in schedule.GetRouteList():
result.append( (r.route_id, r.route_short_name, r.route_long_name) )
result.sort(key = lambda x: x[1:3])
return result | [
"def",
"handle_json_GET_routes",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"result",
"=",
"[",
"]",
"for",
"r",
"in",
"schedule",
".",
"GetRouteList",
"(",
")",
":",
"result",
".",
"append",
"(",
"("... | Return a list of all routes. | [
"Return",
"a",
"list",
"of",
"all",
"routes",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L270-L277 | train | 220,030 |
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_triprows | def handle_json_GET_triprows(self, params):
"""Return a list of rows from the feed file that are related to this
trip."""
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip', None))
except KeyError:
# if a non-existent trip is searched for, then return nothing
return
route = schedule.GetRoute(trip.route_id)
trip_row = dict(trip.iteritems())
route_row = dict(route.iteritems())
return [['trips.txt', trip_row], ['routes.txt', route_row]] | python | def handle_json_GET_triprows(self, params):
"""Return a list of rows from the feed file that are related to this
trip."""
schedule = self.server.schedule
try:
trip = schedule.GetTrip(params.get('trip', None))
except KeyError:
# if a non-existent trip is searched for, then return nothing
return
route = schedule.GetRoute(trip.route_id)
trip_row = dict(trip.iteritems())
route_row = dict(route.iteritems())
return [['trips.txt', trip_row], ['routes.txt', route_row]] | [
"def",
"handle_json_GET_triprows",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"try",
":",
"trip",
"=",
"schedule",
".",
"GetTrip",
"(",
"params",
".",
"get",
"(",
"'trip'",
",",
"None",
")",
")",
"exc... | Return a list of rows from the feed file that are related to this
trip. | [
"Return",
"a",
"list",
"of",
"rows",
"from",
"the",
"feed",
"file",
"that",
"are",
"related",
"to",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L284-L296 | train | 220,031 |
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_neareststops | def handle_json_GET_neareststops(self, params):
"""Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
schedule = self.server.schedule
lat = float(params.get('lat'))
lon = float(params.get('lon'))
limit = int(params.get('limit'))
stops = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
return [StopToTuple(s) for s in stops] | python | def handle_json_GET_neareststops(self, params):
"""Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
schedule = self.server.schedule
lat = float(params.get('lat'))
lon = float(params.get('lon'))
limit = int(params.get('limit'))
stops = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
return [StopToTuple(s) for s in stops] | [
"def",
"handle_json_GET_neareststops",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"lat",
"=",
"float",
"(",
"params",
".",
"get",
"(",
"'lat'",
")",
")",
"lon",
"=",
"float",
"(",
"params",
".",
"get"... | Return a list of the nearest 'limit' stops to 'lat', 'lon | [
"Return",
"a",
"list",
"of",
"the",
"nearest",
"limit",
"stops",
"to",
"lat",
"lon"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L337-L344 | train | 220,032 |
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_boundboxstops | def handle_json_GET_boundboxstops(self, params):
"""Return a list of up to 'limit' stops within bounding box with 'n','e'
and 's','w' in the NE and SW corners. Does not handle boxes crossing
longitude line 180."""
schedule = self.server.schedule
n = float(params.get('n'))
e = float(params.get('e'))
s = float(params.get('s'))
w = float(params.get('w'))
limit = int(params.get('limit'))
stops = schedule.GetStopsInBoundingBox(north=n, east=e, south=s, west=w, n=limit)
return [StopToTuple(s) for s in stops] | python | def handle_json_GET_boundboxstops(self, params):
"""Return a list of up to 'limit' stops within bounding box with 'n','e'
and 's','w' in the NE and SW corners. Does not handle boxes crossing
longitude line 180."""
schedule = self.server.schedule
n = float(params.get('n'))
e = float(params.get('e'))
s = float(params.get('s'))
w = float(params.get('w'))
limit = int(params.get('limit'))
stops = schedule.GetStopsInBoundingBox(north=n, east=e, south=s, west=w, n=limit)
return [StopToTuple(s) for s in stops] | [
"def",
"handle_json_GET_boundboxstops",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"n",
"=",
"float",
"(",
"params",
".",
"get",
"(",
"'n'",
")",
")",
"e",
"=",
"float",
"(",
"params",
".",
"get",
"... | Return a list of up to 'limit' stops within bounding box with 'n','e'
and 's','w' in the NE and SW corners. Does not handle boxes crossing
longitude line 180. | [
"Return",
"a",
"list",
"of",
"up",
"to",
"limit",
"stops",
"within",
"bounding",
"box",
"with",
"n",
"e",
"and",
"s",
"w",
"in",
"the",
"NE",
"and",
"SW",
"corners",
".",
"Does",
"not",
"handle",
"boxes",
"crossing",
"longitude",
"line",
"180",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L346-L357 | train | 220,033 |
google/transitfeed | schedule_viewer.py | ScheduleRequestHandler.handle_json_GET_stoptrips | def handle_json_GET_stoptrips(self, params):
"""Given a stop_id and time in seconds since midnight return the next
trips to visit the stop."""
schedule = self.server.schedule
stop = schedule.GetStop(params.get('stop', None))
time = int(params.get('time', 0))
date = params.get('date', "")
time_trips = stop.GetStopTimeTrips(schedule)
time_trips.sort() # OPT: use bisect.insort to make this O(N*ln(N)) -> O(N)
# Keep the first 5 after param 'time'.
# Need to make a tuple to find the correct bisect point
time_trips = time_trips[bisect.bisect_left(time_trips, (time, 0)):]
time_trips = time_trips[:5]
# TODO: combine times for a route to show next 2 departure times
result = []
for time, (trip, index), tp in time_trips:
service_id = trip.service_id
service_period = schedule.GetServicePeriod(service_id)
if date and not service_period.IsActiveOn(date):
continue
headsign = None
# Find the most recent headsign from the StopTime objects
for stoptime in trip.GetStopTimes()[index::-1]:
if stoptime.stop_headsign:
headsign = stoptime.stop_headsign
break
# If stop_headsign isn't found, look for a trip_headsign
if not headsign:
headsign = trip.trip_headsign
route = schedule.GetRoute(trip.route_id)
trip_name = ''
if route.route_short_name:
trip_name += route.route_short_name
if route.route_long_name:
if len(trip_name):
trip_name += " - "
trip_name += route.route_long_name
if headsign:
trip_name += " (Direction: %s)" % headsign
result.append((time, (trip.trip_id, trip_name, trip.service_id), tp))
return result | python | def handle_json_GET_stoptrips(self, params):
"""Given a stop_id and time in seconds since midnight return the next
trips to visit the stop."""
schedule = self.server.schedule
stop = schedule.GetStop(params.get('stop', None))
time = int(params.get('time', 0))
date = params.get('date', "")
time_trips = stop.GetStopTimeTrips(schedule)
time_trips.sort() # OPT: use bisect.insort to make this O(N*ln(N)) -> O(N)
# Keep the first 5 after param 'time'.
# Need to make a tuple to find the correct bisect point
time_trips = time_trips[bisect.bisect_left(time_trips, (time, 0)):]
time_trips = time_trips[:5]
# TODO: combine times for a route to show next 2 departure times
result = []
for time, (trip, index), tp in time_trips:
service_id = trip.service_id
service_period = schedule.GetServicePeriod(service_id)
if date and not service_period.IsActiveOn(date):
continue
headsign = None
# Find the most recent headsign from the StopTime objects
for stoptime in trip.GetStopTimes()[index::-1]:
if stoptime.stop_headsign:
headsign = stoptime.stop_headsign
break
# If stop_headsign isn't found, look for a trip_headsign
if not headsign:
headsign = trip.trip_headsign
route = schedule.GetRoute(trip.route_id)
trip_name = ''
if route.route_short_name:
trip_name += route.route_short_name
if route.route_long_name:
if len(trip_name):
trip_name += " - "
trip_name += route.route_long_name
if headsign:
trip_name += " (Direction: %s)" % headsign
result.append((time, (trip.trip_id, trip_name, trip.service_id), tp))
return result | [
"def",
"handle_json_GET_stoptrips",
"(",
"self",
",",
"params",
")",
":",
"schedule",
"=",
"self",
".",
"server",
".",
"schedule",
"stop",
"=",
"schedule",
".",
"GetStop",
"(",
"params",
".",
"get",
"(",
"'stop'",
",",
"None",
")",
")",
"time",
"=",
"i... | Given a stop_id and time in seconds since midnight return the next
trips to visit the stop. | [
"Given",
"a",
"stop_id",
"and",
"time",
"in",
"seconds",
"since",
"midnight",
"return",
"the",
"next",
"trips",
"to",
"visit",
"the",
"stop",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/schedule_viewer.py#L368-L410 | train | 220,034 |
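The bisect step in isolation: entries sort by departure time, and the (time, 0) probe sorts before any real entry at the same time, so bisect_left lands on the first departure at or after 'time' (toy data below):

import bisect

time_trips = [(100, 1), (200, 2), (300, 3), (400, 4)]  # (departure, trip key)
time = 200
cut = bisect.bisect_left(time_trips, (time, 0))
print(time_trips[cut:cut + 5])  # [(200, 2), (300, 3), (400, 4)]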
google/transitfeed | misc/traceplus.py | MakeExpandedTrace | def MakeExpandedTrace(frame_records):
"""Return a list of text lines for the given list of frame records."""
dump = []
for (frame_obj, filename, line_num, fun_name, context_lines,
context_index) in frame_records:
dump.append('File "%s", line %d, in %s\n' % (filename, line_num,
fun_name))
if context_lines:
for (i, line) in enumerate(context_lines):
if i == context_index:
dump.append(' --> %s' % line)
else:
dump.append(' %s' % line)
for local_name, local_val in frame_obj.f_locals.items():
try:
local_type_name = type(local_val).__name__
except Exception as e:
local_type_name = ' Exception in type({}).__name__: {}'.format(local_name, e)
try:
truncated_val = repr(local_val)[0:500]
except Exception as e:
dump.append(' Exception in repr({}): {}\n'.format(local_name, e))
else:
if len(truncated_val) >= 500:
truncated_val = '%s...' % truncated_val[0:499]
dump.append(' {} = {} ({})\n'.format(local_name, truncated_val, local_type_name))
dump.append('\n')
return dump | python | def MakeExpandedTrace(frame_records):
"""Return a list of text lines for the given list of frame records."""
dump = []
for (frame_obj, filename, line_num, fun_name, context_lines,
context_index) in frame_records:
dump.append('File "%s", line %d, in %s\n' % (filename, line_num,
fun_name))
if context_lines:
for (i, line) in enumerate(context_lines):
if i == context_index:
dump.append(' --> %s' % line)
else:
dump.append(' %s' % line)
for local_name, local_val in frame_obj.f_locals.items():
try:
local_type_name = type(local_val).__name__
except Exception as e:
local_type_name = ' Exception in type({}).__name__: {}'.format(local_name, e)
try:
truncated_val = repr(local_val)[0:500]
except Exception as e:
dump.append(' Exception in repr({}): {}\n'.format(local_name, e))
else:
if len(truncated_val) >= 500:
truncated_val = '%s...' % truncated_val[0:499]
dump.append(' {} = {} ({})\n'.format(local_name, truncated_val, local_type_name))
dump.append('\n')
return dump | [
"def",
"MakeExpandedTrace",
"(",
"frame_records",
")",
":",
"dump",
"=",
"[",
"]",
"for",
"(",
"frame_obj",
",",
"filename",
",",
"line_num",
",",
"fun_name",
",",
"context_lines",
",",
"context_index",
")",
"in",
"frame_records",
":",
"dump",
".",
"append",... | Return a list of text lines for the given list of frame records. | [
"Return",
"a",
"list",
"of",
"text",
"lines",
"for",
"the",
"given",
"list",
"of",
"frame",
"records",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/misc/traceplus.py#L24-L51 | train | 220,035 |
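A hedged usage sketch: inspect.getinnerframes() yields exactly the six-field frame records the function unpacks. It assumes MakeExpandedTrace, as defined above, is in scope:

import inspect
import sys
# from traceplus import MakeExpandedTrace  # the function above (module name assumed)

def _fail():
    local_hint = 'visible in the dump'  # shows up as a dumped local
    raise ValueError('boom')

try:
    _fail()
except ValueError:
    records = inspect.getinnerframes(sys.exc_info()[2])
    print(''.join(MakeExpandedTrace(records)))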
google/transitfeed | transitfeed/shapepoint.py | ShapePoint.ParseAttributes | def ParseAttributes(self, problems):
"""Parse all attributes, calling problems as needed.
Return True if all of the values are valid.
"""
if util.IsEmpty(self.shape_id):
problems.MissingValue('shape_id')
return
try:
if not isinstance(self.shape_pt_sequence, int):
self.shape_pt_sequence = \
util.NonNegIntStringToInt(self.shape_pt_sequence, problems)
elif self.shape_pt_sequence < 0:
problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence,
'Value should be a number (0 or higher)')
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence,
'Value should be a number (0 or higher)')
return
try:
if not isinstance(self.shape_pt_lat, (int, float)):
self.shape_pt_lat = util.FloatStringToFloat(self.shape_pt_lat, problems)
if abs(self.shape_pt_lat) > 90.0:
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat)
return
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat)
return
try:
if not isinstance(self.shape_pt_lon, (int, float)):
self.shape_pt_lon = util.FloatStringToFloat(self.shape_pt_lon, problems)
if abs(self.shape_pt_lon) > 180.0:
problems.InvalidValue('shape_pt_lon', self.shape_pt_lon)
return
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_lon', self.shape_pt_lon)
return
if abs(self.shape_pt_lat) < 1.0 and abs(self.shape_pt_lon) < 1.0:
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat,
'Point location too close to 0, 0, which means '
'that it\'s probably an incorrect location.',
type=problems_module.TYPE_WARNING)
return
if self.shape_dist_traveled == '':
self.shape_dist_traveled = None
if (self.shape_dist_traveled is not None and
not isinstance(self.shape_dist_traveled, (int, float))):
try:
self.shape_dist_traveled = \
util.FloatStringToFloat(self.shape_dist_traveled, problems)
except (TypeError, ValueError):
problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled,
'This value should be a non-negative number.')
return
if self.shape_dist_traveled is not None and self.shape_dist_traveled < 0:
problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled,
'This value should be a non-negative number.')
return
return True | python | def ParseAttributes(self, problems):
"""Parse all attributes, calling problems as needed.
Return True if all of the values are valid.
"""
if util.IsEmpty(self.shape_id):
problems.MissingValue('shape_id')
return
try:
if not isinstance(self.shape_pt_sequence, int):
self.shape_pt_sequence = \
util.NonNegIntStringToInt(self.shape_pt_sequence, problems)
elif self.shape_pt_sequence < 0:
problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence,
'Value should be a number (0 or higher)')
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence,
'Value should be a number (0 or higher)')
return
try:
if not isinstance(self.shape_pt_lat, (int, float)):
self.shape_pt_lat = util.FloatStringToFloat(self.shape_pt_lat, problems)
if abs(self.shape_pt_lat) > 90.0:
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat)
return
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat)
return
try:
if not isinstance(self.shape_pt_lon, (int, float)):
self.shape_pt_lon = util.FloatStringToFloat(self.shape_pt_lon, problems)
if abs(self.shape_pt_lon) > 180.0:
problems.InvalidValue('shape_pt_lon', self.shape_pt_lon)
return
except (TypeError, ValueError):
problems.InvalidValue('shape_pt_lon', self.shape_pt_lon)
return
if abs(self.shape_pt_lat) < 1.0 and abs(self.shape_pt_lon) < 1.0:
problems.InvalidValue('shape_pt_lat', self.shape_pt_lat,
'Point location too close to 0, 0, which means '
'that it\'s probably an incorrect location.',
type=problems_module.TYPE_WARNING)
return
if self.shape_dist_traveled == '':
self.shape_dist_traveled = None
if (self.shape_dist_traveled is not None and
not isinstance(self.shape_dist_traveled, (int, float))):
try:
self.shape_dist_traveled = \
util.FloatStringToFloat(self.shape_dist_traveled, problems)
except (TypeError, ValueError):
problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled,
'This value should be a non-negative number.')
return
if self.shape_dist_traveled is not None and self.shape_dist_traveled < 0:
problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled,
'This value should be a non-negative number.')
return
return True | [
"def",
"ParseAttributes",
"(",
"self",
",",
"problems",
")",
":",
"if",
"util",
".",
"IsEmpty",
"(",
"self",
".",
"shape_id",
")",
":",
"problems",
".",
"MissingValue",
"(",
"'shape_id'",
")",
"return",
"try",
":",
"if",
"not",
"isinstance",
"(",
"self",... | Parse all attributes, calling problems as needed.
Return True if all of the values are valid. | [
"Parse",
"all",
"attributes",
"calling",
"problems",
"as",
"needed",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shapepoint.py#L59-L125 | train | 220,036 |
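The latitude range check in isolation; treating util.FloatStringToFloat as roughly float() plus problem reporting is a simplification:

shape_pt_lat = float('91.5')  # feed values arrive as strings
if abs(shape_pt_lat) > 90.0:
    print('invalid shape_pt_lat: %r' % shape_pt_lat)  # -> InvalidValue report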
google/transitfeed | transitfeed/problems.py | ProblemReporter.SetFileContext | def SetFileContext(self, file_name, row_num, row, headers):
"""Save the current context to be output with any errors.
Args:
file_name: string
row_num: int
row: list of strings
headers: list of column headers, its order corresponding to row's
"""
self._context = (file_name, row_num, row, headers) | python | def SetFileContext(self, file_name, row_num, row, headers):
"""Save the current context to be output with any errors.
Args:
file_name: string
row_num: int
row: list of strings
headers: list of column headers, its order corresponding to row's
"""
self._context = (file_name, row_num, row, headers) | [
"def",
"SetFileContext",
"(",
"self",
",",
"file_name",
",",
"row_num",
",",
"row",
",",
"headers",
")",
":",
"self",
".",
"_context",
"=",
"(",
"file_name",
",",
"row_num",
",",
"row",
",",
"headers",
")"
] | Save the current context to be output with any errors.
Args:
file_name: string
row_num: int
row: list of strings
headers: list of column headers, its order corresponding to row's | [
"Save",
"the",
"current",
"context",
"to",
"be",
"output",
"with",
"any",
"errors",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L53-L62 | train | 220,037 |
google/transitfeed | transitfeed/problems.py | ProblemReporter.InvalidLineEnd | def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
"""bad_line_end is a human readable string."""
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context,
context2=self._context, type=type)
self.AddToAccumulator(e) | python | def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
"""bad_line_end is a human readable string."""
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context,
context2=self._context, type=type)
self.AddToAccumulator(e) | [
"def",
"InvalidLineEnd",
"(",
"self",
",",
"bad_line_end",
",",
"context",
"=",
"None",
",",
"type",
"=",
"TYPE_WARNING",
")",
":",
"e",
"=",
"InvalidLineEnd",
"(",
"bad_line_end",
"=",
"bad_line_end",
",",
"context",
"=",
"context",
",",
"context2",
"=",
... | bad_line_end is a human readable string. | [
"bad_line_end",
"is",
"a",
"human",
"readable",
"string",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L268-L272 | train | 220,038 |
google/transitfeed | transitfeed/problems.py | ExceptionWithContext.GetDictToFormat | def GetDictToFormat(self):
"""Return a copy of self as a dict, suitable for passing to FormatProblem"""
d = {}
for k, v in self.__dict__.items():
# TODO: Better handling of unicode/utf-8 within Schedule objects.
# Concatenating a unicode and utf-8 str object causes an exception such
# as "UnicodeDecodeError: 'ascii' codec can't decode byte ..." as python
# tries to convert the str to a unicode. To avoid that happening within
# the problem reporter convert all unicode attributes to utf-8.
# Currently valid utf-8 fields are converted to unicode in _ReadCsvDict.
# Perhaps all fields should be left as utf-8.
d[k] = util.EncodeUnicode(v)
return d | python | def GetDictToFormat(self):
"""Return a copy of self as a dict, suitable for passing to FormatProblem"""
d = {}
for k, v in self.__dict__.items():
# TODO: Better handling of unicode/utf-8 within Schedule objects.
# Concatenating a unicode and utf-8 str object causes an exception such
# as "UnicodeDecodeError: 'ascii' codec can't decode byte ..." as python
# tries to convert the str to a unicode. To avoid that happening within
# the problem reporter convert all unicode attributes to utf-8.
# Currently valid utf-8 fields are converted to unicode in _ReadCsvDict.
# Perhaps all fields should be left as utf-8.
d[k] = util.EncodeUnicode(v)
return d | [
"def",
"GetDictToFormat",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"# TODO: Better handling of unicode/utf-8 within Schedule objects.",
"# Concatinating a unicode and utf-8 str object ... | Return a copy of self as a dict, suitable for passing to FormatProblem | [
"Return",
"a",
"copy",
"of",
"self",
"as",
"a",
"dict",
"suitable",
"for",
"passing",
"to",
"FormatProblem"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L445-L457 | train | 220,039 |
google/transitfeed | transitfeed/problems.py | ExceptionWithContext.FormatProblem | def FormatProblem(self, d=None):
"""Return a text string describing the problem.
Args:
d: map returned by GetDictToFormat with formatting added
"""
if not d:
d = self.GetDictToFormat()
output_error_text = self.__class__.ERROR_TEXT % d
if ('reason' in d) and d['reason']:
return '%s\n%s' % (output_error_text, d['reason'])
else:
return output_error_text | python | def FormatProblem(self, d=None):
"""Return a text string describing the problem.
Args:
d: map returned by GetDictToFormat with formatting added
"""
if not d:
d = self.GetDictToFormat()
output_error_text = self.__class__.ERROR_TEXT % d
if ('reason' in d) and d['reason']:
return '%s\n%s' % (output_error_text, d['reason'])
else:
return output_error_text | [
"def",
"FormatProblem",
"(",
"self",
",",
"d",
"=",
"None",
")",
":",
"if",
"not",
"d",
":",
"d",
"=",
"self",
".",
"GetDictToFormat",
"(",
")",
"output_error_text",
"=",
"self",
".",
"__class__",
".",
"ERROR_TEXT",
"%",
"d",
"if",
"(",
"'reason'",
"... | Return a text string describing the problem.
Args:
d: map returned by GetDictToFormat with formatting added | [
"Return",
"a",
"text",
"string",
"describing",
"the",
"problem",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L459-L472 | train | 220,040 |
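The formatting contract in isolation: ERROR_TEXT is a %-template on each exception class, filled from the attribute dict, with an optional reason appended (the template text below is hypothetical):

ERROR_TEXT = 'Invalid value %(value)s in column %(column_name)s'
d = {'value': 'abc', 'column_name': 'stop_lat', 'reason': 'not a number'}

text = ERROR_TEXT % d
if d.get('reason'):
    text = '%s\n%s' % (text, d['reason'])
print(text)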
google/transitfeed | transitfeed/problems.py | ExceptionWithContext.FormatContext | def FormatContext(self):
"""Return a text string describing the context"""
text = ''
if hasattr(self, 'feed_name'):
text += "In feed '%s': " % self.feed_name
if hasattr(self, 'file_name'):
text += self.file_name
if hasattr(self, 'row_num'):
text += ":%i" % self.row_num
if hasattr(self, 'column_name'):
text += " column %s" % self.column_name
return text | python | def FormatContext(self):
"""Return a text string describing the context"""
text = ''
if hasattr(self, 'feed_name'):
text += "In feed '%s': " % self.feed_name
if hasattr(self, 'file_name'):
text += self.file_name
if hasattr(self, 'row_num'):
text += ":%i" % self.row_num
if hasattr(self, 'column_name'):
text += " column %s" % self.column_name
return text | [
"def",
"FormatContext",
"(",
"self",
")",
":",
"text",
"=",
"''",
"if",
"hasattr",
"(",
"self",
",",
"'feed_name'",
")",
":",
"text",
"+=",
"\"In feed '%s': \"",
"%",
"self",
".",
"feed_name",
"if",
"hasattr",
"(",
"self",
",",
"'file_name'",
")",
":",
... | Return a text string describing the context | [
"Return",
"a",
"text",
"string",
"describing",
"the",
"context"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L474-L485 | train | 220,041 |
google/transitfeed | transitfeed/problems.py | ExceptionWithContext.GetOrderKey | def GetOrderKey(self):
"""Return a tuple that can be used to sort problems into a consistent order.
Returns:
A list of values.
"""
context_attributes = ['_type']
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens | python | def GetOrderKey(self):
"""Return a tuple that can be used to sort problems into a consistent order.
Returns:
A list of values.
"""
context_attributes = ['_type']
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens | [
"def",
"GetOrderKey",
"(",
"self",
")",
":",
"context_attributes",
"=",
"[",
"'_type'",
"]",
"context_attributes",
".",
"extend",
"(",
"ExceptionWithContext",
".",
"CONTEXT_PARTS",
")",
"context_attributes",
".",
"extend",
"(",
"self",
".",
"_GetExtraOrderAttributes... | Return a tuple that can be used to sort problems into a consistent order.
Returns:
A list of values. | [
"Return",
"a",
"tuple",
"that",
"can",
"be",
"used",
"to",
"sort",
"problems",
"into",
"a",
"consistent",
"order",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L506-L519 | train | 220,042 |
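Typical use of the key, with stand-in problem objects (names hypothetical): sorting accumulated problems into a stable report order:

class _Problem(object):
    def __init__(self, key):
        self._key = key
    def GetOrderKey(self):
        return self._key

problems = [_Problem([2, 'b.txt']), _Problem([1, 'a.txt'])]
problems.sort(key=lambda p: p.GetOrderKey())
print([p.GetOrderKey() for p in problems])  # [[1, 'a.txt'], [2, 'b.txt']]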
google/transitfeed | visualize_pathways.py | gtfs_to_graphviz | def gtfs_to_graphviz(gtfs, stop_ids=None):
"""Reads GTFS data and returns GraphViz DOT file content as string.
"""
graph = GraphViz()
location_ids = choose_location_ids(gtfs, stop_ids)
locations = [gtfs.get_location(i) for i in location_ids]
for location in locations:
if not location.parent_id:
graph.add_cluster(GraphCluster(
location.gtfs_id,
location_label(location, max_length=-1),
location_color(location.location_type)))
for location in locations:
if location.parent_id and requires_platform_cluster(location):
graph.get_cluster(location.parent_id).add_cluster(GraphCluster(
location.gtfs_id,
location_label(location),
location_color(location.location_type)))
for location in locations:
if not location.parent_id or requires_platform_cluster(location):
continue
node = GraphNode(
location.gtfs_id,
location_label(location, max_length=25),
location_color(location.location_type),
location_shape(location.location_type))
cluster = graph.get_cluster(location.station().gtfs_id)
if location.location_type == LocationType.boarding_area:
cluster = cluster.get_cluster(location.parent_id)
cluster.nodes.append(node)
for pathway in gtfs.pathways:
if pathway.from_id in location_ids and pathway.to_id in location_ids:
graph.edges.append(GraphEdge(
pathway.from_id, pathway.to_id,
'both' if pathway.is_bidirectional else 'forward',
pathway_label(pathway)))
return graph | python | def gtfs_to_graphviz(gtfs, stop_ids=None):
"""Reads GTFS data and returns GraphViz DOT file content as string.
"""
graph = GraphViz()
location_ids = choose_location_ids(gtfs, stop_ids)
locations = [gtfs.get_location(i) for i in location_ids]
for location in locations:
if not location.parent_id:
graph.add_cluster(GraphCluster(
location.gtfs_id,
location_label(location, max_length=-1),
location_color(location.location_type)))
for location in locations:
if location.parent_id and requires_platform_cluster(location):
graph.get_cluster(location.parent_id).add_cluster(GraphCluster(
location.gtfs_id,
location_label(location),
location_color(location.location_type)))
for location in locations:
if not location.parent_id or requires_platform_cluster(location):
continue
node = GraphNode(
location.gtfs_id,
location_label(location, max_length=25),
location_color(location.location_type),
location_shape(location.location_type))
cluster = graph.get_cluster(location.station().gtfs_id)
if location.location_type == LocationType.boarding_area:
cluster = cluster.get_cluster(location.parent_id)
cluster.nodes.append(node)
for pathway in gtfs.pathways:
if pathway.from_id in location_ids and pathway.to_id in location_ids:
graph.edges.append(GraphEdge(
pathway.from_id, pathway.to_id,
'both' if pathway.is_bidirectional else 'forward',
pathway_label(pathway)))
return graph | [
"def",
"gtfs_to_graphviz",
"(",
"gtfs",
",",
"stop_ids",
"=",
"None",
")",
":",
"graph",
"=",
"GraphViz",
"(",
")",
"location_ids",
"=",
"choose_location_ids",
"(",
"gtfs",
",",
"stop_ids",
")",
"locations",
"=",
"[",
"gtfs",
".",
"get_location",
"(",
"i",... | Reads GTFS data and returns GraphViz DOT file content as string. | [
"Reads",
"GTFS",
"data",
"and",
"returns",
"GraphViz",
"DOT",
"file",
"content",
"as",
"string",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/visualize_pathways.py#L424-L466 | train | 220,043 |
google/transitfeed | transitfeed/gtfsfactoryuser.py | GtfsFactoryUser.GetGtfsFactory | def GetGtfsFactory(self):
"""Return the object's GTFS Factory.
Returns:
The GTFS Factory that was set for this object. If none was explicitly
set, it first sets the object's factory to transitfeed's GtfsFactory
and returns it"""
if self._gtfs_factory is None:
#TODO(anog): We really need to create a dependency graph and clean things
# up, as the comment in __init__.py says.
# Not having GenericGTFSObject as a leaf (with no other
# imports) creates all sorts of circular import problems.
# This is why the import is here and not at the top level.
# When this runs, gtfsfactory should have already been loaded
# by other modules, avoiding the circular imports.
from . import gtfsfactory
self._gtfs_factory = gtfsfactory.GetGtfsFactory()
return self._gtfs_factory | python | def GetGtfsFactory(self):
"""Return the object's GTFS Factory.
Returns:
The GTFS Factory that was set for this object. If none was explicitly
set, it first sets the object's factory to transitfeed's GtfsFactory
and returns it"""
if self._gtfs_factory is None:
#TODO(anog): We really need to create a dependency graph and clean things
# up, as the comment in __init__.py says.
# Not having GenericGTFSObject as a leaf (with no other
# imports) creates all sorts of circular import problems.
# This is why the import is here and not at the top level.
# When this runs, gtfsfactory should have already been loaded
# by other modules, avoiding the circular imports.
from . import gtfsfactory
self._gtfs_factory = gtfsfactory.GetGtfsFactory()
return self._gtfs_factory | [
"def",
"GetGtfsFactory",
"(",
"self",
")",
":",
"if",
"self",
".",
"_gtfs_factory",
"is",
"None",
":",
"#TODO(anog): We really need to create a dependency graph and clean things",
"# up, as the comment in __init__.py says.",
"# Not having GenericGTFSObject as a le... | Return the object's GTFS Factory.
Returns:
The GTFS Factory that was set for this object. If none was explicitly
set, it first sets the object's factory to transitfeed's GtfsFactory
and returns it | [
"Return",
"the",
"object",
"s",
"GTFS",
"Factory",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/gtfsfactoryuser.py#L26-L44 | train | 220,044 |
google/transitfeed | transitfeed/gtfsobjectbase.py | GtfsObjectBase.keys | def keys(self):
"""Return iterable of columns used by this object."""
columns = set()
for name in vars(self):
if (not name) or name[0] == "_":
continue
columns.add(name)
return columns | python | def keys(self):
"""Return iterable of columns used by this object."""
columns = set()
for name in vars(self):
if (not name) or name[0] == "_":
continue
columns.add(name)
return columns | [
"def",
"keys",
"(",
"self",
")",
":",
"columns",
"=",
"set",
"(",
")",
"for",
"name",
"in",
"vars",
"(",
"self",
")",
":",
"if",
"(",
"not",
"name",
")",
"or",
"name",
"[",
"0",
"]",
"==",
"\"_\"",
":",
"continue",
"columns",
".",
"add",
"(",
... | Return iterable of columns used by this object. | [
"Return",
"iterable",
"of",
"columns",
"used",
"by",
"this",
"object",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/gtfsobjectbase.py#L108-L115 | train | 220,045 |
google/transitfeed | transitfeed/shape.py | Shape.AddShapePointObjectUnsorted | def AddShapePointObjectUnsorted(self, shapepoint, problems):
"""Insert a point into a correct position by sequence. """
if (len(self.sequence) == 0 or
shapepoint.shape_pt_sequence >= self.sequence[-1]):
index = len(self.sequence)
elif shapepoint.shape_pt_sequence <= self.sequence[0]:
index = 0
else:
index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
if shapepoint.shape_pt_sequence in self.sequence:
problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
'The sequence number %d occurs more than once in '
'shape %s.' %
(shapepoint.shape_pt_sequence, self.shape_id))
if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
if (index != len(self.sequence) and
shapepoint.shape_dist_traveled > self.distance[index]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that is not larger '
'than the next one. In this case, the next '
'distance was %f.' % self.distance[index])
if (index > 0 and
shapepoint.shape_dist_traveled < self.distance[index - 1]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that\'s at least as large as '
'the previous ones. In this case, the previous '
'distance was %f.' % self.distance[index - 1])
if shapepoint.shape_dist_traveled > self.max_distance:
self.max_distance = shapepoint.shape_dist_traveled
self.sequence.insert(index, shapepoint.shape_pt_sequence)
self.distance.insert(index, shapepoint.shape_dist_traveled)
self.points.insert(index, (shapepoint.shape_pt_lat,
shapepoint.shape_pt_lon,
shapepoint.shape_dist_traveled)) | python | def AddShapePointObjectUnsorted(self, shapepoint, problems):
"""Insert a point into a correct position by sequence. """
if (len(self.sequence) == 0 or
shapepoint.shape_pt_sequence >= self.sequence[-1]):
index = len(self.sequence)
elif shapepoint.shape_pt_sequence <= self.sequence[0]:
index = 0
else:
index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
if shapepoint.shape_pt_sequence in self.sequence:
problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
'The sequence number %d occurs more than once in '
'shape %s.' %
(shapepoint.shape_pt_sequence, self.shape_id))
if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
if (index != len(self.sequence) and
shapepoint.shape_dist_traveled > self.distance[index]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that is not larger '
'than the next one. In this case, the next '
'distance was %f.' % self.distance[index])
if (index > 0 and
shapepoint.shape_dist_traveled < self.distance[index - 1]):
problems.InvalidValue('shape_dist_traveled',
shapepoint.shape_dist_traveled,
'Each subsequent point in a shape should have '
'a distance value that\'s at least as large as '
'the previous ones. In this case, the previous '
'distance was %f.' % self.distance[index - 1])
if shapepoint.shape_dist_traveled > self.max_distance:
self.max_distance = shapepoint.shape_dist_traveled
self.sequence.insert(index, shapepoint.shape_pt_sequence)
self.distance.insert(index, shapepoint.shape_dist_traveled)
self.points.insert(index, (shapepoint.shape_pt_lat,
shapepoint.shape_pt_lon,
shapepoint.shape_dist_traveled)) | [
"def",
"AddShapePointObjectUnsorted",
"(",
"self",
",",
"shapepoint",
",",
"problems",
")",
":",
"if",
"(",
"len",
"(",
"self",
".",
"sequence",
")",
"==",
"0",
"or",
"shapepoint",
".",
"shape_pt_sequence",
">=",
"self",
".",
"sequence",
"[",
"-",
"1",
"... | Insert a point into a correct position by sequence. | [
"Insert",
"a",
"point",
"into",
"a",
"correct",
"position",
"by",
"sequence",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shape.py#L54-L96 | train | 220,046 |
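The insertion-point rule on a toy sequence list, mirroring the three branches above:

import bisect

def insert_index(seq, pt_sequence):
    if not seq or pt_sequence >= seq[-1]:
        return len(seq)
    if pt_sequence <= seq[0]:
        return 0
    return bisect.bisect(seq, pt_sequence)

sequence = [1, 3, 5, 9]
print(insert_index(sequence, 4))   # 2 -> between 3 and 5
print(insert_index(sequence, 0))   # 0 -> prepend
print(insert_index(sequence, 12))  # 4 -> append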
google/transitfeed | transitfeed/shape.py | Shape.GetPointWithDistanceTraveled | def GetPointWithDistanceTraveled(self, shape_dist_traveled):
"""Returns a point on the shape polyline with the input shape_dist_traveled.
Args:
shape_dist_traveled: The input shape_dist_traveled.
Returns:
The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and
lng are the location of the shape point, and shape_dist_traveled is an
increasing metric representing the distance traveled along the shape.
Returns None if there is a data error in the shape.
"""
if not self.distance:
return None
if shape_dist_traveled <= self.distance[0]:
return self.points[0]
if shape_dist_traveled >= self.distance[-1]:
return self.points[-1]
index = bisect.bisect(self.distance, shape_dist_traveled)
(lat0, lng0, dist0) = self.points[index - 1]
(lat1, lng1, dist1) = self.points[index]
# Interpolate if shape_dist_traveled does not equal the distance of any
# point in the shape segment.
# (lat0, lng0) (lat, lng) (lat1, lng1)
# -----|--------------------|---------------------|------
# dist0 shape_dist_traveled dist1
# \------- ca --------/ \-------- bc -------/
# \----------------- ba ------------------/
ca = shape_dist_traveled - dist0
bc = dist1 - shape_dist_traveled
ba = bc + ca
if ba == 0:
# This only happens when there's a data error in the shapes and it should
# have been caught before. Check to avoid a crash.
return None
# This won't work crossing longitude 180 and is only an approximation, which
# works well for short distances.
lat = (lat1 * ca + lat0 * bc) / ba
lng = (lng1 * ca + lng0 * bc) / ba
return (lat, lng, shape_dist_traveled) | python | def GetPointWithDistanceTraveled(self, shape_dist_traveled):
"""Returns a point on the shape polyline with the input shape_dist_traveled.
Args:
shape_dist_traveled: The input shape_dist_traveled.
Returns:
The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and
lng are the location of the shape point, and shape_dist_traveled is an
increasing metric representing the distance traveled along the shape.
Returns None if there is a data error in the shape.
"""
if not self.distance:
return None
if shape_dist_traveled <= self.distance[0]:
return self.points[0]
if shape_dist_traveled >= self.distance[-1]:
return self.points[-1]
index = bisect.bisect(self.distance, shape_dist_traveled)
(lat0, lng0, dist0) = self.points[index - 1]
(lat1, lng1, dist1) = self.points[index]
# Interpolate if shape_dist_traveled does not equal the distance of any
# point in the shape segment.
# (lat0, lng0) (lat, lng) (lat1, lng1)
# -----|--------------------|---------------------|------
# dist0 shape_dist_traveled dist1
# \------- ca --------/ \-------- bc -------/
# \----------------- ba ------------------/
ca = shape_dist_traveled - dist0
bc = dist1 - shape_dist_traveled
ba = bc + ca
if ba == 0:
# This only happens when there's a data error in the shapes and it should
# have been caught before. Check to avoid a crash.
return None
# This won't work crossing longitude 180 and is only an approximation, which
# works well for short distances.
lat = (lat1 * ca + lat0 * bc) / ba
lng = (lng1 * ca + lng0 * bc) / ba
return (lat, lng, shape_dist_traveled) | [
"def",
"GetPointWithDistanceTraveled",
"(",
"self",
",",
"shape_dist_traveled",
")",
":",
"if",
"not",
"self",
".",
"distance",
":",
"return",
"None",
"if",
"shape_dist_traveled",
"<=",
"self",
".",
"distance",
"[",
"0",
"]",
":",
"return",
"self",
".",
"poi... | Returns a point on the shape polyline with the input shape_dist_traveled.
Args:
shape_dist_traveled: The input shape_dist_traveled.
Returns:
The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and
lng are the location of the shape point, and shape_dist_traveled is an
increasing metric representing the distance traveled along the shape.
Returns None if there is a data error in the shape. | [
"Returns",
"a",
"point",
"on",
"the",
"shape",
"polyline",
"with",
"the",
"input",
"shape_dist_traveled",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shape.py#L129-L170 | train | 220,047 |
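The interpolation above is plain linear weighting between the two bracketing points; a worked sketch with made-up numbers (not from any real feed):

# Segment endpoints: dist0 = 100 at (10.0, 20.0), dist1 = 200 at (10.1, 20.1).
# Query shape_dist_traveled = 125:
ca = 125 - 100       # distance past the earlier point: 25
bc = 200 - 125       # distance short of the later point: 75
ba = ca + bc         # segment length: 100
lat = (10.1 * ca + 10.0 * bc) / ba   # 10.025
lng = (20.1 * ca + 20.0 * bc) / ba   # 20.025
# The query point lies a quarter of the way along the segment, as expected.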
google/transitfeed | transitfeed/trip.py | Trip.AddStopTime | def AddStopTime(self, stop, problems=None, schedule=None, **kwargs):
"""Add a stop to this trip. Stops must be added in the order visited.
Args:
stop: A Stop object
kwargs: remaining keyword args passed to StopTime.__init__
Returns:
None
"""
if problems is None:
# TODO: delete this branch when StopTime.__init__ doesn't need a
# ProblemReporter
problems = problems_module.default_problem_reporter
stoptime = self.GetGtfsFactory().StopTime(
problems=problems, stop=stop, **kwargs)
self.AddStopTimeObject(stoptime, schedule) | python | def AddStopTime(self, stop, problems=None, schedule=None, **kwargs):
"""Add a stop to this trip. Stops must be added in the order visited.
Args:
stop: A Stop object
kwargs: remaining keyword args passed to StopTime.__init__
Returns:
None
"""
if problems is None:
# TODO: delete this branch when StopTime.__init__ doesn't need a
# ProblemReporter
problems = problems_module.default_problem_reporter
stoptime = self.GetGtfsFactory().StopTime(
problems=problems, stop=stop, **kwargs)
self.AddStopTimeObject(stoptime, schedule) | [
"def",
"AddStopTime",
"(",
"self",
",",
"stop",
",",
"problems",
"=",
"None",
",",
"schedule",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"problems",
"is",
"None",
":",
"# TODO: delete this branch when StopTime.__init__ doesn't need a",
"# ProblemReport... | Add a stop to this trip. Stops must be added in the order visited.
Args:
stop: A Stop object
kwargs: remaining keyword args passed to StopTime.__init__
Returns:
None | [
"Add",
"a",
"stop",
"to",
"this",
"trip",
".",
"Stops",
"must",
"be",
"added",
"in",
"the",
"order",
"visited",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L59-L75 | train | 220,048 |
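A usage sketch for AddStopTime, following the transitfeed tutorial; the agency, route and stop values are hypothetical and the helper signatures should be treated as assumptions:

import transitfeed

schedule = transitfeed.Schedule()
schedule.AddAgency('Demo Transit', 'http://example.com', 'America/Los_Angeles')
service = schedule.GetDefaultServicePeriod()
service.SetWeekdayService()
stop1 = schedule.AddStop(lng=-122.0, lat=37.0, name='First Stop')
stop2 = schedule.AddStop(lng=-122.1, lat=37.1, name='Second Stop')
route = schedule.AddRoute(short_name='10', long_name='Demo Route', route_type='Bus')
trip = route.AddTrip(schedule, headsign='To Second Stop')
trip.AddStopTime(stop1, stop_time='09:00:00')  # stops must be added in visit order
trip.AddStopTime(stop2, stop_time='09:10:00')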
google/transitfeed | transitfeed/trip.py | Trip._AddStopTimeObjectUnordered | def _AddStopTimeObjectUnordered(self, stoptime, schedule):
"""Add StopTime object to this trip.
The trip isn't checked for duplicate sequence numbers so it must be
validated later."""
stop_time_class = self.GetGtfsFactory().StopTime
cursor = schedule._connection.cursor()
insert_query = "INSERT INTO stop_times (%s) VALUES (%s);" % (
','.join(stop_time_class._SQL_FIELD_NAMES),
','.join(['?'] * len(stop_time_class._SQL_FIELD_NAMES)))
cursor.execute(
insert_query, stoptime.GetSqlValuesTuple(self.trip_id)) | python | def _AddStopTimeObjectUnordered(self, stoptime, schedule):
"""Add StopTime object to this trip.
The trip isn't checked for duplicate sequence numbers so it must be
validated later."""
stop_time_class = self.GetGtfsFactory().StopTime
cursor = schedule._connection.cursor()
insert_query = "INSERT INTO stop_times (%s) VALUES (%s);" % (
','.join(stop_time_class._SQL_FIELD_NAMES),
','.join(['?'] * len(stop_time_class._SQL_FIELD_NAMES)))
cursor.execute(
insert_query, stoptime.GetSqlValuesTuple(self.trip_id)) | [
"def",
"_AddStopTimeObjectUnordered",
"(",
"self",
",",
"stoptime",
",",
"schedule",
")",
":",
"stop_time_class",
"=",
"self",
".",
"GetGtfsFactory",
"(",
")",
".",
"StopTime",
"cursor",
"=",
"schedule",
".",
"_connection",
".",
"cursor",
"(",
")",
"insert_que... | Add StopTime object to this trip.
The trip isn't checked for duplicate sequence numbers so it must be
validated later. | [
"Add",
"StopTime",
"object",
"to",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L77-L89 | train | 220,049 |
google/transitfeed | transitfeed/trip.py | Trip.ReplaceStopTimeObject | def ReplaceStopTimeObject(self, stoptime, schedule=None):
"""Replace a StopTime object from this trip with the given one.
The StopTime to be replaced is keyed by trip_id, stop_sequence
and stop_id, and is replaced with the given 'stoptime' object.
"""
if schedule is None:
schedule = self._schedule
cursor = schedule._connection.cursor()
cursor.execute("DELETE FROM stop_times WHERE trip_id=? and "
"stop_sequence=? and stop_id=?",
(self.trip_id, stoptime.stop_sequence, stoptime.stop_id))
if cursor.rowcount == 0:
raise problems_module.Error('Attempted replacement of StopTime object which does not exist')
self._AddStopTimeObjectUnordered(stoptime, schedule) | python | def ReplaceStopTimeObject(self, stoptime, schedule=None):
"""Replace a StopTime object from this trip with the given one.
The StopTime to be replaced is keyed by trip_id, stop_sequence
and stop_id, and is replaced with the given 'stoptime' object.
"""
if schedule is None:
schedule = self._schedule
cursor = schedule._connection.cursor()
cursor.execute("DELETE FROM stop_times WHERE trip_id=? and "
"stop_sequence=? and stop_id=?",
(self.trip_id, stoptime.stop_sequence, stoptime.stop_id))
if cursor.rowcount == 0:
raise problems_module.Error('Attempted replacement of StopTime object which does not exist')
self._AddStopTimeObjectUnordered(stoptime, schedule) | [
"def",
"ReplaceStopTimeObject",
"(",
"self",
",",
"stoptime",
",",
"schedule",
"=",
"None",
")",
":",
"if",
"schedule",
"is",
"None",
":",
"schedule",
"=",
"self",
".",
"_schedule",
"new_secs",
"=",
"stoptime",
".",
"GetTimeSecs",
"(",
")",
"cursor",
"=",
... | Replace a StopTime object from this trip with the given one.
The StopTime to be replaced is keyed by trip_id, stop_sequence
and stop_id, and is replaced with the given 'stoptime' object. | [
"Replace",
"a",
"StopTime",
"object",
"from",
"this",
"trip",
"with",
"the",
"given",
"one",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L91-L108 | train | 220,050 |
google/transitfeed | transitfeed/trip.py | Trip.AddStopTimeObject | def AddStopTimeObject(self, stoptime, schedule=None, problems=None):
"""Add a StopTime object to the end of this trip.
Args:
stoptime: A StopTime object. Should not be reused in multiple trips.
schedule: Schedule object containing this trip which must be
passed to Trip.__init__ or here
problems: ProblemReporter object for validating the StopTime in its new
home
Returns:
None
"""
if schedule is None:
schedule = self._schedule
if schedule is None:
warnings.warn("No longer supported. _schedule attribute is used to get "
"stop_times table", DeprecationWarning)
if problems is None:
problems = schedule.problem_reporter
new_secs = stoptime.GetTimeSecs()
cursor = schedule._connection.cursor()
cursor.execute("SELECT max(stop_sequence), max(arrival_secs), "
"max(departure_secs) FROM stop_times WHERE trip_id=?",
(self.trip_id,))
row = cursor.fetchone()
if row[0] is None:
# This is the first stop_time of the trip
stoptime.stop_sequence = 1
if new_secs is None:
problems.OtherProblem(
'No time for first StopTime of trip_id "%s"' % (self.trip_id,))
else:
stoptime.stop_sequence = row[0] + 1
prev_secs = max(row[1], row[2])
if new_secs is not None and new_secs < prev_secs:
problems.OtherProblem(
'out of order stop time for stop_id=%s trip_id=%s %s < %s' %
(util.EncodeUnicode(stoptime.stop_id),
util.EncodeUnicode(self.trip_id),
util.FormatSecondsSinceMidnight(new_secs),
util.FormatSecondsSinceMidnight(prev_secs)))
self._AddStopTimeObjectUnordered(stoptime, schedule) | python | def AddStopTimeObject(self, stoptime, schedule=None, problems=None):
"""Add a StopTime object to the end of this trip.
Args:
stoptime: A StopTime object. Should not be reused in multiple trips.
schedule: Schedule object containing this trip which must be
passed to Trip.__init__ or here
problems: ProblemReporter object for validating the StopTime in its new
home
Returns:
None
"""
if schedule is None:
schedule = self._schedule
if schedule is None:
warnings.warn("No longer supported. _schedule attribute is used to get "
"stop_times table", DeprecationWarning)
if problems is None:
problems = schedule.problem_reporter
new_secs = stoptime.GetTimeSecs()
cursor = schedule._connection.cursor()
cursor.execute("SELECT max(stop_sequence), max(arrival_secs), "
"max(departure_secs) FROM stop_times WHERE trip_id=?",
(self.trip_id,))
row = cursor.fetchone()
if row[0] is None:
# This is the first stop_time of the trip
stoptime.stop_sequence = 1
if new_secs is None:
problems.OtherProblem(
'No time for first StopTime of trip_id "%s"' % (self.trip_id,))
else:
stoptime.stop_sequence = row[0] + 1
prev_secs = max(row[1], row[2])
if new_secs is not None and new_secs < prev_secs:
problems.OtherProblem(
'out of order stop time for stop_id=%s trip_id=%s %s < %s' %
(util.EncodeUnicode(stoptime.stop_id),
util.EncodeUnicode(self.trip_id),
util.FormatSecondsSinceMidnight(new_secs),
util.FormatSecondsSinceMidnight(prev_secs)))
self._AddStopTimeObjectUnordered(stoptime, schedule) | [
"def",
"AddStopTimeObject",
"(",
"self",
",",
"stoptime",
",",
"schedule",
"=",
"None",
",",
"problems",
"=",
"None",
")",
":",
"if",
"schedule",
"is",
"None",
":",
"schedule",
"=",
"self",
".",
"_schedule",
"if",
"schedule",
"is",
"None",
":",
"warnings... | Add a StopTime object to the end of this trip.
Args:
stoptime: A StopTime object. Should not be reused in multiple trips.
schedule: Schedule object containing this trip which must be
passed to Trip.__init__ or here
problems: ProblemReporter object for validating the StopTime in its new
home
Returns:
None | [
"Add",
"a",
"StopTime",
"object",
"to",
"the",
"end",
"of",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L110-L153 | train | 220,051 |
google/transitfeed | transitfeed/trip.py | Trip.GetCountStopTimes | def GetCountStopTimes(self):
"""Return the number of stops made by this trip."""
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,))
return cursor.fetchone()[0] | python | def GetCountStopTimes(self):
"""Return the number of stops made by this trip."""
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,))
return cursor.fetchone()[0] | [
"def",
"GetCountStopTimes",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"_schedule",
".",
"_connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'SELECT count(*) FROM stop_times WHERE trip_id=?'",
",",
"(",
"self",
".",
"trip_id",
",",
... | Return the number of stops made by this trip. | [
"Return",
"the",
"number",
"of",
"stops",
"made",
"by",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L163-L168 | train | 220,052 |
google/transitfeed | transitfeed/trip.py | Trip.ClearStopTimes | def ClearStopTimes(self):
"""Remove all stop times from this trip.
StopTime objects previously returned by GetStopTimes are unchanged but are
no longer associated with this trip.
"""
cursor = self._schedule._connection.cursor()
cursor.execute('DELETE FROM stop_times WHERE trip_id=?', (self.trip_id,)) | python | def ClearStopTimes(self):
"""Remove all stop times from this trip.
StopTime objects previously returned by GetStopTimes are unchanged but are
no longer associated with this trip.
"""
cursor = self._schedule._connection.cursor()
cursor.execute('DELETE FROM stop_times WHERE trip_id=?', (self.trip_id,)) | [
"def",
"ClearStopTimes",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"_schedule",
".",
"_connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'DELETE FROM stop_times WHERE trip_id=?'",
",",
"(",
"self",
".",
"trip_id",
",",
")",
")"
] | Remove all stop times from this trip.
StopTime objects previously returned by GetStopTimes are unchanged but are
no longer associated with this trip. | [
"Remove",
"all",
"stop",
"times",
"from",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L218-L225 | train | 220,053 |
google/transitfeed | transitfeed/trip.py | Trip.GetStopTimes | def GetStopTimes(self, problems=None):
"""Return a sorted list of StopTime objects for this trip."""
# In theory problems=None should be safe because data from the database has been
# validated. See comment in _LoadStopTimes for why this isn't always true.
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT arrival_secs,departure_secs,stop_headsign,pickup_type,'
'drop_off_type,shape_dist_traveled,stop_id,stop_sequence,timepoint '
'FROM stop_times '
'WHERE trip_id=? '
'ORDER BY stop_sequence', (self.trip_id,))
stop_times = []
stoptime_class = self.GetGtfsFactory().StopTime
if problems is None:
# TODO: delete this branch when StopTime.__init__ doesn't need a
# ProblemReporter
problems = problems_module.default_problem_reporter
for row in cursor.fetchall():
stop = self._schedule.GetStop(row[6])
stop_times.append(stoptime_class(problems=problems,
stop=stop,
arrival_secs=row[0],
departure_secs=row[1],
stop_headsign=row[2],
pickup_type=row[3],
drop_off_type=row[4],
shape_dist_traveled=row[5],
stop_sequence=row[7],
timepoint=row[8]))
return stop_times | python | def GetStopTimes(self, problems=None):
"""Return a sorted list of StopTime objects for this trip."""
# In theory problems=None should be safe because data from the database has been
# validated. See comment in _LoadStopTimes for why this isn't always true.
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT arrival_secs,departure_secs,stop_headsign,pickup_type,'
'drop_off_type,shape_dist_traveled,stop_id,stop_sequence,timepoint '
'FROM stop_times '
'WHERE trip_id=? '
'ORDER BY stop_sequence', (self.trip_id,))
stop_times = []
stoptime_class = self.GetGtfsFactory().StopTime
if problems is None:
# TODO: delete this branch when StopTime.__init__ doesn't need a
# ProblemReporter
problems = problems_module.default_problem_reporter
for row in cursor.fetchall():
stop = self._schedule.GetStop(row[6])
stop_times.append(stoptime_class(problems=problems,
stop=stop,
arrival_secs=row[0],
departure_secs=row[1],
stop_headsign=row[2],
pickup_type=row[3],
drop_off_type=row[4],
shape_dist_traveled=row[5],
stop_sequence=row[7],
timepoint=row[8]))
return stop_times | [
"def",
"GetStopTimes",
"(",
"self",
",",
"problems",
"=",
"None",
")",
":",
"# In theory problems=None should be safe because data from database has been",
"# validated. See comment in _LoadStopTimes for why this isn't always true.",
"cursor",
"=",
"self",
".",
"_schedule",
".",
... | Return a sorted list of StopTime objects for this trip. | [
"Return",
"a",
"sorted",
"list",
"of",
"StopTime",
"objects",
"for",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L227-L256 | train | 220,054 |
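Reading the sorted stop times back, e.g. to print a simple timetable for a trip built as in the AddStopTime sketch above; util.FormatSecondsSinceMidnight is the same helper the code above relies on:

from transitfeed import util

for st in trip.GetStopTimes():
    secs = st.GetTimeSecs()
    label = util.FormatSecondsSinceMidnight(secs) if secs is not None else 'untimed'
    print(st.stop_sequence, st.stop.stop_name, label)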
google/transitfeed | transitfeed/trip.py | Trip.GetFrequencyStopTimes | def GetFrequencyStopTimes(self, problems=None):
"""Return a list of StopTime objects for each headway-based run.
Returns:
a list of lists of StopTime objects. Each list of StopTime objects
represents one run. If this trip doesn't have headways, returns an empty
list.
"""
stoptimes_list = [] # list of stoptime lists to be returned
stoptime_pattern = self.GetStopTimes()
first_secs = stoptime_pattern[0].arrival_secs # first time of the trip
stoptime_class = self.GetGtfsFactory().StopTime
# for each start time of a headway run
for run_secs in self.GetFrequencyStartTimes():
# stop time list for a headway run
stoptimes = []
# go through the pattern and generate stoptimes
for st in stoptime_pattern:
arrival_secs, departure_secs = None, None # default values if the stoptime is not a timepoint
if st.arrival_secs is not None:
arrival_secs = st.arrival_secs - first_secs + run_secs
if st.departure_secs is not None:
departure_secs = st.departure_secs - first_secs + run_secs
# append stoptime
stoptimes.append(stoptime_class(problems=problems, stop=st.stop,
arrival_secs=arrival_secs,
departure_secs=departure_secs,
stop_headsign=st.stop_headsign,
pickup_type=st.pickup_type,
drop_off_type=st.drop_off_type,
shape_dist_traveled= \
st.shape_dist_traveled,
stop_sequence=st.stop_sequence,
timepoint=st.timepoint))
# add stoptimes to the stoptimes_list
stoptimes_list.append(stoptimes)
return stoptimes_list | python | def GetFrequencyStopTimes(self, problems=None):
"""Return a list of StopTime objects for each headway-based run.
Returns:
a list of lists of StopTime objects. Each list of StopTime objects
represents one run. If this trip doesn't have headways, returns an empty
list.
"""
stoptimes_list = [] # list of stoptime lists to be returned
stoptime_pattern = self.GetStopTimes()
first_secs = stoptime_pattern[0].arrival_secs # first time of the trip
stoptime_class = self.GetGtfsFactory().StopTime
# for each start time of a headway run
for run_secs in self.GetFrequencyStartTimes():
# stop time list for a headway run
stoptimes = []
# go through the pattern and generate stoptimes
for st in stoptime_pattern:
arrival_secs, departure_secs = None, None # default values if the stoptime is not a timepoint
if st.arrival_secs is not None:
arrival_secs = st.arrival_secs - first_secs + run_secs
if st.departure_secs is not None:
departure_secs = st.departure_secs - first_secs + run_secs
# append stoptime
stoptimes.append(stoptime_class(problems=problems, stop=st.stop,
arrival_secs=arrival_secs,
departure_secs=departure_secs,
stop_headsign=st.stop_headsign,
pickup_type=st.pickup_type,
drop_off_type=st.drop_off_type,
shape_dist_traveled= \
st.shape_dist_traveled,
stop_sequence=st.stop_sequence,
timepoint=st.timepoint))
# add stoptimes to the stoptimes_list
stoptimes_list.append(stoptimes)
return stoptimes_list | [
"def",
"GetFrequencyStopTimes",
"(",
"self",
",",
"problems",
"=",
"None",
")",
":",
"stoptimes_list",
"=",
"[",
"]",
"# list of stoptime lists to be returned",
"stoptime_pattern",
"=",
"self",
".",
"GetStopTimes",
"(",
")",
"first_secs",
"=",
"stoptime_pattern",
"[... | Return a list of StopTime objects for each headway-based run.
Returns:
a list of lists of StopTime objects. Each list of StopTime objects
represents one run. If this trip doesn't have headways, returns an empty
list. | [
"Return",
"a",
"list",
"of",
"StopTime",
"objects",
"for",
"each",
"headway",
"-",
"based",
"run",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L265-L301 | train | 220,055 |
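The expansion above shifts the whole timed pattern so that each run starts at one of the headway start times; a sketch over a trip assumed to have frequencies defined (see AddFrequencyObject below):

for run in trip.GetFrequencyStopTimes():
    # one list of StopTime objects per headway-based run, offsets applied
    print(run[0].arrival_secs, run[-1].arrival_secs)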
google/transitfeed | transitfeed/trip.py | Trip.GetFrequencyStartTimes | def GetFrequencyStartTimes(self):
"""Return a list of start time for each headway-based run.
Returns:
a sorted list of seconds since midnight, the start time of each run. If
this trip doesn't have headways returns an empty list."""
start_times = []
# for each headway period of the trip
for freq_tuple in self.GetFrequencyTuples():
(start_secs, end_secs, headway_secs) = freq_tuple[0:3]
# reset run secs to the start of the timeframe
run_secs = start_secs
while run_secs < end_secs:
start_times.append(run_secs)
# increment current run secs by headway secs
run_secs += headway_secs
return start_times | python | def GetFrequencyStartTimes(self):
"""Return a list of start time for each headway-based run.
Returns:
a sorted list of seconds since midnight, the start time of each run. If
this trip doesn't have headways returns an empty list."""
start_times = []
# for each headway period of the trip
for freq_tuple in self.GetFrequencyTuples():
(start_secs, end_secs, headway_secs) = freq_tuple[0:3]
# reset run secs to the start of the timeframe
run_secs = start_secs
while run_secs < end_secs:
start_times.append(run_secs)
# increment current run secs by headway secs
run_secs += headway_secs
return start_times | [
"def",
"GetFrequencyStartTimes",
"(",
"self",
")",
":",
"start_times",
"=",
"[",
"]",
"# for each headway period of the trip",
"for",
"freq_tuple",
"in",
"self",
".",
"GetFrequencyTuples",
"(",
")",
":",
"(",
"start_secs",
",",
"end_secs",
",",
"headway_secs",
")"... | Return a list of start time for each headway-based run.
Returns:
a sorted list of seconds since midnight, the start time of each run. If
this trip doesn't have headways returns an empty list. | [
"Return",
"a",
"list",
"of",
"start",
"time",
"for",
"each",
"headway",
"-",
"based",
"run",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L327-L343 | train | 220,056 |
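The start-time generation above is simple arithmetic; a worked example for a period from 28800 (08:00:00) to 32400 (09:00:00) with headway_secs = 600:

starts = []
run_secs, end_secs, headway_secs = 28800, 32400, 600
while run_secs < end_secs:
    starts.append(run_secs)
    run_secs += headway_secs
# starts == [28800, 29400, 30000, 30600, 31200, 31800]
# The end time itself is excluded, so a one-hour period yields six runs.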
google/transitfeed | transitfeed/trip.py | Trip._GenerateStopTimesTuples | def _GenerateStopTimesTuples(self):
"""Generator for rows of the stop_times file"""
stoptimes = self.GetStopTimes()
for st in stoptimes:
yield st.GetFieldValuesTuple(self.trip_id) | python | def _GenerateStopTimesTuples(self):
"""Generator for rows of the stop_times file"""
stoptimes = self.GetStopTimes()
for st in stoptimes:
yield st.GetFieldValuesTuple(self.trip_id) | [
"def",
"_GenerateStopTimesTuples",
"(",
"self",
")",
":",
"stoptimes",
"=",
"self",
".",
"GetStopTimes",
"(",
")",
"for",
"i",
",",
"st",
"in",
"enumerate",
"(",
"stoptimes",
")",
":",
"yield",
"st",
".",
"GetFieldValuesTuple",
"(",
"self",
".",
"trip_id",... | Generator for rows of the stop_times file | [
"Generator",
"for",
"rows",
"of",
"the",
"stop_times",
"file"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L362-L366 | train | 220,057 |
google/transitfeed | transitfeed/trip.py | Trip.GetPattern | def GetPattern(self):
"""Return a tuple of Stop objects, in the order visited"""
stoptimes = self.GetStopTimes()
return tuple(st.stop for st in stoptimes) | python | def GetPattern(self):
"""Return a tuple of Stop objects, in the order visited"""
stoptimes = self.GetStopTimes()
return tuple(st.stop for st in stoptimes) | [
"def",
"GetPattern",
"(",
"self",
")",
":",
"stoptimes",
"=",
"self",
".",
"GetStopTimes",
"(",
")",
"return",
"tuple",
"(",
"st",
".",
"stop",
"for",
"st",
"in",
"stoptimes",
")"
] | Return a tuple of Stop objects, in the order visited | [
"Return",
"a",
"tuple",
"of",
"Stop",
"objects",
"in",
"the",
"order",
"visited"
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L374-L377 | train | 220,058 |
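GetPattern is handy for grouping trips that serve identical stop sequences; a sketch over a populated schedule (Schedule.GetTripList is assumed to exist, per the transitfeed API):

patterns = {}
for trip in schedule.GetTripList():
    patterns.setdefault(trip.GetPattern(), []).append(trip.trip_id)
# Trips sharing a key visit exactly the same stops in the same order.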
google/transitfeed | transitfeed/trip.py | Trip.AddHeadwayPeriodObject | def AddHeadwayPeriodObject(self, headway_period, problem_reporter):
"""Deprecated. Please use AddFrequencyObject instead."""
warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
"Frequency, and all related functions were renamed "
"accordingly.", DeprecationWarning)
self.AddFrequencyObject(headway_period, problem_reporter)
"""Deprecated. Please use AddFrequencyObject instead."""
warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
"Frequency, and all related functions were renamed "
"accordingly.", DeprecationWarning)
self.AddFrequencyObject(frequency, problem_reporter) | [
"def",
"AddHeadwayPeriodObject",
"(",
"self",
",",
"headway_period",
",",
"problem_reporter",
")",
":",
"warnings",
".",
"warn",
"(",
"\"No longer supported. The HeadwayPeriod class was renamed to \"",
"\"Frequency, and all related functions were renamed \"",
"\"accordingly.\"",
",... | Deprecated. Please use AddFrequencyObject instead. | [
"Deprecated",
".",
"Please",
"use",
"AddFrequencyObject",
"instead",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L379-L384 | train | 220,059 |
google/transitfeed | transitfeed/trip.py | Trip.AddFrequencyObject | def AddFrequencyObject(self, frequency, problem_reporter):
"""Add a Frequency object to this trip's list of Frequencies."""
if frequency is not None:
self.AddFrequency(frequency.StartTime(),
frequency.EndTime(),
frequency.HeadwaySecs(),
frequency.ExactTimes(),
problem_reporter) | python | def AddFrequencyObject(self, frequency, problem_reporter):
"""Add a Frequency object to this trip's list of Frequencies."""
if frequency is not None:
self.AddFrequency(frequency.StartTime(),
frequency.EndTime(),
frequency.HeadwaySecs(),
frequency.ExactTimes(),
problem_reporter) | [
"def",
"AddFrequencyObject",
"(",
"self",
",",
"frequency",
",",
"problem_reporter",
")",
":",
"if",
"frequency",
"is",
"not",
"None",
":",
"self",
".",
"AddFrequency",
"(",
"frequency",
".",
"StartTime",
"(",
")",
",",
"frequency",
".",
"EndTime",
"(",
")... | Add a Frequency object to this trip's list of Frequencies. | [
"Add",
"a",
"Frequency",
"object",
"to",
"this",
"trip",
"s",
"list",
"of",
"Frequencies",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L386-L393 | train | 220,060 |
google/transitfeed | transitfeed/trip.py | Trip.AddHeadwayPeriod | def AddHeadwayPeriod(self, start_time, end_time, headway_secs,
problem_reporter=problems_module.default_problem_reporter):
"""Deprecated. Please use AddFrequency instead."""
warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
"Frequency, and all related functions were renamed "
"accordingly.", DeprecationWarning)
self.AddFrequency(start_time, end_time, headway_secs, problem_reporter) | python | def AddHeadwayPeriod(self, start_time, end_time, headway_secs,
problem_reporter=problems_module.default_problem_reporter):
"""Deprecated. Please use AddFrequency instead."""
warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
"Frequency, and all related functions were renamed "
"accordingly.", DeprecationWarning)
self.AddFrequency(start_time, end_time, headway_secs, problem_reporter) | [
"def",
"AddHeadwayPeriod",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"headway_secs",
",",
"problem_reporter",
"=",
"problems_module",
".",
"default_problem_reporter",
")",
":",
"warnings",
".",
"warn",
"(",
"\"No longer supported. The HeadwayPeriod class was r... | Deprecated. Please use AddFrequency instead. | [
"Deprecated",
".",
"Please",
"use",
"AddFrequency",
"instead",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L395-L401 | train | 220,061 |
google/transitfeed | transitfeed/trip.py | Trip.Validate | def Validate(self, problems, validate_children=True):
"""Validate attributes of this object.
Check that this object has all required values set to a valid value without
reference to the rest of the schedule. If the _schedule attribute is set
then check that references such as route_id and service_id are correct.
Args:
problems: A ProblemReporter object
validate_children: if True and the _schedule attribute is set then call
ValidateChildren
"""
self.ValidateRouteId(problems)
self.ValidateServicePeriod(problems)
self.ValidateDirectionId(problems)
self.ValidateTripId(problems)
self.ValidateShapeIdsExistInShapeList(problems)
self.ValidateRouteIdExistsInRouteList(problems)
self.ValidateServiceIdExistsInServiceList(problems)
self.ValidateBikesAllowed(problems)
self.ValidateWheelchairAccessible(problems)
if self._schedule and validate_children:
self.ValidateChildren(problems) | python | def Validate(self, problems, validate_children=True):
"""Validate attributes of this object.
Check that this object has all required values set to a valid value without
reference to the rest of the schedule. If the _schedule attribute is set
then check that references such as route_id and service_id are correct.
Args:
problems: A ProblemReporter object
validate_children: if True and the _schedule attribute is set then call
ValidateChildren
"""
self.ValidateRouteId(problems)
self.ValidateServicePeriod(problems)
self.ValidateDirectionId(problems)
self.ValidateTripId(problems)
self.ValidateShapeIdsExistInShapeList(problems)
self.ValidateRouteIdExistsInRouteList(problems)
self.ValidateServiceIdExistsInServiceList(problems)
self.ValidateBikesAllowed(problems)
self.ValidateWheelchairAccessible(problems)
if self._schedule and validate_children:
self.ValidateChildren(problems) | [
"def",
"Validate",
"(",
"self",
",",
"problems",
",",
"validate_children",
"=",
"True",
")",
":",
"self",
".",
"ValidateRouteId",
"(",
"problems",
")",
"self",
".",
"ValidateServicePeriod",
"(",
"problems",
")",
"self",
".",
"ValidateDirectionId",
"(",
"proble... | Validate attributes of this object.
Check that this object has all required values set to a valid value without
reference to the rest of the schedule. If the _schedule attribute is set
then check that references such as route_id and service_id are correct.
Args:
problems: A ProblemReporter object
validate_children: if True and the _schedule attribute is set then call
ValidateChildren | [
"Validate",
"attributes",
"of",
"this",
"object",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L551-L573 | train | 220,062 |
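Validation is typically driven through the same default reporter the code above falls back to; a minimal sketch:

from transitfeed import problems as problems_module

trip.Validate(problems_module.default_problem_reporter)
# Runs the field-level checks listed above, then ValidateChildren
# automatically when the trip is attached to a schedule.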
google/transitfeed | transitfeed/trip.py | Trip.ValidateChildren | def ValidateChildren(self, problems):
"""Validate StopTimes and headways of this trip."""
assert self._schedule, "Trip must be in a schedule to ValidateChildren"
# TODO: validate distance values in stop times (if applicable)
self.ValidateNoDuplicateStopSequences(problems)
stoptimes = self.GetStopTimes(problems)
stoptimes.sort(key=lambda x: x.stop_sequence)
self.ValidateTripStartAndEndTimes(problems, stoptimes)
self.ValidateStopTimesSequenceHasIncreasingTimeAndDistance(problems,
stoptimes)
self.ValidateShapeDistTraveledSmallerThanMaxShapeDistance(problems,
stoptimes)
self.ValidateDistanceFromStopToShape(problems, stoptimes)
self.ValidateFrequencies(problems) | python | def ValidateChildren(self, problems):
"""Validate StopTimes and headways of this trip."""
assert self._schedule, "Trip must be in a schedule to ValidateChildren"
# TODO: validate distance values in stop times (if applicable)
self.ValidateNoDuplicateStopSequences(problems)
stoptimes = self.GetStopTimes(problems)
stoptimes.sort(key=lambda x: x.stop_sequence)
self.ValidateTripStartAndEndTimes(problems, stoptimes)
self.ValidateStopTimesSequenceHasIncreasingTimeAndDistance(problems,
stoptimes)
self.ValidateShapeDistTraveledSmallerThanMaxShapeDistance(problems,
stoptimes)
self.ValidateDistanceFromStopToShape(problems, stoptimes)
self.ValidateFrequencies(problems) | [
"def",
"ValidateChildren",
"(",
"self",
",",
"problems",
")",
":",
"assert",
"self",
".",
"_schedule",
",",
"\"Trip must be in a schedule to ValidateChildren\"",
"# TODO: validate distance values in stop times (if applicable)",
"self",
".",
"ValidateNoDuplicateStopSequences",
"("... | Validate StopTimes and headways of this trip. | [
"Validate",
"StopTimes",
"and",
"headways",
"of",
"this",
"trip",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L697-L711 | train | 220,063 |
google/transitfeed | transitfeed/stoptime.py | StopTime.GetFieldValuesTuple | def GetFieldValuesTuple(self, trip_id):
"""Return a tuple that outputs a row of _FIELD_NAMES to be written to a
GTFS file.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime.
"""
result = []
for fn in self._FIELD_NAMES:
if fn == 'trip_id':
result.append(trip_id)
else:
# Since we'll be writing to an output file, we want empty values to be
# output as an empty string
result.append(getattr(self, fn) or '')
return tuple(result) | python | def GetFieldValuesTuple(self, trip_id):
"""Return a tuple that outputs a row of _FIELD_NAMES to be written to a
GTFS file.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime.
"""
result = []
for fn in self._FIELD_NAMES:
if fn == 'trip_id':
result.append(trip_id)
else:
# Since we'll be writing to an output file, we want empty values to be
# output as an empty string
result.append(getattr(self, fn) or '')
return tuple(result) | [
"def",
"GetFieldValuesTuple",
"(",
"self",
",",
"trip_id",
")",
":",
"result",
"=",
"[",
"]",
"for",
"fn",
"in",
"self",
".",
"_FIELD_NAMES",
":",
"if",
"fn",
"==",
"'trip_id'",
":",
"result",
".",
"append",
"(",
"trip_id",
")",
"else",
":",
"# Since w... | Return a tuple that outputs a row of _FIELD_NAMES to be written to a
GTFS file.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime. | [
"Return",
"a",
"tuple",
"that",
"outputs",
"a",
"row",
"of",
"_FIELD_NAMES",
"to",
"be",
"written",
"to",
"a",
"GTFS",
"file",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/stoptime.py#L165-L181 | train | 220,064 |
google/transitfeed | transitfeed/stoptime.py | StopTime.GetSqlValuesTuple | def GetSqlValuesTuple(self, trip_id):
"""Return a tuple that outputs a row of _FIELD_NAMES to be written to a
SQLite database.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime.
"""
result = []
for fn in self._SQL_FIELD_NAMES:
if fn == 'trip_id':
result.append(trip_id)
else:
# Since we'll be writing to SQLite, we want empty values to be
# output as NULL (contrary to what happens in
# GetFieldValuesTuple)
result.append(getattr(self, fn))
return tuple(result) | python | def GetSqlValuesTuple(self, trip_id):
"""Return a tuple that outputs a row of _FIELD_NAMES to be written to a
SQLite database.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime.
"""
result = []
for fn in self._SQL_FIELD_NAMES:
if fn == 'trip_id':
result.append(trip_id)
else:
# Since we'll be writing to SQLite, we want empty values to be
# output as NULL (contrary to what happens in
# GetFieldValuesTuple)
result.append(getattr(self, fn))
return tuple(result) | [
"def",
"GetSqlValuesTuple",
"(",
"self",
",",
"trip_id",
")",
":",
"result",
"=",
"[",
"]",
"for",
"fn",
"in",
"self",
".",
"_SQL_FIELD_NAMES",
":",
"if",
"fn",
"==",
"'trip_id'",
":",
"result",
".",
"append",
"(",
"trip_id",
")",
"else",
":",
"# Since... | Return a tuple that outputs a row of _FIELD_NAMES to be written to a
SQLite database.
Arguments:
trip_id: The trip_id of the trip to which this StopTime corresponds.
It must be provided, as it is not stored in StopTime. | [
"Return",
"a",
"tuple",
"that",
"outputs",
"a",
"row",
"of",
"_FIELD_NAMES",
"to",
"be",
"written",
"to",
"a",
"SQLite",
"database",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/stoptime.py#L183-L201 | train | 220,065 |
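The two tuple builders above differ only in how empty values travel: empty string for the GTFS CSV writer, None (hence SQL NULL) for the sqlite mirror. A sketch, with st a StopTime whose stop_headsign is unset:

row_csv = st.GetFieldValuesTuple('t1')  # unset fields come out as ''
row_sql = st.GetSqlValuesTuple('t1')    # unset fields stay None -> SQL NULL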
google/transitfeed | transitfeed/stoptime.py | StopTime.GetTimeSecs | def GetTimeSecs(self):
"""Return the first of arrival_secs and departure_secs that is not None.
If both are None return None."""
if self.arrival_secs != None:
return self.arrival_secs
elif self.departure_secs != None:
return self.departure_secs
else:
return None | python | def GetTimeSecs(self):
"""Return the first of arrival_secs and departure_secs that is not None.
If both are None return None."""
if self.arrival_secs != None:
return self.arrival_secs
elif self.departure_secs != None:
return self.departure_secs
else:
return None | [
"def",
"GetTimeSecs",
"(",
"self",
")",
":",
"if",
"self",
".",
"arrival_secs",
"!=",
"None",
":",
"return",
"self",
".",
"arrival_secs",
"elif",
"self",
".",
"departure_secs",
"!=",
"None",
":",
"return",
"self",
".",
"departure_secs",
"else",
":",
"retur... | Return the first of arrival_secs and departure_secs that is not None.
If both are None, return None. | [
"Return",
"the",
"first",
"of",
"arrival_secs",
"and",
"departure_secs",
"that",
"is",
"not",
"None",
".",
"If",
"both",
"are",
"None",
"return",
"None",
"."
] | eb2991a3747ba541b2cb66502b305b6304a1f85f | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/stoptime.py#L203-L211 | train | 220,066 |
equinor/segyio | python/segyio/create.py | create | def create(filename, spec):
"""Create a new segy file.
Create a new segy file with the geometry and properties given by `spec`.
This enables creating SEGY files from your data. The created file supports
all segyio modes, but has an emphasis on writing. The spec must be
complete, otherwise an exception will be raised. A default, empty spec can
be created with ``segyio.spec()``.
Very little data is written to the file, so just calling create is not
sufficient to re-read the file with segyio. Rather, every trace header and
trace must be written to the file to be considered complete.
Create should be used together with python's ``with`` statement. This ensures
the data is written. Please refer to the examples.
The ``segyio.spec()`` function will default sorting, offsets and everything
in the mandatory group, except format and samples, and requires the caller
to fill in *all* the fields in either of the exclusive groups.
If any field is missing from the first exclusive group, and the tracecount
is set, the resulting file will be considered unstructured. If the
tracecount is set, and all fields of the first exclusive group are
specified, the file is considered structured and the tracecount is inferred
from the xlines/ilines/offsets. The offsets are defaulted to ``[1]`` by
``segyio.spec()``.
Parameters
----------
filename : str
Path to file to create
spec : segyio.spec
Structure of the segy file
Returns
-------
file : segyio.SegyFile
An open segyio file handle, similar to that returned by `segyio.open`
See also
--------
segyio.spec : template for the `spec` argument
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.4
Support for creating unstructured files
.. versionchanged:: 1.8
Support for creating lsb files
The ``spec`` is any object that has the following attributes
Mandatory::
iline : int or segyio.BinField
xline : int or segyio.BinField
samples : array of int
format : { 1, 5 }
1 = IBM float, 5 = IEEE float
Exclusive::
ilines : array_like of int
xlines : array_like of int
offsets : array_like of int
sorting : int or segyio.TraceSortingFormat
OR
tracecount : int
Optional::
ext_headers : int
endian : str { 'big', 'msb', 'little', 'lsb' }
defaults to 'big'
Examples
--------
Create a file:
>>> spec = segyio.spec()
>>> spec.ilines = [1, 2, 3, 4]
>>> spec.xlines = [11, 12, 13]
>>> spec.samples = list(range(50))
>>> spec.sorting = 2
>>> spec.format = 1
>>> with segyio.create(path, spec) as f:
... ## fill the file with data
... pass
...
Copy a file, but shorten all traces by 50 samples:
>>> with segyio.open(srcpath) as src:
... spec = segyio.spec()
... spec.sorting = src.sorting
... spec.format = src.format
... spec.samples = src.samples[:len(src.samples) - 50]
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace
Copy a file, but shift the sample times by 50:
>>> with segyio.open(srcpath) as src:
... delrt = 50
... spec = segyio.spec()
... spec.samples = src.samples + delrt
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.header = { TraceField.DelayRecordingTime: delrt }
... dst.trace = src.trace
Copy a file, but shorten all traces by 50 samples (since v1.4):
>>> with segyio.open(srcpath) as src:
... spec = segyio.tools.metadata(src)
... spec.samples = spec.samples[:len(spec.samples) - 50]
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace
"""
from . import _segyio
if not structured(spec):
tracecount = spec.tracecount
else:
tracecount = len(spec.ilines) * len(spec.xlines) * len(spec.offsets)
ext_headers = spec.ext_headers if hasattr(spec, 'ext_headers') else 0
samples = numpy.asarray(spec.samples)
endians = {
'lsb': 256, # (1 << 8)
'little': 256,
'msb': 0,
'big': 0,
}
endian = spec.endian if hasattr(spec, 'endian') else 'big'
if endian is None:
endian = 'big'
if endian not in endians:
problem = 'unknown endianness {}, expected one of: '
opts = ' '.join(endians.keys())
raise ValueError(problem.format(endian) + opts)
fd = _segyio.segyiofd(str(filename), 'w+', endians[endian])
fd.segymake(
samples = len(samples),
tracecount = tracecount,
format = int(spec.format),
ext_headers = int(ext_headers),
)
f = segyio.SegyFile(fd,
filename = str(filename),
mode = 'w+',
iline = int(spec.iline),
xline = int(spec.xline),
endian = endian,
)
f._samples = samples
if structured(spec):
sorting = spec.sorting if hasattr(spec, 'sorting') else None
if sorting is None:
sorting = TraceSortingFormat.INLINE_SORTING
f.interpret(spec.ilines, spec.xlines, spec.offsets, sorting)
f.text[0] = default_text_header(f._il, f._xl, segyio.TraceField.offset)
if len(samples) == 1:
interval = int(samples[0] * 1000)
else:
interval = int((samples[1] - samples[0]) * 1000)
f.bin.update(
ntrpr = tracecount,
nart = tracecount,
hdt = interval,
dto = interval,
hns = len(samples),
nso = len(samples),
format = int(spec.format),
exth = ext_headers,
)
return f | python | def create(filename, spec):
"""Create a new segy file.
Create a new segy file with the geometry and properties given by `spec`.
This enables creating SEGY files from your data. The created file supports
all segyio modes, but has an emphasis on writing. The spec must be
complete, otherwise an exception will be raised. A default, empty spec can
be created with ``segyio.spec()``.
Very little data is written to the file, so just calling create is not
sufficient to re-read the file with segyio. Rather, every trace header and
trace must be written to the file to be considered complete.
Create should be used together with python's ``with`` statement. This ensures
the data is written. Please refer to the examples.
The ``segyio.spec()`` function will default sorting, offsets and everything
in the mandatory group, except format and samples, and requires the caller
to fill in *all* the fields in either of the exclusive groups.
If any field is missing from the first exclusive group, and the tracecount
is set, the resulting file will be considered unstructured. If the
tracecount is set, and all fields of the first exclusive group are
specified, the file is considered structured and the tracecount is inferred
from the xlines/ilines/offsets. The offsets are defaulted to ``[1]`` by
``segyio.spec()``.
Parameters
----------
filename : str
Path to file to create
spec : segyio.spec
Structure of the segy file
Returns
-------
file : segyio.SegyFile
An open segyio file handle, similar to that returned by `segyio.open`
See also
--------
segyio.spec : template for the `spec` argument
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.4
Support for creating unstructured files
.. versionchanged:: 1.8
Support for creating lsb files
The ``spec`` is any object that has the following attributes
Mandatory::
iline : int or segyio.BinField
xline : int or segyio.BinField
samples : array of int
format : { 1, 5 }
1 = IBM float, 5 = IEEE float
Exclusive::
ilines : array_like of int
xlines : array_like of int
offsets : array_like of int
sorting : int or segyio.TraceSortingFormat
OR
tracecount : int
Optional::
ext_headers : int
endian : str { 'big', 'msb', 'little', 'lsb' }
defaults to 'big'
Examples
--------
Create a file:
>>> spec = segyio.spec()
>>> spec.ilines = [1, 2, 3, 4]
>>> spec.xlines = [11, 12, 13]
>>> spec.samples = list(range(50))
>>> spec.sorting = 2
>>> spec.format = 1
>>> with segyio.create(path, spec) as f:
... ## fill the file with data
... pass
...
Copy a file, but shorten all traces by 50 samples:
>>> with segyio.open(srcpath) as src:
... spec = segyio.spec()
... spec.sorting = src.sorting
... spec.format = src.format
... spec.samples = src.samples[:len(src.samples) - 50]
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace
Copy a file, but shift the sample times by 50:
>>> with segyio.open(srcpath) as src:
... delrt = 50
... spec = segyio.spec()
... spec.samples = src.samples + delrt
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.header = { TraceField.DelayRecordingTime: delrt }
... dst.trace = src.trace
Copy a file, but shorten all traces by 50 samples (since v1.4):
>>> with segyio.open(srcpath) as src:
... spec = segyio.tools.metadata(src)
... spec.samples = spec.samples[:len(spec.samples) - 50]
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace
"""
from . import _segyio
if not structured(spec):
tracecount = spec.tracecount
else:
tracecount = len(spec.ilines) * len(spec.xlines) * len(spec.offsets)
ext_headers = spec.ext_headers if hasattr(spec, 'ext_headers') else 0
samples = numpy.asarray(spec.samples)
endians = {
'lsb': 256, # (1 << 8)
'little': 256,
'msb': 0,
'big': 0,
}
endian = spec.endian if hasattr(spec, 'endian') else 'big'
if endian is None:
endian = 'big'
if endian not in endians:
problem = 'unknown endianness {}, expected one of: '
opts = ' '.join(endians.keys())
raise ValueError(problem.format(endian) + opts)
fd = _segyio.segyiofd(str(filename), 'w+', endians[endian])
fd.segymake(
samples = len(samples),
tracecount = tracecount,
format = int(spec.format),
ext_headers = int(ext_headers),
)
f = segyio.SegyFile(fd,
filename = str(filename),
mode = 'w+',
iline = int(spec.iline),
xline = int(spec.xline),
endian = endian,
)
f._samples = samples
if structured(spec):
sorting = spec.sorting if hasattr(spec, 'sorting') else None
if sorting is None:
sorting = TraceSortingFormat.INLINE_SORTING
f.interpret(spec.ilines, spec.xlines, spec.offsets, sorting)
f.text[0] = default_text_header(f._il, f._xl, segyio.TraceField.offset)
if len(samples) == 1:
interval = int(samples[0] * 1000)
else:
interval = int((samples[1] - samples[0]) * 1000)
f.bin.update(
ntrpr = tracecount,
nart = tracecount,
hdt = interval,
dto = interval,
hns = len(samples),
nso = len(samples),
format = int(spec.format),
exth = ext_headers,
)
return f | [
"def",
"create",
"(",
"filename",
",",
"spec",
")",
":",
"from",
".",
"import",
"_segyio",
"if",
"not",
"structured",
"(",
"spec",
")",
":",
"tracecount",
"=",
"spec",
".",
"tracecount",
"else",
":",
"tracecount",
"=",
"len",
"(",
"spec",
".",
"ilines"... | Create a new segy file.
Create a new segy file with the geometry and properties given by `spec`.
This enables creating SEGY files from your data. The created file supports
all segyio modes, but has an emphasis on writing. The spec must be
complete, otherwise an exception will be raised. A default, empty spec can
be created with ``segyio.spec()``.
Very little data is written to the file, so just calling create is not
sufficient to re-read the file with segyio. Rather, every trace header and
trace must be written to the file to be considered complete.
Create should be used together with python's ``with`` statement. This ensures
the data is written. Please refer to the examples.
The ``segyio.spec()`` function will default sorting, offsets and everything
in the mandatory group, except format and samples, and requires the caller
to fill in *all* the fields in either of the exclusive groups.
If any field is missing from the first exclusive group, and the tracecount
is set, the resulting file will be considered unstructured. If the
tracecount is set, and all fields of the first exclusive group are
specified, the file is considered structured and the tracecount is inferred
from the xlines/ilines/offsets. The offsets are defaulted to ``[1]`` by
``segyio.spec()``.
Parameters
----------
filename : str
Path to file to create
spec : segyio.spec
Structure of the segy file
Returns
-------
file : segyio.SegyFile
An open segyio file handle, similar to that returned by `segyio.open`
See also
--------
segyio.spec : template for the `spec` argument
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.4
Support for creating unstructured files
.. versionchanged:: 1.8
Support for creating lsb files
The ``spec`` is any object that has the following attributes
Mandatory::
iline : int or segyio.BinField
xline : int or segyio.BinField
samples : array of int
format : { 1, 5 }
1 = IBM float, 5 = IEEE float
Exclusive::
ilines : array_like of int
xlines : array_like of int
offsets : array_like of int
sorting : int or segyio.TraceSortingFormat
OR
tracecount : int
Optional::
ext_headers : int
endian : str { 'big', 'msb', 'little', 'lsb' }
defaults to 'big'
Examples
--------
Create a file:
>>> spec = segyio.spec()
>>> spec.ilines = [1, 2, 3, 4]
>>> spec.xlines = [11, 12, 13]
>>> spec.samples = list(range(50))
>>> spec.sorting = 2
>>> spec.format = 1
>>> with segyio.create(path, spec) as f:
... ## fill the file with data
... pass
...
Copy a file, but shorten all traces by 50 samples:
>>> with segyio.open(srcpath) as src:
... spec = segyio.spec()
... spec.sorting = src.sorting
... spec.format = src.format
... spec.samples = src.samples[:len(src.samples) - 50]
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace
Copy a file, but shift the sample times by 50:
>>> with segyio.open(srcpath) as src:
... delrt = 50
... spec = segyio.spec()
... spec.samples = src.samples + delrt
... spec.ilines = src.ilines
... spec.xlines = src.xlines
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.header = { TraceField.DelayRecordingTime: delrt }
... dst.trace = src.trace
Copy a file, but shorten all traces by 50 samples (since v1.4):
>>> with segyio.open(srcpath) as src:
... spec = segyio.tools.metadata(src)
... spec.samples = spec.samples[:len(spec.samples) - 50]
... with segyio.create(dstpath, spec) as dst:
... dst.text[0] = src.text[0]
... dst.bin = src.bin
... dst.header = src.header
... dst.trace = src.trace | [
"Create",
"a",
"new",
"segy",
"file",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/create.py#L38-L246 | train | 220,067 |
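To make a freshly created file readable, every header and trace must be written, as noted above; a sketch filling the structured file from the first docstring example with synthetic zero traces (segyio.su.iline and segyio.su.xline are the standard header word constants):

import numpy as np
import segyio

spec = segyio.spec()
spec.ilines = [1, 2, 3, 4]
spec.xlines = [11, 12, 13]
spec.samples = list(range(50))
spec.sorting = 2
spec.format = 1

with segyio.create('synthetic.sgy', spec) as f:
    tr = 0
    for il in spec.ilines:
        for xl in spec.xlines:
            f.header[tr] = {segyio.su.iline: il, segyio.su.xline: xl}
            f.trace[tr] = np.zeros(len(spec.samples), dtype=np.single)
            tr += 1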
equinor/segyio | python/segyio/su/file.py | open | def open(filename, mode = 'r', iline = 189,
xline = 193,
strict = True,
ignore_geometry = False,
endian = 'big' ):
"""Open a seismic unix file.
Behaves identically to open(), except it expects the seismic unix format,
not SEG-Y.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out of building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.su.file
An open seismic unix file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
See also
--------
segyio.open : SEG-Y open
Notes
-----
.. versionadded:: 1.8
"""
if 'w' in mode:
problem = 'w in mode would truncate the file'
solution = 'use r+ to open in read-write'
raise ValueError(', '.join((problem, solution)))
endians = {
'little': 256, # (1 << 8)
'lsb': 256,
'big': 0,
'msb': 0,
}
if endian not in endians:
problem = 'unknown endianness, must be one of: '
candidates = ' '.join(endians.keys())
raise ValueError(problem + candidates)
from .. import _segyio
fd = _segyio.segyiofd(str(filename), mode, endians[endian])
fd.suopen()
metrics = fd.metrics()
f = sufile(
fd,
filename = str(filename),
mode = mode,
iline = iline,
xline = xline,
)
h0 = f.header[0]
dt = h0[words.dt] / 1000.0
t0 = h0[words.delrt]
samples = metrics['samplecount']
f._samples = (numpy.arange(samples) * dt) + t0
if ignore_geometry:
return f
return infer_geometry(f, metrics, iline, xline, strict) | python | def open(filename, mode = 'r', iline = 189,
xline = 193,
strict = True,
ignore_geometry = False,
endian = 'big' ):
"""Open a seismic unix file.
Behaves identically to open(), except it expects the seismic unix format,
not SEG-Y.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.su.file
An open seismic unix file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
See also
--------
segyio.open : SEG-Y open
Notes
-----
.. versionadded:: 1.8
"""
if 'w' in mode:
problem = 'w in mode would truncate the file'
solution = 'use r+ to open in read-write'
raise ValueError(', '.join((problem, solution)))
endians = {
'little': 256, # (1 << 8)
'lsb': 256,
'big': 0,
'msb': 0,
}
if endian not in endians:
problem = 'unknown endianness, must be one of: '
candidates = ' '.join(endians.keys())
raise ValueError(problem + candidates)
from .. import _segyio
fd = _segyio.segyiofd(str(filename), mode, endians[endian])
fd.suopen()
metrics = fd.metrics()
f = sufile(
fd,
filename = str(filename),
mode = mode,
iline = iline,
xline = xline,
)
h0 = f.header[0]
dt = h0[words.dt] / 1000.0
t0 = h0[words.delrt]
samples = metrics['samplecount']
f._samples = (numpy.arange(samples) * dt) + t0
if ignore_geometry:
return f
return infer_geometry(f, metrics, iline, xline, strict) | [
"def",
"open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
",",
"iline",
"=",
"189",
",",
"xline",
"=",
"193",
",",
"strict",
"=",
"True",
",",
"ignore_geometry",
"=",
"False",
",",
"endian",
"=",
"'big'",
")",
":",
"if",
"'w'",
"in",
"mode",
":",
... | Open a seismic unix file.
Behaves identically to open(), except it expects the seismic unix format,
not SEG-Y.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.su.file
An open seismic unix file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
See also
--------
segyio.open : SEG-Y open
Notes
-----
.. versionadded:: 1.8 | [
"Open",
"a",
"seismic",
"unix",
"file",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/su/file.py#L23-L118 | train | 220,068 |
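The open above ships without a usage example; a minimal sketch, assuming the function is re-exported as segyio.su.open and that 'shots.su' is an existing little-endian SU file (both the path and the endianness are illustrative):
import segyio
# shot-organised SU files rarely have a regular inline/crossline grid,
# so skip geometry inference
with segyio.su.open('shots.su', ignore_geometry=True, endian='little') as f:
    print(f.samples[:5])   # sample times built from the dt and delrt header words
    print(f.header[0])     # first trace header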
equinor/segyio | python/segyio/tools.py | create_text_header | def create_text_header(lines):
"""Format textual header
Create a "correct" SEG-Y textual header. Every line will be prefixed with
C## and there are 40 lines. The input must be a dictionary with the line
number[1-40] as a key. The value for each key should be a string up to 76
characters long.
Parameters
----------
lines : dict
`lines` dictionary with fields:
- ``no`` : line number (`int`)
- ``line`` : line (`str`)
Returns
-------
text : str
"""
rows = []
for line_no in range(1, 41):
line = ""
if line_no in lines:
line = lines[line_no]
row = "C{0:>2} {1:76}".format(line_no, line)
rows.append(row)
rows = ''.join(rows)
return rows | python | def create_text_header(lines):
"""Format textual header
Create a "correct" SEG-Y textual header. Every line will be prefixed with
C## and there are 40 lines. The input must be a dictionary with the line
number[1-40] as a key. The value for each key should be a string up to 76
characters long.
Parameters
----------
lines : dict
`lines` dictionary with fields:
- ``no`` : line number (`int`)
- ``line`` : line (`str`)
Returns
-------
text : str
"""
rows = []
for line_no in range(1, 41):
line = ""
if line_no in lines:
line = lines[line_no]
row = "C{0:>2} {1:76}".format(line_no, line)
rows.append(row)
rows = ''.join(rows)
return rows | [
"def",
"create_text_header",
"(",
"lines",
")",
":",
"rows",
"=",
"[",
"]",
"for",
"line_no",
"in",
"range",
"(",
"1",
",",
"41",
")",
":",
"line",
"=",
"\"\"",
"if",
"line_no",
"in",
"lines",
":",
"line",
"=",
"lines",
"[",
"line_no",
"]",
"row",
... | Format textual header
Create a "correct" SEG-Y textual header. Every line will be prefixed with
C## and there are 40 lines. The input must be a dictionary with the line
number[1-40] as a key. The value for each key should be a string up to 76
characters long.
Parameters
----------
lines : dict
`lines` dictionary with fields:
- ``no`` : line number (`int`)
- ``line`` : line (`str`)
Returns
-------
text : str | [
"Format",
"textual",
"header"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L65-L98 | train | 220,069 |
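A short sketch of building a header with create_text_header and installing it; the line contents and the 'out.sgy' path are illustrative:
import segyio
lines = {1: 'Survey X, reprocessed 2019', 2: 'Datum: MSL', 40: 'END TEXTUAL HEADER'}
text = segyio.tools.create_text_header(lines)
assert len(text) == 3200   # 40 rows of 80 characters each
with segyio.open('out.sgy', 'r+') as f:
    f.text[0] = text       # write it as the mandatory textual header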
equinor/segyio | python/segyio/tools.py | wrap | def wrap(s, width=80):
"""
Formats the text input with newlines given the user specified width for
each line.
Parameters
----------
s : str
width : int
Returns
-------
text : str
Notes
-----
.. versionadded:: 1.1
"""
return '\n'.join(textwrap.wrap(str(s), width=width)) | python | def wrap(s, width=80):
"""
Formats the text input with newlines given the user specified width for
each line.
Parameters
----------
s : str
width : int
Returns
-------
text : str
Notes
-----
.. versionadded:: 1.1
"""
return '\n'.join(textwrap.wrap(str(s), width=width)) | [
"def",
"wrap",
"(",
"s",
",",
"width",
"=",
"80",
")",
":",
"return",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"str",
"(",
"s",
")",
",",
"width",
"=",
"width",
")",
")"
] | Formats the text input with newlines given the user specified width for
each line.
Parameters
----------
s : str
width : int
Returns
-------
text : str
Notes
-----
.. versionadded:: 1.1 | [
"Formats",
"the",
"text",
"input",
"with",
"newlines",
"given",
"the",
"user",
"specified",
"width",
"for",
"each",
"line",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L100-L122 | train | 220,070 |
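wrap pairs naturally with the flat 3200-character textual header, which carries no newlines of its own; a sketch with an illustrative path:
import segyio
with segyio.open('survey.sgy') as f:
    # reflow the textual header into 80-character lines for printing
    print(segyio.tools.wrap(f.text[0], width=80))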
equinor/segyio | python/segyio/tools.py | native | def native(data,
format = segyio.SegySampleFormat.IBM_FLOAT_4_BYTE,
copy = True):
"""Convert numpy array to native float
Converts a numpy array from raw segy trace data to native floats. Works for numpy ndarrays.
Parameters
----------
data : numpy.ndarray
format : int or segyio.SegySampleFormat
copy : bool
If True, convert on a copy, and leave the input array unmodified
Returns
-------
data : numpy.ndarray
Notes
-----
.. versionadded:: 1.1
Examples
--------
Convert mmap'd trace to native float:
>>> d = np.memmap('file.sgy', offset = 3600, dtype = np.uintc)
>>> samples = 1500
>>> trace = segyio.tools.native(d[240:240+samples])
"""
data = data.view( dtype = np.single )
if copy:
data = np.copy( data )
format = int(segyio.SegySampleFormat(format))
return segyio._segyio.native(data, format) | python | def native(data,
format = segyio.SegySampleFormat.IBM_FLOAT_4_BYTE,
copy = True):
"""Convert numpy array to native float
Converts a numpy array from raw segy trace data to native floats. Works for numpy ndarrays.
Parameters
----------
data : numpy.ndarray
format : int or segyio.SegySampleFormat
copy : bool
If True, convert on a copy, and leave the input array unmodified
Returns
-------
data : numpy.ndarray
Notes
-----
.. versionadded:: 1.1
Examples
--------
Convert mmap'd trace to native float:
>>> d = np.memmap('file.sgy', offset = 3600, dtype = np.uintc)
>>> samples = 1500
>>> trace = segyio.tools.native(d[240:240+samples])
"""
data = data.view( dtype = np.single )
if copy:
data = np.copy( data )
format = int(segyio.SegySampleFormat(format))
return segyio._segyio.native(data, format) | [
"def",
"native",
"(",
"data",
",",
"format",
"=",
"segyio",
".",
"SegySampleFormat",
".",
"IBM_FLOAT_4_BYTE",
",",
"copy",
"=",
"True",
")",
":",
"data",
"=",
"data",
".",
"view",
"(",
"dtype",
"=",
"np",
".",
"single",
")",
"if",
"copy",
":",
"data"... | Convert numpy array to native float
Converts a numpy array from raw segy trace data to native floats. Works for numpy ndarrays.
Parameters
----------
data : numpy.ndarray
format : int or segyio.SegySampleFormat
copy : bool
If True, convert on a copy, and leave the input array unmodified
Returns
-------
data : numpy.ndarray
Notes
-----
.. versionadded:: 1.1
Examples
--------
Convert mmap'd trace to native float:
>>> d = np.memmap('file.sgy', offset = 3600, dtype = np.uintc)
>>> samples = 1500
>>> trace = segyio.tools.native(d[240:240+samples]) | [
"Convert",
"numpy",
"array",
"to",
"native",
"float"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L125-L166 | train | 220,071 |
equinor/segyio | python/segyio/tools.py | cube | def cube(f):
"""Read a full cube from a file
Takes an open segy file (created with segyio.open) or a file name.
If the file is a prestack file, the cube returned has the dimensions
``(fast, slow, offset, sample)``. If it is post-stack (only the one
offset), the dimensions are normalised to ``(fast, slow, sample)``
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
cube : numpy.ndarray
Notes
-----
.. versionadded:: 1.1
"""
if not isinstance(f, segyio.SegyFile):
with segyio.open(f) as fl:
return cube(fl)
ilsort = f.sorting == segyio.TraceSortingFormat.INLINE_SORTING
fast = f.ilines if ilsort else f.xlines
slow = f.xlines if ilsort else f.ilines
fast, slow, offs = len(fast), len(slow), len(f.offsets)
smps = len(f.samples)
dims = (fast, slow, smps) if offs == 1 else (fast, slow, offs, smps)
return f.trace.raw[:].reshape(dims) | python | def cube(f):
"""Read a full cube from a file
Takes an open segy file (created with segyio.open) or a file name.
If the file is a prestack file, the cube returned has the dimensions
``(fast, slow, offset, sample)``. If it is post-stack (only the one
offset), the dimensions are normalised to ``(fast, slow, sample)``
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
cube : numpy.ndarray
Notes
-----
.. versionadded:: 1.1
"""
if not isinstance(f, segyio.SegyFile):
with segyio.open(f) as fl:
return cube(fl)
ilsort = f.sorting == segyio.TraceSortingFormat.INLINE_SORTING
fast = f.ilines if ilsort else f.xlines
slow = f.xlines if ilsort else f.ilines
fast, slow, offs = len(fast), len(slow), len(f.offsets)
smps = len(f.samples)
dims = (fast, slow, smps) if offs == 1 else (fast, slow, offs, smps)
return f.trace.raw[:].reshape(dims) | [
"def",
"cube",
"(",
"f",
")",
":",
"if",
"not",
"isinstance",
"(",
"f",
",",
"segyio",
".",
"SegyFile",
")",
":",
"with",
"segyio",
".",
"open",
"(",
"f",
")",
"as",
"fl",
":",
"return",
"cube",
"(",
"fl",
")",
"ilsort",
"=",
"f",
".",
"sorting... | Read a full cube from a file
Takes an open segy file (created with segyio.open) or a file name.
If the file is a prestack file, the cube returned has the dimensions
``(fast, slow, offset, sample)``. If it is post-stack (only the one
offset), the dimensions are normalised to ``(fast, slow, sample)``
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
cube : numpy.ndarray
Notes
-----
.. versionadded:: 1.1 | [
"Read",
"a",
"full",
"cube",
"from",
"a",
"file"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L203-L239 | train | 220,072 |
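A usage sketch for cube; the path and the printed shape are illustrative, and the file is assumed post-stack so the array comes back as (fast, slow, sample):
import segyio
c = segyio.tools.cube('survey.sgy')
print(c.shape)    # e.g. (5, 10, 50): 5 ilines, 10 xlines, 50 samples
line0 = c[0]      # first line in the fast direction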
equinor/segyio | python/segyio/tools.py | rotation | def rotation(f, line = 'fast'):
""" Find rotation of the survey
Find the clockwise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clockwise rotation is defined as the angle in radians between the line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2
"""
if f.unstructured:
raise ValueError("Rotation requires a structured file")
lines = { 'fast': f.fast,
'slow': f.slow,
'iline': f.iline,
'xline': f.xline,
}
if line not in lines:
error = "Unknown line {}".format(line)
solution = "Must be any of: {}".format(' '.join(lines.keys()))
raise ValueError('{} {}'.format(error, solution))
l = lines[line]
origin = f.header[0][segyio.su.cdpx, segyio.su.cdpy]
cdpx, cdpy = origin[segyio.su.cdpx], origin[segyio.su.cdpy]
rot = f.xfd.rotation( len(l),
l.stride,
len(f.offsets),
np.fromiter(l.keys(), dtype = np.intc) )
return rot, cdpx, cdpy | python | def rotation(f, line = 'fast'):
""" Find rotation of the survey
Find the clockwise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clockwise rotation is defined as the angle in radians between the line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2
"""
if f.unstructured:
raise ValueError("Rotation requires a structured file")
lines = { 'fast': f.fast,
'slow': f.slow,
'iline': f.iline,
'xline': f.xline,
}
if line not in lines:
error = "Unknown line {}".format(line)
solution = "Must be any of: {}".format(' '.join(lines.keys()))
raise ValueError('{} {}'.format(error, solution))
l = lines[line]
origin = f.header[0][segyio.su.cdpx, segyio.su.cdpy]
cdpx, cdpy = origin[segyio.su.cdpx], origin[segyio.su.cdpy]
rot = f.xfd.rotation( len(l),
l.stride,
len(f.offsets),
np.fromiter(l.keys(), dtype = np.intc) )
return rot, cdpx, cdpy | [
"def",
"rotation",
"(",
"f",
",",
"line",
"=",
"'fast'",
")",
":",
"if",
"f",
".",
"unstructured",
":",
"raise",
"ValueError",
"(",
"\"Rotation requires a structured file\"",
")",
"lines",
"=",
"{",
"'fast'",
":",
"f",
".",
"fast",
",",
"'slow'",
":",
"f... | Find rotation of the survey
Find the clockwise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clockwise rotation is defined as the angle in radians between the line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2 | [
"Find",
"rotation",
"of",
"the",
"survey"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L241-L297 | train | 220,073 |
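A sketch that converts the returned angle to degrees; the path is illustrative:
import math
import segyio
with segyio.open('survey.sgy') as f:
    rot, cdpx, cdpy = segyio.tools.rotation(f, line='fast')
    print(math.degrees(rot), cdpx, cdpy)   # rotation in degrees plus the survey origin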
equinor/segyio | python/segyio/tools.py | metadata | def metadata(f):
"""Get survey structural properties and metadata
Create a description object that, when passed to ``segyio.create()``, would
create a new file with the same structure, dimensions, and metadata as
``f``.
Takes an open segy file (created with segyio.open) or a file name.
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
spec : segyio.spec
Notes
-----
.. versionadded:: 1.4
"""
if not isinstance(f, segyio.SegyFile):
with segyio.open(f) as fl:
return metadata(fl)
spec = segyio.spec()
spec.iline = f._il
spec.xline = f._xl
spec.samples = f.samples
spec.format = f.format
spec.ilines = f.ilines
spec.xlines = f.xlines
spec.offsets = f.offsets
spec.sorting = f.sorting
spec.tracecount = f.tracecount
spec.ext_headers = f.ext_headers
spec.endian = f.endian
return spec | python | def metadata(f):
"""Get survey structural properties and metadata
Create a description object that, when passed to ``segyio.create()``, would
create a new file with the same structure, dimensions, and metadata as
``f``.
Takes an open segy file (created with segyio.open) or a file name.
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
spec : segyio.spec
Notes
-----
.. versionadded:: 1.4
"""
if not isinstance(f, segyio.SegyFile):
with segyio.open(f) as fl:
return metadata(fl)
spec = segyio.spec()
spec.iline = f._il
spec.xline = f._xl
spec.samples = f.samples
spec.format = f.format
spec.ilines = f.ilines
spec.xlines = f.xlines
spec.offsets = f.offsets
spec.sorting = f.sorting
spec.tracecount = f.tracecount
spec.ext_headers = f.ext_headers
spec.endian = f.endian
return spec | [
"def",
"metadata",
"(",
"f",
")",
":",
"if",
"not",
"isinstance",
"(",
"f",
",",
"segyio",
".",
"SegyFile",
")",
":",
"with",
"segyio",
".",
"open",
"(",
"f",
")",
"as",
"fl",
":",
"return",
"metadata",
"(",
"fl",
")",
"spec",
"=",
"segyio",
".",... | Get survey structural properties and metadata
Create a description object that, when passed to ``segyio.create()``, would
create a new file with the same structure, dimensions, and metadata as
``f``.
Takes an open segy file (created with segyio.open) or a file name.
Parameters
----------
f : str or segyio.SegyFile
Returns
-------
spec : segyio.spec
Notes
-----
.. versionadded:: 1.4 | [
"Get",
"survey",
"structural",
"properties",
"and",
"metadata"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L299-L345 | train | 220,074 |
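metadata is the short route to cloning file structure, condensing the copy pattern shown in the create examples earlier; the paths are illustrative:
import segyio
with segyio.open('in.sgy') as src:
    spec = segyio.tools.metadata(src)
    with segyio.create('out.sgy', spec) as dst:
        dst.text[0] = src.text[0]
        dst.bin = src.bin
        dst.header = src.header
        dst.trace = src.trace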
equinor/segyio | python/segyio/tools.py | resample | def resample(f, rate = None, delay = None, micro = False,
trace = True,
binary = True):
"""Resample a file
Resample all data traces, and update the file handle to reflect the new
sample rate. No actual samples (data traces) are modified, only the header
fields and interpretation.
By default, the rate and the delay are in milliseconds - if you need higher
resolution, passing micro=True interprets rate as microseconds (as it is
represented in the file). Delay is always milliseconds.
By default, both the global binary header and the trace headers are updated
to reflect this. If preserving either the trace header interval field or
the binary header interval field is important, pass trace=False and
binary=False respectively, to not have that field updated. This only applies
to sample rates - the recording delay is only found in trace headers and
will be written unconditionally, if delay is not None.
.. warning::
This function requires an open file handle and is **DESTRUCTIVE**. It
will modify the file, and if an exception is raised then partial writes
might have happened and the file might be corrupted.
This function assumes all traces have uniform delays and frequencies.
Parameters
----------
f : SegyFile
rate : int
delay : int
micro : bool
if True, interpret rate as microseconds
trace : bool
Update the trace header if True
binary : bool
Update the binary header if True
Notes
-----
.. versionadded:: 1.4
"""
if rate is not None:
if not micro: rate *= 1000
if binary: f.bin[segyio.su.hdt] = rate
if trace: f.header = { segyio.su.dt: rate}
if delay is not None:
f.header = { segyio.su.delrt: delay }
t0 = delay if delay is not None else f.samples[0]
rate = rate / 1000 if rate is not None else f.samples[1] - f.samples[0]
f._samples = (np.arange(len(f.samples)) * rate) + t0
return f | python | def resample(f, rate = None, delay = None, micro = False,
trace = True,
binary = True):
"""Resample a file
Resample all data traces, and update the file handle to reflect the new
sample rate. No actual samples (data traces) are modified, only the header
fields and interpretation.
By default, the rate and the delay are in milliseconds - if you need higher
resolution, passing micro=True interprets rate as microseconds (as it is
represented in the file). Delay is always milliseconds.
By default, both the global binary header and the trace headers are updated
to reflect this. If preserving either the trace header interval field or
the binary header interval field is important, pass trace=False and
binary=False respectively, to not have that field updated. This only applies
to sample rates - the recording delay is only found in trace headers and
will be written unconditionally, if delay is not None.
.. warning::
This function requires an open file handle and is **DESTRUCTIVE**. It
will modify the file, and if an exception is raised then partial writes
might have happened and the file might be corrupted.
This function assumes all traces have uniform delays and frequencies.
Parameters
----------
f : SegyFile
rate : int
delay : int
micro : bool
if True, interpret rate as microseconds
trace : bool
Update the trace header if True
binary : bool
Update the binary header if True
Notes
-----
.. versionadded:: 1.4
"""
if rate is not None:
if not micro: rate *= 1000
if binary: f.bin[segyio.su.hdt] = rate
if trace: f.header = { segyio.su.dt: rate}
if delay is not None:
f.header = { segyio.su.delrt: delay }
t0 = delay if delay is not None else f.samples[0]
rate = rate / 1000 if rate is not None else f.samples[1] - f.samples[0]
f._samples = (np.arange(len(f.samples)) * rate) + t0
return f | [
"def",
"resample",
"(",
"f",
",",
"rate",
"=",
"None",
",",
"delay",
"=",
"None",
",",
"micro",
"=",
"False",
",",
"trace",
"=",
"True",
",",
"binary",
"=",
"True",
")",
":",
"if",
"rate",
"is",
"not",
"None",
":",
"if",
"not",
"micro",
":",
"r... | Resample a file
Resample all data traces, and update the file handle to reflect the new
sample rate. No actual samples (data traces) are modified, only the header
fields and interpretation.
By default, the rate and the delay are in milliseconds - if you need higher
resolution, passing micro=True interprets rate as microseconds (as it is
represented in the file). Delay is always milliseconds.
By default, both the global binary header and the trace headers are updated
to reflect this. If preserving either the trace header interval field or
the binary header interval field is important, pass trace=False and
binary=False respectively, to not have that field updated. This only applies
to sample rates - the recording delay is only found in trace headers and
will be written unconditionally, if delay is not None.
.. warning::
This function requires an open file handle and is **DESTRUCTIVE**. It
will modify the file, and if an exception is raised then partial writes
might have happened and the file might be corrupted.
This function assumes all traces have uniform delays and frequencies.
Parameters
----------
f : SegyFile
rate : int
delay : int
micro : bool
if True, interpret rate as microseconds
trace : bool
Update the trace header if True
binary : bool
Update the binary header if True
Notes
-----
.. versionadded:: 1.4 | [
"Resample",
"a",
"file"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L347-L408 | train | 220,075 |
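A sketch of the destructive resample; rate is in milliseconds unless micro=True, and the path is illustrative:
import segyio
with segyio.open('survey.sgy', 'r+') as f:     # must be opened writable
    segyio.tools.resample(f, rate=2, delay=0)  # relabel as 2 ms sampling, zero delay
    print(f.samples[:3])                       # now [0., 2., 4.]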
equinor/segyio | python/segyio/tools.py | from_array3D | def from_array3D(filename, data, iline=189,
xline=193,
format=SegySampleFormat.IBM_FLOAT_4_BYTE,
dt=4000,
delrt=0):
""" Create a new SEGY file from a 3D array
Create a structured SEGY file with defaulted headers from a 3-dimensional
array. The file is inline-sorted. ilines, xlines and samples are inferred
from the array. Structure-defining fields in the binary header and
in the trace headers are set accordingly. Such fields include, but are not
limited to iline, xline and offset. The file also contains a defaulted
textual header.
The 3-dimensional array is interpreted as::
xl0 xl1 xl2
-----------------
/ | tr0 | tr1 | tr2 | il0
-----------------
| / | tr3 | tr4 | tr5 | il1
-----------------
| / | tr6 | tr7 | tr8 | il2
-----------------
| / / / / n-samples
------------------
ilines = [1, len(axis(0)) + 1]
xlines = [1, len(axis(1)) + 1]
samples = [0, len(axis(2))]
Parameters
----------
filename : string-like
Path to new file
data : 3-dimensional array-like
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
format : int or segyio.SegySampleFormat
Sample format field in the trace header. Defaults to IBM float 4 byte
dt : int-like
sample interval
delrt : int-like
Notes
-----
.. versionadded:: 1.8
Examples
--------
Create a file from a 3D array, open it and read an iline:
>>> segyio.tools.from_array3D(path, array3d)
>>> with segyio.open(path) as f:
... iline = f.iline[0]
...
"""
data = np.asarray(data)
dimensions = len(data.shape)
if dimensions != 3:
problem = "Expected 3 dimensions, {} was given".format(dimensions)
raise ValueError(problem)
from_array(filename, data, iline=iline, xline=xline, format=format,
dt=dt,
delrt=delrt) | python | def from_array3D(filename, data, iline=189,
xline=193,
format=SegySampleFormat.IBM_FLOAT_4_BYTE,
dt=4000,
delrt=0):
""" Create a new SEGY file from a 3D array
Create a structured SEGY file with defaulted headers from a 3-dimensional
array. The file is inline-sorted. ilines, xlines and samples are inferred
from the array. Structure-defining fields in the binary header and
in the trace headers are set accordingly. Such fields include, but are not
limited to iline, xline and offset. The file also contains a defaulted
textual header.
The 3-dimensional array is interpreted as::
xl0 xl1 xl2
-----------------
/ | tr0 | tr1 | tr2 | il0
-----------------
| / | tr3 | tr4 | tr5 | il1
-----------------
| / | tr6 | tr7 | tr8 | il2
-----------------
| / / / / n-samples
------------------
ilines = [1, len(axis(0)) + 1]
xlines = [1, len(axis(1)) + 1]
samples = [0, len(axis(2))]
Parameters
----------
filename : string-like
Path to new file
data : 3-dimensional array-like
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
format : int or segyio.SegySampleFormat
Sample format field in the trace header. Defaults to IBM float 4 byte
dt : int-like
sample interval
delrt : int-like
Notes
-----
.. versionadded:: 1.8
Examples
--------
Create a file from a 3D array, open it and read an iline:
>>> segyio.tools.from_array3D(path, array3d)
>>> with segyio.open(path) as f:
... iline = f.iline[0]
...
"""
data = np.asarray(data)
dimensions = len(data.shape)
if dimensions != 3:
problem = "Expected 3 dimensions, {} was given".format(dimensions)
raise ValueError(problem)
from_array(filename, data, iline=iline, xline=xline, format=format,
dt=dt,
delrt=delrt) | [
"def",
"from_array3D",
"(",
"filename",
",",
"data",
",",
"iline",
"=",
"189",
",",
"xline",
"=",
"193",
",",
"format",
"=",
"SegySampleFormat",
".",
"IBM_FLOAT_4_BYTE",
",",
"dt",
"=",
"4000",
",",
"delrt",
"=",
"0",
")",
":",
"data",
"=",
"np",
"."... | Create a new SEGY file from a 3D array
Create a structured SEGY file with defaulted headers from a 3-dimensional
array. The file is inline-sorted. ilines, xlines and samples are inferred
from the array. Structure-defining fields in the binary header and
in the trace headers are set accordingly. Such fields include, but are not
limited to iline, xline and offset. The file also contains a defaulted
textual header.
The 3-dimensional array is interpreted as::
xl0 xl1 xl2
-----------------
/ | tr0 | tr1 | tr2 | il0
-----------------
| / | tr3 | tr4 | tr5 | il1
-----------------
| / | tr6 | tr7 | tr8 | il2
-----------------
| / / / / n-samples
------------------
ilines = [1, len(axis(0)) + 1]
xlines = [1, len(axis(1)) + 1]
samples = [0, len(axis(2))]
Parameters
----------
filename : string-like
Path to new file
data : 3-dimensional array-like
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
format : int or segyio.SegySampleFormat
Sample format field in the trace header. Defaults to IBM float 4 byte
dt : int-like
sample interval
delrt : int-like
Notes
-----
.. versionadded:: 1.8
Examples
--------
Create a file from a 3D array, open it and read an iline:
>>> segyio.tools.from_array3D(path, array3d)
>>> with segyio.open(path) as f:
... iline = f.iline[0]
... | [
"Create",
"a",
"new",
"SEGY",
"file",
"from",
"a",
"3D",
"array",
"Create",
"an",
"structured",
"SEGY",
"file",
"with",
"defaulted",
"headers",
"from",
"a",
"3",
"-",
"dimensional",
"array",
".",
"The",
"file",
"is",
"inline",
"-",
"sorted",
".",
"ilines... | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/tools.py#L592-L662 | train | 220,076 |
equinor/segyio | python/segyio/open.py | open | def open(filename, mode="r", iline = 189,
xline = 193,
strict = True,
ignore_geometry = False,
endian = 'big'):
"""Open a segy file.
Opens a segy file and tries to figure out its sorting, inline numbers,
crossline numbers, and offsets, and enables reading and writing to this
file in a simple manner.
For reading, the access mode `r` is preferred. All write operations will
raise an exception. For writing, the mode `r+` is preferred (as `rw` would
truncate the file). Any mode with `w` will raise an error. The modes used
are standard C file modes; please refer to that documentation for a
complete reference.
Open should be used together with python's ``with`` statement. Please refer
to the examples. When the ``with`` statement is used the file will
automatically be closed when the routine completes or an exception is
raised.
By default, segyio tries to open in ``strict`` mode. This means the file will
be assumed to represent a geometry with consistent inlines, crosslines and
offsets. If strict is False, segyio will still try to establish a geometry,
but it won't abort if it fails. When a file is opened in non-strict mode,
geometry-dependent modes such as iline will raise an error.
If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or
other geometry related structures, which leads to faster opens. This is
essentially the same as using ``strict=False`` on a file that has no
geometry.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.SegyFile
An open segyio file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.8
endian argument
When a file is opened non-strict, only raw trace access is allowed, and
using modes such as ``iline`` raises an error.
Examples
--------
Open a file in read-only mode:
>>> with segyio.open(path, "r") as f:
... print(f.ilines)
...
[1, 2, 3, 4, 5]
Open a file in read-write mode:
>>> with segyio.open(path, "r+") as f:
... f.trace = np.arange(100)
Open two files at once:
>>> with segyio.open(path) as src, segyio.open(path, "r+") as dst:
... dst.trace = src.trace # copy all traces from src to dst
Open a little-endian file:
>>> with segyio.open(path, endian = 'little') as f:
... f.trace[0]
"""
if 'w' in mode:
problem = 'w in mode would truncate the file'
solution = 'use r+ to open in read-write'
raise ValueError(', '.join((problem, solution)))
endians = {
'little': 256, # (1 << 8)
'lsb': 256,
'big': 0,
'msb': 0,
}
if endian not in endians:
problem = 'unknown endianness {}, expected one of: '
opts = ' '.join(endians.keys())
raise ValueError(problem.format(endian) + opts)
from . import _segyio
fd = _segyio.segyiofd(str(filename), mode, endians[endian])
fd.segyopen()
metrics = fd.metrics()
f = segyio.SegyFile(fd,
filename = str(filename),
mode = mode,
iline = iline,
xline = xline,
endian = endian,
)
try:
dt = segyio.tools.dt(f, fallback_dt = 4000.0) / 1000.0
t0 = f.header[0][segyio.TraceField.DelayRecordingTime]
samples = metrics['samplecount']
f._samples = (numpy.arange(samples) * dt) + t0
except:
f.close()
raise
if ignore_geometry:
return f
return infer_geometry(f, metrics, iline, xline, strict) | python | def open(filename, mode="r", iline = 189,
xline = 193,
strict = True,
ignore_geometry = False,
endian = 'big'):
"""Open a segy file.
Opens a segy file and tries to figure out its sorting, inline numbers,
crossline numbers, and offsets, and enables reading and writing to this
file in a simple manner.
For reading, the access mode `r` is preferred. All write operations will
raise an exception. For writing, the mode `r+` is preferred (as `rw` would
truncate the file). Any mode with `w` will raise an error. The modes used
are standard C file modes; please refer to that documentation for a
complete reference.
Open should be used together with python's ``with`` statement. Please refer
to the examples. When the ``with`` statement is used the file will
automatically be closed when the routine completes or an exception is
raised.
By default, segyio tries to open in ``strict`` mode. This means the file will
be assumed to represent a geometry with consistent inlines, crosslines and
offsets. If strict is False, segyio will still try to establish a geometry,
but it won't abort if it fails. When a file is opened in non-strict mode,
geometry-dependent modes such as iline will raise an error.
If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or
other geometry related structures, which leads to faster opens. This is
essentially the same as using ``strict=False`` on a file that has no
geometry.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.SegyFile
An open segyio file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.8
endian argument
When a file is opened non-strict, only raw trace access is allowed, and
using modes such as ``iline`` raises an error.
Examples
--------
Open a file in read-only mode:
>>> with segyio.open(path, "r") as f:
... print(f.ilines)
...
[1, 2, 3, 4, 5]
Open a file in read-write mode:
>>> with segyio.open(path, "r+") as f:
... f.trace = np.arange(100)
Open two files at once:
>>> with segyio.open(path) as src, segyio.open(path, "r+") as dst:
... dst.trace = src.trace # copy all traces from src to dst
Open a file little-endian file:
>>> with segyio.open(path, endian = 'little') as f:
... f.trace[0]
"""
if 'w' in mode:
problem = 'w in mode would truncate the file'
solution = 'use r+ to open in read-write'
raise ValueError(', '.join((problem, solution)))
endians = {
'little': 256, # (1 << 8)
'lsb': 256,
'big': 0,
'msb': 0,
}
if endian not in endians:
problem = 'unknown endianness {}, expected one of: '
opts = ' '.join(endians.keys())
raise ValueError(problem.format(endian) + opts)
from . import _segyio
fd = _segyio.segyiofd(str(filename), mode, endians[endian])
fd.segyopen()
metrics = fd.metrics()
f = segyio.SegyFile(fd,
filename = str(filename),
mode = mode,
iline = iline,
xline = xline,
endian = endian,
)
try:
dt = segyio.tools.dt(f, fallback_dt = 4000.0) / 1000.0
t0 = f.header[0][segyio.TraceField.DelayRecordingTime]
samples = metrics['samplecount']
f._samples = (numpy.arange(samples) * dt) + t0
except:
f.close()
raise
if ignore_geometry:
return f
return infer_geometry(f, metrics, iline, xline, strict) | [
"def",
"open",
"(",
"filename",
",",
"mode",
"=",
"\"r\"",
",",
"iline",
"=",
"189",
",",
"xline",
"=",
"193",
",",
"strict",
"=",
"True",
",",
"ignore_geometry",
"=",
"False",
",",
"endian",
"=",
"'big'",
")",
":",
"if",
"'w'",
"in",
"mode",
":",
... | Open a segy file.
Opens a segy file and tries to figure out its sorting, inline numbers,
crossline numbers, and offsets, and enables reading and writing to this
file in a simple manner.
For reading, the access mode `r` is preferred. All write operations will
raise an exception. For writing, the mode `r+` is preferred (as `rw` would
truncate the file). Any mode with `w` will raise an error. The modes used
are standard C file modes; please refer to that documentation for a
complete reference.
Open should be used together with python's ``with`` statement. Please refer
to the examples. When the ``with`` statement is used the file will
automatically be closed when the routine completes or an exception is
raised.
By default, segyio tries to open in ``strict`` mode. This means the file will
be assumed to represent a geometry with consistent inlines, crosslines and
offsets. If strict is False, segyio will still try to establish a geometry,
but it won't abort if it fails. When a file is opened in non-strict mode,
geometry-dependent modes such as iline will raise an error.
If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or
other geometry related structures, which leads to faster opens. This is
essentially the same as using ``strict=False`` on a file that has no
geometry.
Parameters
----------
filename : str
Path to file to open
mode : {'r', 'r+'}
File access mode, read-only ('r', default) or read-write ('r+')
iline : int or segyio.TraceField
Inline number field in the trace headers. Defaults to 189 as per the
SEG-Y rev1 specification
xline : int or segyio.TraceField
Crossline number field in the trace headers. Defaults to 193 as per the
SEG-Y rev1 specification
strict : bool, optional
Abort if a geometry cannot be inferred. Defaults to True.
ignore_geometry : bool, optional
Opt out on building geometry information, useful for e.g. shot
organised files. Defaults to False.
endian : {'big', 'msb', 'little', 'lsb'}
File endianness, big/msb (default) or little/lsb
Returns
-------
file : segyio.SegyFile
An open segyio file handle
Raises
------
ValueError
If the mode string contains 'w', as it would truncate the file
Notes
-----
.. versionadded:: 1.1
.. versionchanged:: 1.8
endian argument
When a file is opened non-strict, only raw trace access is allowed, and
using modes such as ``iline`` raises an error.
Examples
--------
Open a file in read-only mode:
>>> with segyio.open(path, "r") as f:
... print(f.ilines)
...
[1, 2, 3, 4, 5]
Open a file in read-write mode:
>>> with segyio.open(path, "r+") as f:
... f.trace = np.arange(100)
Open two files at once:
>>> with segyio.open(path) as src, segyio.open(path, "r+") as dst:
... dst.trace = src.trace # copy all traces from src to dst
Open a little-endian file:
>>> with segyio.open(path, endian = 'little') as f:
... f.trace[0] | [
"Open",
"a",
"segy",
"file",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/open.py#L33-L187 | train | 220,077 |
equinor/segyio | python/segyio/field.py | Field.fetch | def fetch(self, buf = None, traceno = None):
"""Fetch the header from disk
This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
This method is largely meant for internal use - if you need to reload
disk contents, use ``reload``.
Fetch does not update any internal state (unless `buf` is ``None`` on a
trace header, and the read succeeds), but returns the fetched header
contents.
This method can be used to reposition the trace header, which is useful
for constructing generators.
If this is called on a writable, new file, and this header has not yet
been written to, it will successfully return an empty buffer that, when
written to, will be reflected on disk.
Parameters
----------
buf : bytearray
buffer to read into instead of ``self.buf``
traceno : int
Returns
-------
buf : bytearray
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in high-performance code.
"""
if buf is None:
buf = self.buf
if traceno is None:
traceno = self.traceno
try:
if self.kind == TraceField:
if traceno is None: return buf
return self.filehandle.getth(traceno, buf)
else:
return self.filehandle.getbin()
except IOError:
if not self.readonly:
# the file was probably newly created and the trace header
# hasn't been written yet, and we set the buffer to zero. if
# this is the case we want to try and write it later, and if
# the file was broken, permissions were wrong etc writing will
# fail too
#
# if the file is opened read-only and this happens, there's no
# way to actually write and the error is an actual error
return bytearray(len(self.buf))
else: raise | python | def fetch(self, buf = None, traceno = None):
"""Fetch the header from disk
This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
This method is largely meant for internal use - if you need to reload
disk contents, use ``reload``.
Fetch does not update any internal state (unless `buf` is ``None`` on a
trace header, and the read succeeds), but returns the fetched header
contents.
This method can be used to reposition the trace header, which is useful
for constructing generators.
If this is called on a writable, new file, and this header has not yet
been written to, it will successfully return an empty buffer that, when
written to, will be reflected on disk.
Parameters
----------
buf : bytearray
buffer to read into instead of ``self.buf``
traceno : int
Returns
-------
buf : bytearray
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in high-performance code.
"""
if buf is None:
buf = self.buf
if traceno is None:
traceno = self.traceno
try:
if self.kind == TraceField:
if traceno is None: return buf
return self.filehandle.getth(traceno, buf)
else:
return self.filehandle.getbin()
except IOError:
if not self.readonly:
# the file was probably newly created and the trace header
# hasn't been written yet, and we set the buffer to zero. if
# this is the case we want to try and write it later, and if
# the file was broken, permissions were wrong etc writing will
# fail too
#
# if the file is opened read-only and this happens, there's no
# way to actually write and the error is an actual error
return bytearray(len(self.buf))
else: raise | [
"def",
"fetch",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"traceno",
"=",
"None",
")",
":",
"if",
"buf",
"is",
"None",
":",
"buf",
"=",
"self",
".",
"buf",
"if",
"traceno",
"is",
"None",
":",
"traceno",
"=",
"self",
".",
"traceno",
"try",
":",
... | Fetch the header from disk
This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
This method is largely meant for internal use - if you need to reload
disk contents, use ``reload``.
Fetch does not update any internal state (unless `buf` is ``None`` on a
trace header, and the read succeeds), but returns the fetched header
contents.
This method can be used to reposition the trace header, which is useful
for constructing generators.
If this is called on a writable, new file, and this header has not yet
been written to, it will successfully return an empty buffer that, when
written to, will be reflected on disk.
Parameters
----------
buf : bytearray
buffer to read into instead of ``self.buf``
traceno : int
Returns
-------
buf : bytearray
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in high-performance code. | [
"Fetch",
"the",
"header",
"from",
"disk"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/field.py#L183-L243 | train | 220,078 |
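One way the repositioning hinted at above could drive a generator; this sketch assumes Field keeps its cache in plain buf and traceno attributes, as the method body suggests, and is not segyio's own implementation:
def headers(f):
    # reuse a single Field object instead of constructing one per trace
    h = f.header[0]
    for i in range(f.tracecount):
        h.buf = h.fetch(traceno=i)   # reposition the cached header to trace i
        h.traceno = i
        yield h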
equinor/segyio | python/segyio/field.py | Field.reload | def reload(self):
"""
This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
It's rarely required to call this method, and it's a symptom of fragile
code. However, if you have multiple handles to the same header, it
might be necessary. Consider the following example::
>>> x = f.header[10]
>>> y = f.header[10]
>>> x[1, 5]
{ 1: 5, 5: 10 }
>>> y[1, 5]
{ 1: 5, 5: 10 }
>>> x[1] = 6
>>> x[1], y[1] # write to x[1] is invisible to y
6, 5
>>> y.reload()
>>> x[1], y[1]
6, 6
>>> x[1] = 5
>>> x[1], y[1]
5, 6
>>> y[5] = 1
>>> x.reload()
>>> x[1], y[1, 5] # the write to x[1] is lost
6, { 1: 6, 5: 1 }
In segyio, header writes are atomic, and the write to disk writes the
full cache. If this cache is out of date, some writes might get lost,
even though the updates are compatible.
The fix to this issue is either to use ``reload`` and maintain buffer
consistency, or simply don't let header handles alias and overlap in
lifetime.
Notes
-----
.. versionadded:: 1.6
"""
self.buf = self.fetch(buf = self.buf)
return self | python | def reload(self):
"""
This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
It's rarely required to call this method, and it's a symptom of fragile
code. However, if you have multiple handles to the same header, it
might be necessary. Consider the following example::
>>> x = f.header[10]
>>> y = f.header[10]
>>> x[1, 5]
{ 1: 5, 5: 10 }
>>> y[1, 5]
{ 1: 5, 5: 10 }
>>> x[1] = 6
>>> x[1], y[1] # write to x[1] is invisible to y
6, 5
>>> y.reload()
>>> x[1], y[1]
6, 6
>>> x[1] = 5
>>> x[1], y[1]
5, 6
>>> y[5] = 1
>>> x.reload()
>>> x[1], y[1, 5] # the write to x[1] is lost
6, { 1: 6, 5: 1 }
In segyio, header writes are atomic, and the write to disk writes the
full cache. If this cache is out of date, some writes might get lost,
even though the updates are compatible.
The fix to this issue is either to use ``reload`` and maintain buffer
consistency, or simply don't let header handles alias and overlap in
lifetime.
Notes
-----
.. versionadded:: 1.6
"""
self.buf = self.fetch(buf = self.buf)
return self | [
"def",
"reload",
"(",
"self",
")",
":",
"self",
".",
"buf",
"=",
"self",
".",
"fetch",
"(",
"buf",
"=",
"self",
".",
"buf",
")",
"return",
"self"
] | This object will read the header when it is constructed, which means it
might be out-of-date if the file is updated through some other handle.
It's rarely required to call this method, and it's a symptom of fragile
code. However, if you have multiple handles to the same header, it
might be necessary. Consider the following example::
>>> x = f.header[10]
>>> y = f.header[10]
>>> x[1, 5]
{ 1: 5, 5: 10 }
>>> y[1, 5]
{ 1: 5, 5: 10 }
>>> x[1] = 6
>>> x[1], y[1] # write to x[1] is invisible to y
6, 5
>>> y.reload()
>>> x[1], y[1]
6, 6
>>> x[1] = 5
>>> x[1], y[1]
5, 6
>>> y[5] = 1
>>> x.reload()
>>> x[1], y[1, 5] # the write to x[1] is lost
6, { 1: 6, 5: 1 }
In segyio, header writes are atomic, and the write to disk writes the
full cache. If this cache is out of date, some writes might get lost,
even though the updates are compatible.
The fix to this issue is either to use ``reload`` and maintain buffer
consistency, or simply don't let header handles alias and overlap in
lifetime.
Notes
-----
.. versionadded:: 1.6 | [
"This",
"object",
"will",
"read",
"header",
"when",
"it",
"is",
"constructed",
"which",
"means",
"it",
"might",
"be",
"out",
"-",
"of",
"-",
"date",
"if",
"the",
"file",
"is",
"updated",
"through",
"some",
"other",
"handle",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/field.py#L245-L288 | train | 220,079 |
equinor/segyio | python/segyio/field.py | Field.flush | def flush(self):
"""Commit backing storage to disk
This method is largely internal, and it is not necessary to call this
from user code. It should not be explicitly invoked and may be removed
in future versions.
"""
if self.kind == TraceField:
self.filehandle.putth(self.traceno, self.buf)
elif self.kind == BinField:
self.filehandle.putbin(self.buf)
else:
msg = 'Object corrupted: kind {} not valid'
raise RuntimeError(msg.format(self.kind)) | python | def flush(self):
"""Commit backing storage to disk
This method is largely internal, and it is not necessary to call this
from user code. It should not be explicitly invoked and may be removed
in future versions.
"""
if self.kind == TraceField:
self.filehandle.putth(self.traceno, self.buf)
elif self.kind == BinField:
self.filehandle.putbin(self.buf)
else:
msg = 'Object corrupted: kind {} not valid'
raise RuntimeError(msg.format(self.kind)) | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"self",
".",
"kind",
"==",
"TraceField",
":",
"self",
".",
"filehandle",
".",
"putth",
"(",
"self",
".",
"traceno",
",",
"self",
".",
"buf",
")",
"elif",
"self",
".",
"kind",
"==",
"BinField",
":",
"self... | Commit backing storage to disk
This method is largely internal, and it is not necessary to call this
from user code. It should not be explicitly invoked and may be removed
in future versions. | [
"Commit",
"backing",
"storage",
"to",
"disk"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/field.py#L290-L306 | train | 220,080 |
equinor/segyio | python/segyio/trace.py | Trace.raw | def raw(self):
"""
An eager version of Trace
Returns
-------
raw : RawTrace
"""
return RawTrace(self.filehandle,
self.dtype,
len(self),
self.shape,
self.readonly,
) | python | def raw(self):
"""
An eager version of Trace
Returns
-------
raw : RawTrace
"""
return RawTrace(self.filehandle,
self.dtype,
len(self),
self.shape,
self.readonly,
) | [
"def",
"raw",
"(",
"self",
")",
":",
"return",
"RawTrace",
"(",
"self",
".",
"filehandle",
",",
"self",
".",
"dtype",
",",
"len",
"(",
"self",
")",
",",
"self",
".",
"shape",
",",
"self",
".",
"readonly",
",",
")"
] | An eager version of Trace
Returns
-------
raw : RawTrace | [
"An",
"eager",
"version",
"of",
"Trace"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/trace.py#L253-L266 | train | 220,081 |
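raw is what tools.cube uses under the hood; a sketch of eager reads with an illustrative path:
import segyio
with segyio.open('survey.sgy') as f:
    data = f.trace.raw[:]    # every trace in one (tracecount, samples) ndarray
    first = f.trace.raw[0]   # a single trace, read eagerly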
equinor/segyio | python/segyio/trace.py | Trace.ref | def ref(self):
"""
A write-back version of Trace
Returns
-------
ref : RefTrace
`ref` is returned in a context manager, and must be in a ``with``
statement
Notes
-----
.. versionadded:: 1.6
Examples
--------
>>> with trace.ref as ref:
... ref[10] += 1.617
"""
x = RefTrace(self.filehandle,
self.dtype,
len(self),
self.shape,
self.readonly,
)
yield x
x.flush() | python | def ref(self):
"""
A write-back version of Trace
Returns
-------
ref : RefTrace
`ref` is returned in a context manager, and must be in a ``with``
statement
Notes
-----
.. versionadded:: 1.6
Examples
--------
>>> with trace.ref as ref:
... ref[10] += 1.617
"""
x = RefTrace(self.filehandle,
self.dtype,
len(self),
self.shape,
self.readonly,
)
yield x
x.flush() | [
"def",
"ref",
"(",
"self",
")",
":",
"x",
"=",
"RefTrace",
"(",
"self",
".",
"filehandle",
",",
"self",
".",
"dtype",
",",
"len",
"(",
"self",
")",
",",
"self",
".",
"shape",
",",
"self",
".",
"readonly",
",",
")",
"yield",
"x",
"x",
".",
"flus... | A write-back version of Trace
Returns
-------
ref : RefTrace
`ref` is returned in a context manager, and must be in a ``with``
statement
Notes
-----
.. versionadded:: 1.6
Examples
--------
>>> with trace.ref as ref:
... ref[10] += 1.617 | [
"A",
"write",
"-",
"back",
"version",
"of",
"Trace"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/trace.py#L270-L297 | train | 220,082 |
equinor/segyio | python/segyio/trace.py | RefTrace.flush | def flush(self):
"""
Commit cached writes to the file handle. Does not flush libc buffers or
notify the kernel, so these changes may not immediately be visible to
other processes.
Updates the fingerprints when writes happen, so successive ``flush()``
invocations are no-ops.
It is not necessary to call this method in user code.
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in certain contexts to provide stronger guarantees.
"""
garbage = []
for i, (x, signature) in self.refs.items():
if sys.getrefcount(x) == 3:
garbage.append(i)
if fingerprint(x) == signature: continue
self.filehandle.puttr(i, x)
signature = fingerprint(x)
# to avoid too many resource leaks, when this dict is the only one
# holding references to already-produced traces, clear them
for i in garbage:
del self.refs[i] | python | def flush(self):
"""
Commit cached writes to the file handle. Does not flush libc buffers or
notify the kernel, so these changes may not immediately be visible to
other processes.
Updates the fingerprints when writes happen, so successive ``flush()``
invocations are no-ops.
It is not necessary to call this method in user code.
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in certain contexts to provide stronger guarantees.
"""
garbage = []
for i, (x, signature) in self.refs.items():
if sys.getrefcount(x) == 3:
garbage.append(i)
if fingerprint(x) == signature: continue
self.filehandle.puttr(i, x)
signature = fingerprint(x)
# to avoid too many resource leaks, when this dict is the only one
# holding references to already-produced traces, clear them
for i in garbage:
del self.refs[i] | [
"def",
"flush",
"(",
"self",
")",
":",
"garbage",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"x",
",",
"signature",
")",
"in",
"self",
".",
"refs",
".",
"items",
"(",
")",
":",
"if",
"sys",
".",
"getrefcount",
"(",
"x",
")",
"==",
"3",
":",
"garbag... | Commit cached writes to the file handle. Does not flush libc buffers or
notify the kernel, so these changes may not immediately be visible to
other processes.
Updates the fingerprints when writes happen, so successive ``flush()``
invocations are no-ops.
It is not necessary to call this method in user code.
Notes
-----
.. versionadded:: 1.6
This method is not intended as user-oriented functionality, but might
be useful in certain contexts to provide stronger guarantees. | [
"Commit",
"cached",
"writes",
"to",
"the",
"file",
"handle",
".",
"Does",
"not",
"flush",
"libc",
"buffers",
"or",
"notifies",
"the",
"kernel",
"so",
"these",
"changes",
"may",
"not",
"immediately",
"be",
"visible",
"to",
"other",
"processes",
"."
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/trace.py#L376-L408 | train | 220,083 |
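`flush` only writes back traces whose fingerprint changed since they were handed out. segyio's `fingerprint` is internal; a rough stand-in that hashes the array bytes illustrates the dirty-checking idea:

import hashlib

import numpy as np

def fingerprint(arr):
    # equal digests => the trace (almost certainly) did not change
    return hashlib.sha1(arr.tobytes()).hexdigest()

trace = np.zeros(50, dtype=np.float32)
clean = fingerprint(trace)
trace[10] += 1.617
assert fingerprint(trace) != clean   # dirty trace -> would be written back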
equinor/segyio | python/segyio/segy.py | SegyFile.iline | def iline(self):
"""
Interact with segy in inline mode
Returns
-------
iline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._iline is not None:
return self._iline
self._iline = Line(self,
self.ilines,
self._iline_length,
self._iline_stride,
self.offsets,
'inline',
)
return self._iline | python | def iline(self):
"""
Interact with segy in inline mode
Returns
-------
iline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._iline is not None:
return self._iline
self._iline = Line(self,
self.ilines,
self._iline_length,
self._iline_stride,
self.offsets,
'inline',
)
return self._iline | [
"def",
"iline",
"(",
"self",
")",
":",
"if",
"self",
".",
"unstructured",
":",
"raise",
"ValueError",
"(",
"self",
".",
"_unstructured_errmsg",
")",
"if",
"self",
".",
"_iline",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_iline",
"self",
".",
"_... | Interact with segy in inline mode
Returns
-------
iline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1 | [
"Interact",
"with",
"segy",
"in",
"inline",
"mode"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L501-L532 | train | 220,084 |
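Reading in inline mode might look like the sketch below; 'data.segy' stands in for any structured SEG-Y file:

import segyio

with segyio.open('data.segy') as f:
    first = f.iline[f.ilines[0]]   # 2D array: crosslines x samples
    for line in f.iline:           # iterate every inline in order
        print(line.mean())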
equinor/segyio | python/segyio/segy.py | SegyFile.xline | def xline(self):
"""
Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._xline is not None:
return self._xline
self._xline = Line(self,
self.xlines,
self._xline_length,
self._xline_stride,
self.offsets,
'crossline',
)
return self._xline | python | def xline(self):
"""
Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._xline is not None:
return self._xline
self._xline = Line(self,
self.xlines,
self._xline_length,
self._xline_stride,
self.offsets,
'crossline',
)
return self._xline | [
"def",
"xline",
"(",
"self",
")",
":",
"if",
"self",
".",
"unstructured",
":",
"raise",
"ValueError",
"(",
"self",
".",
"_unstructured_errmsg",
")",
"if",
"self",
".",
"_xline",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_xline",
"self",
".",
"_... | Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1 | [
"Interact",
"with",
"segy",
"in",
"crossline",
"mode"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L573-L603 | train | 220,085 |
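Crossline mode is the mirror image of inline mode; a small write sketch, again with a placeholder file name and the file opened read-write:

import numpy as np
import segyio

with segyio.open('data.segy', 'r+') as f:
    xl = f.xlines[0]
    f.xline[xl] = np.zeros_like(f.xline[xl])   # zero out the first crossline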
equinor/segyio | python/segyio/segy.py | SegyFile.fast | def fast(self):
"""Access the 'fast' dimension
This mode yields iline or xline mode, depending on which one is laid
out `faster`, i.e. the line with linear disk layout. Use this mode if
the inline/crossline distinction isn't as interesting as traversing in
a fast manner (typically when you want to apply a function to the whole
file, line-by-line).
Returns
-------
fast : Line
line addressing mode
Notes
-----
.. versionadded:: 1.1
"""
if self.sorting == TraceSortingFormat.INLINE_SORTING:
return self.iline
elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
return self.xline
else:
raise RuntimeError("Unknown sorting.") | python | def fast(self):
"""Access the 'fast' dimension
This mode yields iline or xline mode, depending on which one is laid
out `faster`, i.e. the line with linear disk layout. Use this mode if
the inline/crossline distinction isn't as interesting as traversing in
a fast manner (typically when you want to apply a function to the whole
file, line-by-line).
Returns
-------
fast : Line
line addressing mode
Notes
-----
.. versionadded:: 1.1
"""
if self.sorting == TraceSortingFormat.INLINE_SORTING:
return self.iline
elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
return self.xline
else:
raise RuntimeError("Unknown sorting.") | [
"def",
"fast",
"(",
"self",
")",
":",
"if",
"self",
".",
"sorting",
"==",
"TraceSortingFormat",
".",
"INLINE_SORTING",
":",
"return",
"self",
".",
"iline",
"elif",
"self",
".",
"sorting",
"==",
"TraceSortingFormat",
".",
"CROSSLINE_SORTING",
":",
"return",
"... | Access the 'fast' dimension
This mode yields iline or xline mode, depending on which one is laid
out `faster`, i.e. the line with linear disk layout. Use this mode if
the inline/crossline distinction isn't as interesting as traversing in
a fast manner (typically when you want to apply a function to the whole
file, line-by-line).
Returns
-------
fast : Line
line addressing mode
Notes
-----
.. versionadded:: 1.1 | [
"Access",
"the",
"fast",
"dimension"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L630-L653 | train | 220,086 |
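Because `fast` resolves to whichever of iline/xline is contiguous on disk, it suits whole-file passes where direction is irrelevant; a sketch computing a global peak amplitude:

import numpy as np
import segyio

with segyio.open('data.segy') as f:
    peak = max(np.abs(line).max() for line in f.fast)
    print(peak)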
equinor/segyio | python/segyio/segy.py | SegyFile.gather | def gather(self):
"""
Interact with segy in gather mode
Returns
-------
gather : Gather
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._gather is not None:
return self._gather
self._gather = Gather(self.trace, self.iline, self.xline, self.offsets)
return self._gather | python | def gather(self):
"""
Interact with segy in gather mode
Returns
-------
gather : Gather
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._gather is not None:
return self._gather
self._gather = Gather(self.trace, self.iline, self.xline, self.offsets)
return self._gather | [
"def",
"gather",
"(",
"self",
")",
":",
"if",
"self",
".",
"unstructured",
":",
"raise",
"ValueError",
"(",
"self",
".",
"_unstructured_errmsg",
")",
"if",
"self",
".",
"_gather",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_gather",
"self",
".",
... | Interact with segy in gather mode
Returns
-------
gather : Gather | [
"Interact",
"with",
"segy",
"in",
"gather",
"mode"
] | 58fd449947ccd330b9af0699d6b8710550d34e8e | https://github.com/equinor/segyio/blob/58fd449947ccd330b9af0699d6b8710550d34e8e/python/segyio/segy.py#L729-L744 | train | 220,087 |
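Gather mode addresses every offset at a fixed (inline, crossline) intersection; a read sketch with a placeholder file name:

import segyio

with segyio.open('data.segy') as f:
    il, xl = f.ilines[0], f.xlines[0]
    g = f.gather[il, xl]   # offsets x samples for pre-stack data,
                           # a single trace when there is only one offset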
pymc-devs/pymc | pymc/StepMethods.py | pick_best_methods | def pick_best_methods(stochastic):
"""
Picks the StepMethods best suited to handle
a stochastic variable.
"""
    # Keep track of most competent method
max_competence = 0
# Empty set of appropriate StepMethods
best_candidates = set([])
# Loop over StepMethodRegistry
for method in StepMethodRegistry:
# Parse method and its associated competence
try:
competence = method.competence(stochastic)
except:
competence = 0
# If better than current best method, promote it
if competence > max_competence:
best_candidates = set([method])
max_competence = competence
# If same competence, add it to the set of best methods
elif competence == max_competence:
best_candidates.add(method)
if max_competence <= 0:
raise ValueError(
'Maximum competence reported for stochastic %s is <= 0... you may need to write a custom step method class.' %
stochastic.__name__)
# print_(s.__name__ + ': ', best_candidates, ' ', max_competence)
return best_candidates | python | def pick_best_methods(stochastic):
"""
Picks the StepMethods best suited to handle
a stochastic variable.
"""
    # Keep track of most competent method
max_competence = 0
# Empty set of appropriate StepMethods
best_candidates = set([])
# Loop over StepMethodRegistry
for method in StepMethodRegistry:
# Parse method and its associated competence
try:
competence = method.competence(stochastic)
except:
competence = 0
# If better than current best method, promote it
if competence > max_competence:
best_candidates = set([method])
max_competence = competence
# If same competence, add it to the set of best methods
elif competence == max_competence:
best_candidates.add(method)
if max_competence <= 0:
raise ValueError(
'Maximum competence reported for stochastic %s is <= 0... you may need to write a custom step method class.' %
stochastic.__name__)
# print_(s.__name__ + ': ', best_candidates, ' ', max_competence)
return best_candidates | [
"def",
"pick_best_methods",
"(",
"stochastic",
")",
":",
"# Keep track of most competent methohd",
"max_competence",
"=",
"0",
"# Empty set of appropriate StepMethods",
"best_candidates",
"=",
"set",
"(",
"[",
"]",
")",
"# Loop over StepMethodRegistry",
"for",
"method",
"in... | Picks the StepMethods best suited to handle
a stochastic variable. | [
"Picks",
"the",
"StepMethods",
"best",
"suited",
"to",
"handle",
"a",
"stochastic",
"variable",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L59-L94 | train | 220,088 |
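`pick_best_methods` lets every class in StepMethodRegistry bid for a variable through a static `competence` method scored 0 (can't handle) to 3 (ideal). A hedged sketch of a custom method in the classic pymc 2 API; the name check and the score are illustrative, and subclassing is assumed to register the class the same way the stock step methods are registered:

import pymc

class ThetaMetropolis(pymc.Metropolis):
    """Illustrative step method that claims variables named 'theta'."""
    @staticmethod
    def competence(stochastic):
        # outbid the stock methods for 'theta', decline everything else
        return 3 if stochastic.__name__ == 'theta' else 0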
pymc-devs/pymc | pymc/StepMethods.py | StepMethod.loglike | def loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed.
'''
sum = logp_of_set(self.children)
if self.verbose > 2:
print_('\t' + self._id + ' Current log-likelihood ', sum)
return sum | python | def loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed.
'''
sum = logp_of_set(self.children)
if self.verbose > 2:
print_('\t' + self._id + ' Current log-likelihood ', sum)
return sum | [
"def",
"loglike",
"(",
"self",
")",
":",
"sum",
"=",
"logp_of_set",
"(",
"self",
".",
"children",
")",
"if",
"self",
".",
"verbose",
">",
"2",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Current log-likelihood '",
",",
"sum",
")",
"... | The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed. | [
"The",
"summed",
"log",
"-",
"probability",
"of",
"all",
"stochastic",
"variables",
"that",
"depend",
"on",
"self",
".",
"stochastics",
"with",
"self",
".",
"stochastics",
"removed",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L292-L300 | train | 220,089 |
pymc-devs/pymc | pymc/StepMethods.py | StepMethod.logp_plus_loglike | def logp_plus_loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
'''
sum = logp_of_set(self.markov_blanket)
if self.verbose > 2:
print_('\t' + self._id +
' Current log-likelihood plus current log-probability', sum)
return sum | python | def logp_plus_loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics.
'''
sum = logp_of_set(self.markov_blanket)
if self.verbose > 2:
print_('\t' + self._id +
' Current log-likelihood plus current log-probability', sum)
return sum | [
"def",
"logp_plus_loglike",
"(",
"self",
")",
":",
"sum",
"=",
"logp_of_set",
"(",
"self",
".",
"markov_blanket",
")",
"if",
"self",
".",
"verbose",
">",
"2",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Current log-likelihood plus current l... | The summed log-probability of all stochastic variables that depend on
self.stochastics, and self.stochastics. | [
"The",
"summed",
"log",
"-",
"probability",
"of",
"all",
"stochastic",
"variables",
"that",
"depend",
"on",
"self",
".",
"stochastics",
"and",
"self",
".",
"stochastics",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L303-L312 | train | 220,090 |
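A toy model makes the two properties concrete: `loglike` sums the children's log-probabilities, while `logp_plus_loglike` also adds the handled stochastics' own. A sketch in the classic pymc 2 API; variable names and values are illustrative:

import pymc

mu = pymc.Normal('mu', mu=0.0, tau=1.0)
y = pymc.Normal('y', mu=mu, tau=1.0, value=1.5, observed=True)

sm = pymc.Metropolis(mu)
print(sm.loglike)             # log p(y | mu) at mu's current value
print(sm.logp_plus_loglike)   # log p(y | mu) + log p(mu)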
pymc-devs/pymc | pymc/StepMethods.py | StepMethod.current_state | def current_state(self):
"""Return a dictionary with the current value of the variables defining
the state of the step method."""
state = {}
for s in self._state:
state[s] = getattr(self, s)
return state | python | def current_state(self):
"""Return a dictionary with the current value of the variables defining
the state of the step method."""
state = {}
for s in self._state:
state[s] = getattr(self, s)
return state | [
"def",
"current_state",
"(",
"self",
")",
":",
"state",
"=",
"{",
"}",
"for",
"s",
"in",
"self",
".",
"_state",
":",
"state",
"[",
"s",
"]",
"=",
"getattr",
"(",
"self",
",",
"s",
")",
"return",
"state"
] | Return a dictionary with the current value of the variables defining
the state of the step method. | [
"Return",
"a",
"dictionary",
"with",
"the",
"current",
"value",
"of",
"the",
"variables",
"defining",
"the",
"state",
"of",
"the",
"step",
"method",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L318-L324 | train | 220,091 |
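Since `current_state` returns a plain attribute dict, restoring a step method is just `setattr` in reverse, roughly how a sampler run might be checkpointed and reloaded. A standalone sketch (classic pymc 2 API, names illustrative):

import pymc

x = pymc.Normal('x', mu=0.0, tau=1.0)
sm = pymc.Metropolis(x)

state = sm.current_state()           # the attributes listed in sm._state
for name, value in state.items():    # restore, e.g. after reloading a run
    setattr(sm, name, value)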
pymc-devs/pymc | pymc/StepMethods.py | Metropolis.step | def step(self):
"""
The default step method applies if the variable is floating-point
valued, and is not being proposed from its prior.
"""
# Probability and likelihood for s's current value:
if self.verbose > 2:
print_()
print_(self._id + ' getting initial logp.')
if self.proposal_distribution == "prior":
logp = self.loglike
else:
logp = self.logp_plus_loglike
if self.verbose > 2:
print_(self._id + ' proposing.')
# Sample a candidate value
self.propose()
# Probability and likelihood for s's proposed value:
try:
if self.proposal_distribution == "prior":
logp_p = self.loglike
# Check for weirdness before accepting jump
if self.check_before_accepting:
self.stochastic.logp
else:
logp_p = self.logp_plus_loglike
except ZeroProbability:
# Reject proposal
if self.verbose > 2:
print_(self._id + ' rejecting due to ZeroProbability.')
self.reject()
# Increment rejected count
self.rejected += 1
if self.verbose > 2:
print_(self._id + ' returning.')
return
if self.verbose > 2:
print_('logp_p - logp: ', logp_p - logp)
HF = self.hastings_factor()
# Evaluate acceptance ratio
if log(random()) > logp_p - logp + HF:
# Revert s if fail
self.reject()
# Increment rejected count
self.rejected += 1
if self.verbose > 2:
print_(self._id + ' rejecting')
else:
# Increment accepted count
self.accepted += 1
if self.verbose > 2:
print_(self._id + ' accepting')
if self.verbose > 2:
print_(self._id + ' returning.') | python | def step(self):
"""
The default step method applies if the variable is floating-point
valued, and is not being proposed from its prior.
"""
# Probability and likelihood for s's current value:
if self.verbose > 2:
print_()
print_(self._id + ' getting initial logp.')
if self.proposal_distribution == "prior":
logp = self.loglike
else:
logp = self.logp_plus_loglike
if self.verbose > 2:
print_(self._id + ' proposing.')
# Sample a candidate value
self.propose()
# Probability and likelihood for s's proposed value:
try:
if self.proposal_distribution == "prior":
logp_p = self.loglike
# Check for weirdness before accepting jump
if self.check_before_accepting:
self.stochastic.logp
else:
logp_p = self.logp_plus_loglike
except ZeroProbability:
# Reject proposal
if self.verbose > 2:
print_(self._id + ' rejecting due to ZeroProbability.')
self.reject()
# Increment rejected count
self.rejected += 1
if self.verbose > 2:
print_(self._id + ' returning.')
return
if self.verbose > 2:
print_('logp_p - logp: ', logp_p - logp)
HF = self.hastings_factor()
# Evaluate acceptance ratio
if log(random()) > logp_p - logp + HF:
# Revert s if fail
self.reject()
# Increment rejected count
self.rejected += 1
if self.verbose > 2:
print_(self._id + ' rejecting')
else:
# Increment accepted count
self.accepted += 1
if self.verbose > 2:
print_(self._id + ' accepting')
if self.verbose > 2:
print_(self._id + ' returning.') | [
"def",
"step",
"(",
"self",
")",
":",
"# Probability and likelihood for s's current value:",
"if",
"self",
".",
"verbose",
">",
"2",
":",
"print_",
"(",
")",
"print_",
"(",
"self",
".",
"_id",
"+",
"' getting initial logp.'",
")",
"if",
"self",
".",
"proposal_... | The default step method applies if the variable is floating-point
valued, and is not being proposed from its prior. | [
"The",
"default",
"step",
"method",
"applies",
"if",
"the",
"variable",
"is",
"floating",
"-",
"point",
"valued",
"and",
"is",
"not",
"being",
"proposed",
"from",
"its",
"prior",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L470-L539 | train | 220,092 |
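The accept/reject test above runs entirely in log space: reject when log(u) > logp_p - logp + HF. A self-contained numpy sketch of the same rule for a standard-normal target; the symmetric random walk makes the Hastings factor zero:

import numpy as np

def logp(x):
    return -0.5 * x * x   # standard normal, up to an additive constant

rng = np.random.default_rng(0)
x, accepted, n = 0.0, 0, 10000
for _ in range(n):
    x_p = x + rng.normal(scale=0.5)            # symmetric proposal, HF = 0
    if np.log(rng.uniform()) <= logp(x_p) - logp(x):
        x, accepted = x_p, accepted + 1
print('acceptance rate:', accepted / n)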
pymc-devs/pymc | pymc/StepMethods.py | PDMatrixMetropolis.competence | def competence(s):
"""
The competence function for MatrixMetropolis
"""
# MatrixMetropolis handles the Wishart family, which are valued as
# _symmetric_ matrices.
if any([isinstance(s, cls)
for cls in [distributions.Wishart, distributions.WishartCov]]):
return 2
else:
return 0 | python | def competence(s):
"""
The competence function for MatrixMetropolis
"""
# MatrixMetropolis handles the Wishart family, which are valued as
# _symmetric_ matrices.
if any([isinstance(s, cls)
for cls in [distributions.Wishart, distributions.WishartCov]]):
return 2
else:
return 0 | [
"def",
"competence",
"(",
"s",
")",
":",
"# MatrixMetropolis handles the Wishart family, which are valued as",
"# _symmetric_ matrices.",
"if",
"any",
"(",
"[",
"isinstance",
"(",
"s",
",",
"cls",
")",
"for",
"cls",
"in",
"[",
"distributions",
".",
"Wishart",
",",
... | The competence function for MatrixMetropolis | [
"The",
"competence",
"function",
"for",
"MatrixMetropolis"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L657-L667 | train | 220,093 |
pymc-devs/pymc | pymc/StepMethods.py | PDMatrixMetropolis.propose | def propose(self):
"""
Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value.
"""
# Locally store size of matrix
dims = self.stochastic.value.shape
# Add normal deviate to value and symmetrize
dev = rnormal(
0,
self.adaptive_scale_factor *
self.proposal_sd,
size=dims)
symmetrize(dev)
# Replace
self.stochastic.value = dev + self.stochastic.value | python | def propose(self):
"""
Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value.
"""
# Locally store size of matrix
dims = self.stochastic.value.shape
# Add normal deviate to value and symmetrize
dev = rnormal(
0,
self.adaptive_scale_factor *
self.proposal_sd,
size=dims)
symmetrize(dev)
# Replace
self.stochastic.value = dev + self.stochastic.value | [
"def",
"propose",
"(",
"self",
")",
":",
"# Locally store size of matrix",
"dims",
"=",
"self",
".",
"stochastic",
".",
"value",
".",
"shape",
"# Add normal deviate to value and symmetrize",
"dev",
"=",
"rnormal",
"(",
"0",
",",
"self",
".",
"adaptive_scale_factor",... | Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value. | [
"Proposals",
"for",
"positive",
"definite",
"matrix",
"using",
"random",
"walk",
"deviations",
"on",
"the",
"Cholesky",
"factor",
"of",
"the",
"current",
"value",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L669-L687 | train | 220,094 |
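`symmetrize(dev)` mirrors one triangle of the normal deviate onto the other so the perturbed matrix stays symmetric; note that symmetry alone does not guarantee positive definiteness. A numpy stand-in for the helper:

import numpy as np

def symmetrize(a):
    # copy the lower triangle onto the upper triangle, in place
    a[:] = np.tril(a) + np.tril(a, -1).T

rng = np.random.default_rng(0)
dev = rng.normal(scale=0.1, size=(3, 3))
symmetrize(dev)
assert np.allclose(dev, dev.T)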
pymc-devs/pymc | pymc/StepMethods.py | BinaryMetropolis.competence | def competence(stochastic):
"""
The competence function for Binary One-At-A-Time Metropolis
"""
if stochastic.dtype in bool_dtypes:
return 2
elif isinstance(stochastic, distributions.Bernoulli):
return 2
elif (isinstance(stochastic, distributions.Categorical) and
(len(stochastic.parents['p'])==2)):
return 2
else:
return 0 | python | def competence(stochastic):
"""
The competence function for Binary One-At-A-Time Metropolis
"""
if stochastic.dtype in bool_dtypes:
return 2
elif isinstance(stochastic, distributions.Bernoulli):
return 2
elif (isinstance(stochastic, distributions.Categorical) and
(len(stochastic.parents['p'])==2)):
return 2
else:
return 0 | [
"def",
"competence",
"(",
"stochastic",
")",
":",
"if",
"stochastic",
".",
"dtype",
"in",
"bool_dtypes",
":",
"return",
"2",
"elif",
"isinstance",
"(",
"stochastic",
",",
"distributions",
".",
"Bernoulli",
")",
":",
"return",
"2",
"elif",
"(",
"isinstance",
... | The competence function for Binary One-At-A-Time Metropolis | [
"The",
"competence",
"function",
"for",
"Binary",
"One",
"-",
"At",
"-",
"A",
"-",
"Time",
"Metropolis"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L901-L916 | train | 220,095 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.cov_from_value | def cov_from_value(self, scaling):
"""Return a covariance matrix for the jump distribution using
        the actual value of the stochastic as a guess of its variance,
divided by the `scaling` argument.
Note that this is likely to return a poor guess.
"""
rv = []
for s in self.stochastics:
rv.extend(np.ravel(s.value).copy())
# Remove 0 values since this would lead to quite small jumps...
arv = np.array(rv)
arv[arv == 0] = 1.
# Create a diagonal covariance matrix using the scaling factor.
return np.eye(self.dim) * np.abs(arv) / scaling | python | def cov_from_value(self, scaling):
"""Return a covariance matrix for the jump distribution using
        the actual value of the stochastic as a guess of its variance,
divided by the `scaling` argument.
Note that this is likely to return a poor guess.
"""
rv = []
for s in self.stochastics:
rv.extend(np.ravel(s.value).copy())
# Remove 0 values since this would lead to quite small jumps...
arv = np.array(rv)
arv[arv == 0] = 1.
# Create a diagonal covariance matrix using the scaling factor.
return np.eye(self.dim) * np.abs(arv) / scaling | [
"def",
"cov_from_value",
"(",
"self",
",",
"scaling",
")",
":",
"rv",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"stochastics",
":",
"rv",
".",
"extend",
"(",
"np",
".",
"ravel",
"(",
"s",
".",
"value",
")",
".",
"copy",
"(",
")",
")",
"# Re... | Return a covariance matrix for the jump distribution using
the actual value of the stochastic as a guess of its variance,
divided by the `scaling` argument.
Note that this is likely to return a poor guess. | [
"Return",
"a",
"covariance",
"matrix",
"for",
"the",
"jump",
"distribution",
"using",
"the",
"actual",
"value",
"of",
"the",
"stochastic",
"as",
"a",
"guess",
"of",
"their",
"variance",
"divided",
"by",
"the",
"scaling",
"argument",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1135-L1151 | train | 220,096 |
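Stripped of the class machinery, the heuristic above is a scaled diagonal built from |value|; a standalone numpy sketch, with an arbitrary scaling of 50:

import numpy as np

def cov_from_value(values, scaling):
    arv = np.asarray(values, dtype=float).copy()
    arv[arv == 0] = 1.0                        # avoid zero-width jumps
    return np.eye(arv.size) * np.abs(arv) / scaling

print(cov_from_value([2.0, 0.0, -5.0], scaling=50.0))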
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.cov_from_scales | def cov_from_scales(self, scales):
"""Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
        values are the variances of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length.
"""
# Get array of scales
ord_sc = []
for stochastic in self.stochastics:
ord_sc.append(np.ravel(scales[stochastic]))
ord_sc = np.concatenate(ord_sc)
if np.squeeze(ord_sc).shape[0] != self.dim:
raise ValueError("Improper initial scales, dimension don't match",
(np.squeeze(ord_sc), self.dim))
# Scale identity matrix
return np.eye(self.dim) * ord_sc | python | def cov_from_scales(self, scales):
"""Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
        values are the variances of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length.
"""
# Get array of scales
ord_sc = []
for stochastic in self.stochastics:
ord_sc.append(np.ravel(scales[stochastic]))
ord_sc = np.concatenate(ord_sc)
if np.squeeze(ord_sc).shape[0] != self.dim:
raise ValueError("Improper initial scales, dimension don't match",
(np.squeeze(ord_sc), self.dim))
# Scale identity matrix
return np.eye(self.dim) * ord_sc | [
"def",
"cov_from_scales",
"(",
"self",
",",
"scales",
")",
":",
"# Get array of scales",
"ord_sc",
"=",
"[",
"]",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"ord_sc",
".",
"append",
"(",
"np",
".",
"ravel",
"(",
"scales",
"[",
"stochastic"... | Return a covariance matrix built from a dictionary of scales.
`scales` is a dictionary keyed by stochastic instances, and the
values are the variances of the jump distribution for each
stochastic. If a stochastic is a sequence, the variance must
have the same length. | [
"Return",
"a",
"covariance",
"matrix",
"built",
"from",
"a",
"dictionary",
"of",
"scales",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1153-L1173 | train | 220,097 |
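`cov_from_scales` just flattens the per-variable scales onto a diagonal; the same computation in plain numpy, with an illustrative scales dict keyed by name instead of by stochastic instance:

import numpy as np

scales = {'a': [0.1, 0.1], 'b': 0.5}
ord_sc = np.concatenate([np.ravel(v) for v in scales.values()])
cov = np.eye(ord_sc.size) * ord_sc
print(np.diag(cov))   # [0.1 0.1 0.5]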
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.cov_from_trace | def cov_from_trace(self, trace=slice(None)):
"""Define the jump distribution covariance matrix from the object's
stored trace.
:Parameters:
- `trace` : slice or int
          A slice for the stochastic object's trace in the last chain, or an
          integer indicating how many of the last samples will be used.
"""
n = []
for s in self.stochastics:
n.append(s.trace.length())
n = set(n)
if len(n) > 1:
raise ValueError('Traces do not have the same length.')
        elif 0 in n:  # n is a set of lengths here; {0} means the traces are empty
raise AttributeError(
'Stochastic has no trace to compute covariance.')
else:
n = n.pop()
if not isinstance(trace, slice):
trace = slice(trace, n)
a = self.trace2array(trace)
return np.cov(a, rowvar=0) | python | def cov_from_trace(self, trace=slice(None)):
"""Define the jump distribution covariance matrix from the object's
stored trace.
:Parameters:
- `trace` : slice or int
          A slice for the stochastic object's trace in the last chain, or an
          integer indicating how many of the last samples will be used.
"""
n = []
for s in self.stochastics:
n.append(s.trace.length())
n = set(n)
if len(n) > 1:
raise ValueError('Traces do not have the same length.')
        elif 0 in n:  # n is a set of lengths here; {0} means the traces are empty
raise AttributeError(
'Stochastic has no trace to compute covariance.')
else:
n = n.pop()
if not isinstance(trace, slice):
trace = slice(trace, n)
a = self.trace2array(trace)
return np.cov(a, rowvar=0) | [
"def",
"cov_from_trace",
"(",
"self",
",",
"trace",
"=",
"slice",
"(",
"None",
")",
")",
":",
"n",
"=",
"[",
"]",
"for",
"s",
"in",
"self",
".",
"stochastics",
":",
"n",
".",
"append",
"(",
"s",
".",
"trace",
".",
"length",
"(",
")",
")",
"n",
... | Define the jump distribution covariance matrix from the object's
stored trace.
:Parameters:
- `trace` : slice or int
  A slice for the stochastic object's trace in the last chain, or an
  integer indicating how many of the last samples will be used. | [
"Define",
"the",
"jump",
"distribution",
"covariance",
"matrix",
"from",
"the",
"object",
"s",
"stored",
"trace",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1175-L1202 | train | 220,098 |
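The estimate itself is a single `np.cov` call over the flattened trace array, rows being samples and columns the flattened stochastic dimensions (as produced by `trace2array`). A standalone sketch on synthetic samples, keeping only the last 200 of 500:

import numpy as np

a = np.random.default_rng(0).normal(size=(500, 3))   # stand-in trace array
cov = np.cov(a[-200:], rowvar=0)
print(cov.shape)                                     # (3, 3)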
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.check_type | def check_type(self):
"""Make sure each stochastic has a correct type, and identify discrete stochastics."""
self.isdiscrete = {}
for stochastic in self.stochastics:
if stochastic.dtype in integer_dtypes:
self.isdiscrete[stochastic] = True
elif stochastic.dtype in bool_dtypes:
raise ValueError(
                'Binary stochastics not supported by AdaptiveMetropolis.')
else:
self.isdiscrete[stochastic] = False | python | def check_type(self):
"""Make sure each stochastic has a correct type, and identify discrete stochastics."""
self.isdiscrete = {}
for stochastic in self.stochastics:
if stochastic.dtype in integer_dtypes:
self.isdiscrete[stochastic] = True
elif stochastic.dtype in bool_dtypes:
raise ValueError(
                'Binary stochastics not supported by AdaptiveMetropolis.')
else:
self.isdiscrete[stochastic] = False | [
"def",
"check_type",
"(",
"self",
")",
":",
"self",
".",
"isdiscrete",
"=",
"{",
"}",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"if",
"stochastic",
".",
"dtype",
"in",
"integer_dtypes",
":",
"self",
".",
"isdiscrete",
"[",
"stochastic",
... | Make sure each stochastic has a correct type, and identify discrete stochastics. | [
"Make",
"sure",
"each",
"stochastic",
"has",
"a",
"correct",
"type",
"and",
"identify",
"discrete",
"stochastics",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1204-L1214 | train | 220,099 |
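The dtype gate separates discrete (integer-valued) stochastics, recorded in `isdiscrete`, from unsupported binary ones. A plain numpy illustration of the same classification; the dtype lists only mirror pymc's `integer_dtypes`/`bool_dtypes`:

import numpy as np

integer_dtypes = [np.dtype(t) for t in ('int8', 'int16', 'int32', 'int64')]
bool_dtypes = [np.dtype('bool')]

for arr in (np.array([1, 2]), np.array([True]), np.array([0.5])):
    if arr.dtype in integer_dtypes:
        kind = 'discrete'
    elif arr.dtype in bool_dtypes:
        kind = 'binary (rejected above)'
    else:
        kind = 'continuous'
    print(arr.dtype, '->', kind)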