repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.reverse_index_mapping | def reverse_index_mapping(self):
"""Get mapping from this segment's indexes to the indexes of
the base array.
If the index is < 0, the index is out of range, meaning that it doesn't
exist in this segment and is not mapped to the base array
"""
if self._reverse_index_mapping is None:
if self.is_indexed:
# Initialize array to out of range
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.order] = np.arange(len(self.order), dtype=np.int32)
elif self.data.base is None:
# Starts at the beginning; produces the identity
r = np.arange(self.data_length, dtype=np.int32)
else:
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.data_start - self.base_start:self.data_end - self.base_start] = np.arange(self.data_length, dtype=np.int32)
self._reverse_index_mapping = r
return self._reverse_index_mapping | python | def reverse_index_mapping(self):
"""Get mapping from this segment's indexes to the indexes of
the base array.
If the index is < 0, the index is out of range, meaning that it doesn't
exist in this segment and is not mapped to the base array
"""
if self._reverse_index_mapping is None:
if self.is_indexed:
# Initialize array to out of range
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.order] = np.arange(len(self.order), dtype=np.int32)
elif self.data.base is None:
# Starts at the beginning; produces the identity
r = np.arange(self.data_length, dtype=np.int32)
else:
r = np.zeros(self.base_length, dtype=np.int32) - 1
r[self.data_start - self.base_start:self.data_end - self.base_start] = np.arange(self.data_length, dtype=np.int32)
self._reverse_index_mapping = r
return self._reverse_index_mapping | [
"def",
"reverse_index_mapping",
"(",
"self",
")",
":",
"if",
"self",
".",
"_reverse_index_mapping",
"is",
"None",
":",
"if",
"self",
".",
"is_indexed",
":",
"# Initialize array to out of range",
"r",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"base_length",
",... | Get mapping from this segment's indexes to the indexes of
the base array.
If the index is < 0, the index is out of range, meaning that it doesn't
exist in this segment and is not mapped to the base array | [
"Get",
"mapping",
"from",
"this",
"segment",
"s",
"indexes",
"to",
"the",
"indexes",
"of",
"the",
"base",
"array",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L358-L377 | train | 48,800 |
robmcmullen/atrcopy | atrcopy/segments.py | SegmentData.get_reverse_index | def get_reverse_index(self, base_index):
"""Get index into this segment's data given the index into the base data
Raises IndexError if the base index doesn't map to anything in this
segment's data
"""
r = self.reverse_index_mapping[base_index]
if r < 0:
raise IndexError("index %d not mapped in this segment" % base_index)
return r | python | def get_reverse_index(self, base_index):
"""Get index into this segment's data given the index into the base data
Raises IndexError if the base index doesn't map to anything in this
segment's data
"""
r = self.reverse_index_mapping[base_index]
if r < 0:
raise IndexError("index %d not mapped in this segment" % base_index)
return r | [
"def",
"get_reverse_index",
"(",
"self",
",",
"base_index",
")",
":",
"r",
"=",
"self",
".",
"reverse_index_mapping",
"[",
"base_index",
"]",
"if",
"r",
"<",
"0",
":",
"raise",
"IndexError",
"(",
"\"index %d not mapped in this segment\"",
"%",
"base_index",
")",... | Get index into this segment's data given the index into the base data
Raises IndexError if the base index doesn't map to anything in this
segment's data | [
"Get",
"index",
"into",
"this",
"segment",
"s",
"data",
"given",
"the",
"index",
"into",
"the",
"base",
"data"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L379-L388 | train | 48,801 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.resize | def resize(self, newsize, zeros=True):
""" Resize the data arrays.
This can only be performed on the container segment. Child segments
must adjust their rawdata to point to the correct place.
Since segments don't keep references to other segments, it is the
user's responsibility to update any child segments that point to this
segment's data.
Numpy can't do an in-place resize on an array that has a view, so the
data must be replaced and all segments that point to that raw data must
also be changed. This has to happen outside this method because it
doesn't know the segment list of segments using itself as a base.
"""
if not self.can_resize:
raise ValueError("Segment %s can't be resized" % str(self))
# only makes sense for the container (outermost) object
if not self.rawdata.is_base:
raise ValueError("Only container segments can be resized")
origsize = len(self)
self.rawdata.resize(newsize)
self.set_raw(self.rawdata) # force attributes to be reset
newsize = len(self)
if zeros:
if newsize > origsize:
self.data[origsize:] = 0
self.style[origsize:] = 0
return origsize, newsize | python | def resize(self, newsize, zeros=True):
""" Resize the data arrays.
This can only be performed on the container segment. Child segments
must adjust their rawdata to point to the correct place.
Since segments don't keep references to other segments, it is the
user's responsibility to update any child segments that point to this
segment's data.
Numpy can't do an in-place resize on an array that has a view, so the
data must be replaced and all segments that point to that raw data must
also be changed. This has to happen outside this method because it
doesn't know the segment list of segments using itself as a base.
"""
if not self.can_resize:
raise ValueError("Segment %s can't be resized" % str(self))
# only makes sense for the container (outermost) object
if not self.rawdata.is_base:
raise ValueError("Only container segments can be resized")
origsize = len(self)
self.rawdata.resize(newsize)
self.set_raw(self.rawdata) # force attributes to be reset
newsize = len(self)
if zeros:
if newsize > origsize:
self.data[origsize:] = 0
self.style[origsize:] = 0
return origsize, newsize | [
"def",
"resize",
"(",
"self",
",",
"newsize",
",",
"zeros",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"can_resize",
":",
"raise",
"ValueError",
"(",
"\"Segment %s can't be resized\"",
"%",
"str",
"(",
"self",
")",
")",
"# only makes sense for the contai... | Resize the data arrays.
This can only be performed on the container segment. Child segments
must adjust their rawdata to point to the correct place.
Since segments don't keep references to other segments, it is the
user's responsibility to update any child segments that point to this
segment's data.
Numpy can't do an in-place resize on an array that has a view, so the
data must be replaced and all segments that point to that raw data must
also be changed. This has to happen outside this method because it
doesn't know the segment list of segments using itself as a base. | [
"Resize",
"the",
"data",
"arrays",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L430-L458 | train | 48,802 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.reconstruct_raw | def reconstruct_raw(self, rawdata):
"""Reconstruct the pointers to the parent data arrays
Each segment is a view into the primary segment's data, so those
pointers and the order must be restored in the child segments.
"""
start, end = self._rawdata_bounds
r = rawdata[start:end]
delattr(self, '_rawdata_bounds')
try:
if self._order_list:
order = to_numpy_list(self._order_list)
r = r.get_indexed(order)
delattr(self, '_order_list')
except AttributeError:
pass
self.set_raw(r) | python | def reconstruct_raw(self, rawdata):
"""Reconstruct the pointers to the parent data arrays
Each segment is a view into the primary segment's data, so those
pointers and the order must be restored in the child segments.
"""
start, end = self._rawdata_bounds
r = rawdata[start:end]
delattr(self, '_rawdata_bounds')
try:
if self._order_list:
order = to_numpy_list(self._order_list)
r = r.get_indexed(order)
delattr(self, '_order_list')
except AttributeError:
pass
self.set_raw(r) | [
"def",
"reconstruct_raw",
"(",
"self",
",",
"rawdata",
")",
":",
"start",
",",
"end",
"=",
"self",
".",
"_rawdata_bounds",
"r",
"=",
"rawdata",
"[",
"start",
":",
"end",
"]",
"delattr",
"(",
"self",
",",
"'_rawdata_bounds'",
")",
"try",
":",
"if",
"sel... | Reconstruct the pointers to the parent data arrays
Each segment is a view into the primary segment's data, so those
pointers and the order must be restored in the child segments. | [
"Reconstruct",
"the",
"pointers",
"to",
"the",
"parent",
"data",
"arrays"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L529-L545 | train | 48,803 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_parallel_raw_data | def get_parallel_raw_data(self, other):
""" Get the raw data that is similar to the specified other segment
"""
start, end = other.byte_bounds_offset()
r = self.rawdata[start:end]
if other.rawdata.is_indexed:
r = r.get_indexed[other.order]
return r | python | def get_parallel_raw_data(self, other):
""" Get the raw data that is similar to the specified other segment
"""
start, end = other.byte_bounds_offset()
r = self.rawdata[start:end]
if other.rawdata.is_indexed:
r = r.get_indexed[other.order]
return r | [
"def",
"get_parallel_raw_data",
"(",
"self",
",",
"other",
")",
":",
"start",
",",
"end",
"=",
"other",
".",
"byte_bounds_offset",
"(",
")",
"r",
"=",
"self",
".",
"rawdata",
"[",
"start",
":",
"end",
"]",
"if",
"other",
".",
"rawdata",
".",
"is_indexe... | Get the raw data that is similar to the specified other segment | [
"Get",
"the",
"raw",
"data",
"that",
"is",
"similar",
"to",
"the",
"specified",
"other",
"segment"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L547-L554 | train | 48,804 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.serialize_session | def serialize_session(self, mdict):
"""Save extra metadata to a dict so that it can be serialized
This is not saved by __getstate__ because child segments will point to
the same data and this allows it to only be saved for the base segment.
As well as allowing it to be pulled out of the main json so that it can
be more easily edited by hand if desired.
"""
mdict["comment ranges"] = [list(a) for a in self.get_style_ranges(comment=True)]
mdict["data ranges"] = [list(a) for a in self.get_style_ranges(data=True)]
for i in range(1, user_bit_mask):
r = [list(a) for a in self.get_style_ranges(user=i)]
if r:
slot = "user style %d" % i
mdict[slot] = r
# json serialization doesn't allow int keys, so convert to list of
# pairs
mdict["comments"] = self.get_sorted_comments() | python | def serialize_session(self, mdict):
"""Save extra metadata to a dict so that it can be serialized
This is not saved by __getstate__ because child segments will point to
the same data and this allows it to only be saved for the base segment.
As well as allowing it to be pulled out of the main json so that it can
be more easily edited by hand if desired.
"""
mdict["comment ranges"] = [list(a) for a in self.get_style_ranges(comment=True)]
mdict["data ranges"] = [list(a) for a in self.get_style_ranges(data=True)]
for i in range(1, user_bit_mask):
r = [list(a) for a in self.get_style_ranges(user=i)]
if r:
slot = "user style %d" % i
mdict[slot] = r
# json serialization doesn't allow int keys, so convert to list of
# pairs
mdict["comments"] = self.get_sorted_comments() | [
"def",
"serialize_session",
"(",
"self",
",",
"mdict",
")",
":",
"mdict",
"[",
"\"comment ranges\"",
"]",
"=",
"[",
"list",
"(",
"a",
")",
"for",
"a",
"in",
"self",
".",
"get_style_ranges",
"(",
"comment",
"=",
"True",
")",
"]",
"mdict",
"[",
"\"data r... | Save extra metadata to a dict so that it can be serialized
This is not saved by __getstate__ because child segments will point to
the same data and this allows it to only be saved for the base segment.
As well as allowing it to be pulled out of the main json so that it can
be more easily edited by hand if desired. | [
"Save",
"extra",
"metadata",
"to",
"a",
"dict",
"so",
"that",
"it",
"can",
"be",
"serialized"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L556-L574 | train | 48,805 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_index_from_base_index | def get_index_from_base_index(self, base_index):
"""Get index into this array's data given the index into the base array
"""
r = self.rawdata
try:
index = r.get_reverse_index(base_index)
except IndexError:
raise IndexError("index %d not in this segment" % base_index)
if index < 0:
raise IndexError("index %d not in this segment" % base_index)
return int(index) | python | def get_index_from_base_index(self, base_index):
"""Get index into this array's data given the index into the base array
"""
r = self.rawdata
try:
index = r.get_reverse_index(base_index)
except IndexError:
raise IndexError("index %d not in this segment" % base_index)
if index < 0:
raise IndexError("index %d not in this segment" % base_index)
return int(index) | [
"def",
"get_index_from_base_index",
"(",
"self",
",",
"base_index",
")",
":",
"r",
"=",
"self",
".",
"rawdata",
"try",
":",
"index",
"=",
"r",
".",
"get_reverse_index",
"(",
"base_index",
")",
"except",
"IndexError",
":",
"raise",
"IndexError",
"(",
"\"index... | Get index into this array's data given the index into the base array | [
"Get",
"index",
"into",
"this",
"array",
"s",
"data",
"given",
"the",
"index",
"into",
"the",
"base",
"array"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L652-L662 | train | 48,806 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_style_ranges | def get_style_ranges(self, **kwargs):
"""Return a list of start, end pairs that match the specified style
"""
style_bits = self.get_style_bits(**kwargs)
matches = (self.style & style_bits) == style_bits
return self.bool_to_ranges(matches) | python | def get_style_ranges(self, **kwargs):
"""Return a list of start, end pairs that match the specified style
"""
style_bits = self.get_style_bits(**kwargs)
matches = (self.style & style_bits) == style_bits
return self.bool_to_ranges(matches) | [
"def",
"get_style_ranges",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"style_bits",
"=",
"self",
".",
"get_style_bits",
"(",
"*",
"*",
"kwargs",
")",
"matches",
"=",
"(",
"self",
".",
"style",
"&",
"style_bits",
")",
"==",
"style_bits",
"return",
... | Return a list of start, end pairs that match the specified style | [
"Return",
"a",
"list",
"of",
"start",
"end",
"pairs",
"that",
"match",
"the",
"specified",
"style"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L689-L694 | train | 48,807 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.fixup_comments | def fixup_comments(self):
"""Remove any style bytes that are marked as commented but have no
comment, and add any style bytes where there's a comment but it isn't
marked in the style data.
This happens on the base data, so only need to do this on one segment
that uses this base data.
"""
style_base = self.rawdata.style_base
comment_text_indexes = np.asarray(list(self.rawdata.extra.comments.keys()), dtype=np.uint32)
comment_mask = self.get_style_mask(comment=True)
has_comments = np.where(style_base & comment_bit_mask > 0)[0]
both = np.intersect1d(comment_text_indexes, has_comments)
log.info("fixup comments: %d correctly marked, %d without style, %d empty text" % (np.alen(both), np.alen(comment_text_indexes) - np.alen(both), np.alen(has_comments) - np.alen(both)))
style_base &= comment_mask
comment_style = self.get_style_bits(comment=True)
style_base[comment_text_indexes] |= comment_style | python | def fixup_comments(self):
"""Remove any style bytes that are marked as commented but have no
comment, and add any style bytes where there's a comment but it isn't
marked in the style data.
This happens on the base data, so only need to do this on one segment
that uses this base data.
"""
style_base = self.rawdata.style_base
comment_text_indexes = np.asarray(list(self.rawdata.extra.comments.keys()), dtype=np.uint32)
comment_mask = self.get_style_mask(comment=True)
has_comments = np.where(style_base & comment_bit_mask > 0)[0]
both = np.intersect1d(comment_text_indexes, has_comments)
log.info("fixup comments: %d correctly marked, %d without style, %d empty text" % (np.alen(both), np.alen(comment_text_indexes) - np.alen(both), np.alen(has_comments) - np.alen(both)))
style_base &= comment_mask
comment_style = self.get_style_bits(comment=True)
style_base[comment_text_indexes] |= comment_style | [
"def",
"fixup_comments",
"(",
"self",
")",
":",
"style_base",
"=",
"self",
".",
"rawdata",
".",
"style_base",
"comment_text_indexes",
"=",
"np",
".",
"asarray",
"(",
"list",
"(",
"self",
".",
"rawdata",
".",
"extra",
".",
"comments",
".",
"keys",
"(",
")... | Remove any style bytes that are marked as commented but have no
comment, and add any style bytes where there's a comment but it isn't
marked in the style data.
This happens on the base data, so only need to do this on one segment
that uses this base data. | [
"Remove",
"any",
"style",
"bytes",
"that",
"are",
"marked",
"as",
"commented",
"but",
"have",
"no",
"comment",
"and",
"add",
"any",
"style",
"bytes",
"where",
"there",
"s",
"a",
"comment",
"but",
"it",
"isn",
"t",
"marked",
"in",
"the",
"style",
"data",
... | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L696-L712 | train | 48,808 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.get_entire_style_ranges | def get_entire_style_ranges(self, split_comments=None, **kwargs):
"""Find sections of the segment that have the same style value.
The arguments to this function are used as a mask for the style to
determine where to split the styles. Style bits that aren't included in
the list will be ignored when splitting. The returned list covers the
entire length of the segment.
Returns a list of tuples, each tuple containing two items: a start, end
tuple; and an integer with the style value.
"""
style_bits = self.get_style_bits(**kwargs)
matches = self.get_comment_locations(**kwargs)
groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
if split_comments is None:
split_comments = []
# print groups
# split into groups with the same numbers
ranges = []
last_end = 0
if len(groups) == 1 and len(groups[0]) == 0:
# check for degenerate case
return
last_style = -1
for group in groups:
# each group is guaranteed to have the same style
size = len(group)
next_end = last_end + size
style = matches[last_end]
masked_style = style & style_bits
# print last_end, next_end, style, masked_style, size, group
if style & comment_bit_mask:
if masked_style in split_comments:
# print "interesting comment", last_end, next_end
ranges.append(((last_end, next_end), masked_style))
else:
# print "non-interesting comment", last_end, next_end
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
else:
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
last_style = masked_style
last_end = next_end
return ranges | python | def get_entire_style_ranges(self, split_comments=None, **kwargs):
"""Find sections of the segment that have the same style value.
The arguments to this function are used as a mask for the style to
determine where to split the styles. Style bits that aren't included in
the list will be ignored when splitting. The returned list covers the
entire length of the segment.
Returns a list of tuples, each tuple containing two items: a start, end
tuple; and an integer with the style value.
"""
style_bits = self.get_style_bits(**kwargs)
matches = self.get_comment_locations(**kwargs)
groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
if split_comments is None:
split_comments = []
# print groups
# split into groups with the same numbers
ranges = []
last_end = 0
if len(groups) == 1 and len(groups[0]) == 0:
# check for degenerate case
return
last_style = -1
for group in groups:
# each group is guaranteed to have the same style
size = len(group)
next_end = last_end + size
style = matches[last_end]
masked_style = style & style_bits
# print last_end, next_end, style, masked_style, size, group
if style & comment_bit_mask:
if masked_style in split_comments:
# print "interesting comment", last_end, next_end
ranges.append(((last_end, next_end), masked_style))
else:
# print "non-interesting comment", last_end, next_end
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
else:
if last_style == masked_style:
((prev_end, _), _) = ranges.pop()
ranges.append(((prev_end, next_end), masked_style))
else:
ranges.append(((last_end, next_end), masked_style))
last_style = masked_style
last_end = next_end
return ranges | [
"def",
"get_entire_style_ranges",
"(",
"self",
",",
"split_comments",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"style_bits",
"=",
"self",
".",
"get_style_bits",
"(",
"*",
"*",
"kwargs",
")",
"matches",
"=",
"self",
".",
"get_comment_locations",
"(",
... | Find sections of the segment that have the same style value.
The arguments to this function are used as a mask for the style to
determine where to split the styles. Style bits that aren't included in
the list will be ignored when splitting. The returned list covers the
entire length of the segment.
Returns a list of tuples, each tuple containing two items: a start, end
tuple; and an integer with the style value. | [
"Find",
"sections",
"of",
"the",
"segment",
"that",
"have",
"the",
"same",
"style",
"value",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L725-L775 | train | 48,809 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.restore_comments | def restore_comments(self, restore_data):
"""Restore comment styles and data
"""
for start, end, styles, items in restore_data:
log.debug("range: %d-%d" % (start, end))
self.style[start:end] = styles
for i in range(start, end):
rawindex, comment = items[i]
if comment:
log.debug(" restoring comment: rawindex=%d, '%s'" % (rawindex, comment))
self.rawdata.extra.comments[rawindex] = comment
else:
# no comment in original data, remove any if exists
try:
del self.rawdata.extra.comments[rawindex]
log.debug(" no comment in original data, removed comment in current data at rawindex=%d" % rawindex)
except KeyError:
log.debug(" no comment in original data or current data at rawindex=%d" % rawindex)
pass | python | def restore_comments(self, restore_data):
"""Restore comment styles and data
"""
for start, end, styles, items in restore_data:
log.debug("range: %d-%d" % (start, end))
self.style[start:end] = styles
for i in range(start, end):
rawindex, comment = items[i]
if comment:
log.debug(" restoring comment: rawindex=%d, '%s'" % (rawindex, comment))
self.rawdata.extra.comments[rawindex] = comment
else:
# no comment in original data, remove any if exists
try:
del self.rawdata.extra.comments[rawindex]
log.debug(" no comment in original data, removed comment in current data at rawindex=%d" % rawindex)
except KeyError:
log.debug(" no comment in original data or current data at rawindex=%d" % rawindex)
pass | [
"def",
"restore_comments",
"(",
"self",
",",
"restore_data",
")",
":",
"for",
"start",
",",
"end",
",",
"styles",
",",
"items",
"in",
"restore_data",
":",
"log",
".",
"debug",
"(",
"\"range: %d-%d\"",
"%",
"(",
"start",
",",
"end",
")",
")",
"self",
".... | Restore comment styles and data | [
"Restore",
"comment",
"styles",
"and",
"data"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L983-L1001 | train | 48,810 |
robmcmullen/atrcopy | atrcopy/segments.py | DefaultSegment.copy_user_data | def copy_user_data(self, source, index_offset=0):
"""Copy comments and other user data from the source segment to this
segment.
The index offset is the offset into self based on the index of source.
"""
for index, comment in source.iter_comments_in_segment():
self.set_comment_at(index + index_offset, comment) | python | def copy_user_data(self, source, index_offset=0):
"""Copy comments and other user data from the source segment to this
segment.
The index offset is the offset into self based on the index of source.
"""
for index, comment in source.iter_comments_in_segment():
self.set_comment_at(index + index_offset, comment) | [
"def",
"copy_user_data",
"(",
"self",
",",
"source",
",",
"index_offset",
"=",
"0",
")",
":",
"for",
"index",
",",
"comment",
"in",
"source",
".",
"iter_comments_in_segment",
"(",
")",
":",
"self",
".",
"set_comment_at",
"(",
"index",
"+",
"index_offset",
... | Copy comments and other user data from the source segment to this
segment.
The index offset is the offset into self based on the index of source. | [
"Copy",
"comments",
"and",
"other",
"user",
"data",
"from",
"the",
"source",
"segment",
"to",
"this",
"segment",
"."
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/segments.py#L1062-L1069 | train | 48,811 |
openstax/cnx-archive | cnxarchive/views/xpath.py | execute_xpath | def execute_xpath(xpath_string, sql_function, uuid, version):
"""Executes either xpath or xpath-module SQL function with given input
params."""
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
cursor.execute(SQL[sql_function],
{'document_uuid': uuid,
'document_version': version,
'xpath_string': xpath_string})
except psycopg2.Error as e:
exc = httpexceptions.HTTPBadRequest()
exc.explanation = e.pgerror
raise exc
for res in cursor.fetchall():
yield {'name': res[0],
'uuid': res[1],
'version': res[2],
'xpath_results': res[3]} | python | def execute_xpath(xpath_string, sql_function, uuid, version):
"""Executes either xpath or xpath-module SQL function with given input
params."""
settings = get_current_registry().settings
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
cursor.execute(SQL[sql_function],
{'document_uuid': uuid,
'document_version': version,
'xpath_string': xpath_string})
except psycopg2.Error as e:
exc = httpexceptions.HTTPBadRequest()
exc.explanation = e.pgerror
raise exc
for res in cursor.fetchall():
yield {'name': res[0],
'uuid': res[1],
'version': res[2],
'xpath_results': res[3]} | [
"def",
"execute_xpath",
"(",
"xpath_string",
",",
"sql_function",
",",
"uuid",
",",
"version",
")",
":",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".... | Executes either xpath or xpath-module SQL function with given input
params. | [
"Executes",
"either",
"xpath",
"or",
"xpath",
"-",
"module",
"SQL",
"function",
"with",
"given",
"input",
"params",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/xpath.py#L123-L144 | train | 48,812 |
openstax/cnx-archive | cnxarchive/views/content.py | tree_to_html | def tree_to_html(tree):
"""Return html list version of book tree."""
ul = etree.Element('ul')
html_listify([tree], ul)
return HTML_WRAPPER.format(etree.tostring(ul)) | python | def tree_to_html(tree):
"""Return html list version of book tree."""
ul = etree.Element('ul')
html_listify([tree], ul)
return HTML_WRAPPER.format(etree.tostring(ul)) | [
"def",
"tree_to_html",
"(",
"tree",
")",
":",
"ul",
"=",
"etree",
".",
"Element",
"(",
"'ul'",
")",
"html_listify",
"(",
"[",
"tree",
"]",
",",
"ul",
")",
"return",
"HTML_WRAPPER",
".",
"format",
"(",
"etree",
".",
"tostring",
"(",
"ul",
")",
")"
] | Return html list version of book tree. | [
"Return",
"html",
"list",
"version",
"of",
"book",
"tree",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L48-L52 | train | 48,813 |
openstax/cnx-archive | cnxarchive/views/content.py | _get_content_json | def _get_content_json(ident_hash=None):
"""Return a content as a dict from its ident-hash (uuid@version)."""
request = get_current_request()
routing_args = request and request.matchdict or {}
if not ident_hash:
ident_hash = routing_args['ident_hash']
as_collated = asbool(request.GET.get('as_collated', True))
page_ident_hash = routing_args.get('page_ident_hash', '')
p_id, p_version = (None, None)
if page_ident_hash:
try:
p_id, p_version = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
p_id = get_uuid(e.id)
p_version = e.version
except IdentHashMissingVersion as e:
# page ident hash doesn't need a version
p_id = e.id
p_version = None
id, version = split_ident_hash(ident_hash, containing=p_id)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
if result['mediaType'] == COLLECTION_MIMETYPE:
# Grab the collection tree.
result['tree'] = get_tree(ident_hash, cursor,
as_collated=as_collated)
result['collated'] = as_collated
if not result['tree']:
# If collated tree is not available, get the uncollated
# tree.
result['tree'] = get_tree(ident_hash, cursor)
result['collated'] = False
if page_ident_hash:
for id_ in flatten_tree_to_ident_hashes(result['tree']):
id, version = split_ident_hash(id_)
if id == p_id and (
version == p_version or not p_version):
content = None
if as_collated:
content = get_collated_content(
id_, ident_hash, cursor)
if content:
result = get_content_metadata(
id, version, cursor)
# Build url for canonical link header
result['canon_url'] = (
get_canonical_url(result, request))
result['content'] = content[:]
return result
# 302 'cause lack of baked content may be temporary
raise httpexceptions.HTTPFound(request.route_path(
request.matched_route.name,
_query=request.params,
ident_hash=join_ident_hash(id, version),
ext=routing_args['ext']),
headers=[("Cache-Control",
"max-age=60, public")])
raise httpexceptions.HTTPNotFound()
else:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
# Grab the html content.
args = dict(id=id, version=result['version'],
filename='index.cnxml.html')
cursor.execute(SQL['get-resource-by-filename'], args)
try:
content = cursor.fetchone()[0]
except (TypeError, IndexError,): # None returned
logger.debug("module found, but "
"'index.cnxml.html' is missing.")
raise httpexceptions.HTTPNotFound()
result['content'] = content[:]
return result | python | def _get_content_json(ident_hash=None):
"""Return a content as a dict from its ident-hash (uuid@version)."""
request = get_current_request()
routing_args = request and request.matchdict or {}
if not ident_hash:
ident_hash = routing_args['ident_hash']
as_collated = asbool(request.GET.get('as_collated', True))
page_ident_hash = routing_args.get('page_ident_hash', '')
p_id, p_version = (None, None)
if page_ident_hash:
try:
p_id, p_version = split_ident_hash(page_ident_hash)
except IdentHashShortId as e:
p_id = get_uuid(e.id)
p_version = e.version
except IdentHashMissingVersion as e:
# page ident hash doesn't need a version
p_id = e.id
p_version = None
id, version = split_ident_hash(ident_hash, containing=p_id)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
if result['mediaType'] == COLLECTION_MIMETYPE:
# Grab the collection tree.
result['tree'] = get_tree(ident_hash, cursor,
as_collated=as_collated)
result['collated'] = as_collated
if not result['tree']:
# If collated tree is not available, get the uncollated
# tree.
result['tree'] = get_tree(ident_hash, cursor)
result['collated'] = False
if page_ident_hash:
for id_ in flatten_tree_to_ident_hashes(result['tree']):
id, version = split_ident_hash(id_)
if id == p_id and (
version == p_version or not p_version):
content = None
if as_collated:
content = get_collated_content(
id_, ident_hash, cursor)
if content:
result = get_content_metadata(
id, version, cursor)
# Build url for canonical link header
result['canon_url'] = (
get_canonical_url(result, request))
result['content'] = content[:]
return result
# 302 'cause lack of baked content may be temporary
raise httpexceptions.HTTPFound(request.route_path(
request.matched_route.name,
_query=request.params,
ident_hash=join_ident_hash(id, version),
ext=routing_args['ext']),
headers=[("Cache-Control",
"max-age=60, public")])
raise httpexceptions.HTTPNotFound()
else:
result = get_content_metadata(id, version, cursor)
# Build url for canonical link header
result['canon_url'] = get_canonical_url(result, request)
# Grab the html content.
args = dict(id=id, version=result['version'],
filename='index.cnxml.html')
cursor.execute(SQL['get-resource-by-filename'], args)
try:
content = cursor.fetchone()[0]
except (TypeError, IndexError,): # None returned
logger.debug("module found, but "
"'index.cnxml.html' is missing.")
raise httpexceptions.HTTPNotFound()
result['content'] = content[:]
return result | [
"def",
"_get_content_json",
"(",
"ident_hash",
"=",
"None",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"routing_args",
"=",
"request",
"and",
"request",
".",
"matchdict",
"or",
"{",
"}",
"if",
"not",
"ident_hash",
":",
"ident_hash",
"=",
"rou... | Return a content as a dict from its ident-hash (uuid@version). | [
"Return",
"a",
"content",
"as",
"a",
"dict",
"from",
"its",
"ident",
"-",
"hash",
"(",
"uuid"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L55-L137 | train | 48,814 |
openstax/cnx-archive | cnxarchive/views/content.py | get_content_json | def get_content_json(request):
"""Retrieve content as JSON using the ident-hash (uuid@version)."""
result = _get_content_json()
resp = request.response
resp.status = "200 OK"
resp.content_type = 'application/json'
resp.body = json.dumps(result)
return result, resp | python | def get_content_json(request):
"""Retrieve content as JSON using the ident-hash (uuid@version)."""
result = _get_content_json()
resp = request.response
resp.status = "200 OK"
resp.content_type = 'application/json'
resp.body = json.dumps(result)
return result, resp | [
"def",
"get_content_json",
"(",
"request",
")",
":",
"result",
"=",
"_get_content_json",
"(",
")",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"status",
"=",
"\"200 OK\"",
"resp",
".",
"content_type",
"=",
"'application/json'",
"resp",
".",
"body",
... | Retrieve content as JSON using the ident-hash (uuid@version). | [
"Retrieve",
"content",
"as",
"JSON",
"using",
"the",
"ident",
"-",
"hash",
"(",
"uuid"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L140-L148 | train | 48,815 |
openstax/cnx-archive | cnxarchive/views/content.py | get_content_html | def get_content_html(request):
"""Retrieve content as HTML using the ident-hash (uuid@version)."""
result = _get_content_json()
media_type = result['mediaType']
if media_type == COLLECTION_MIMETYPE:
content = tree_to_html(result['tree'])
else:
content = result['content']
resp = request.response
resp.body = content
resp.status = "200 OK"
resp.content_type = 'application/xhtml+xml'
return result, resp | python | def get_content_html(request):
"""Retrieve content as HTML using the ident-hash (uuid@version)."""
result = _get_content_json()
media_type = result['mediaType']
if media_type == COLLECTION_MIMETYPE:
content = tree_to_html(result['tree'])
else:
content = result['content']
resp = request.response
resp.body = content
resp.status = "200 OK"
resp.content_type = 'application/xhtml+xml'
return result, resp | [
"def",
"get_content_html",
"(",
"request",
")",
":",
"result",
"=",
"_get_content_json",
"(",
")",
"media_type",
"=",
"result",
"[",
"'mediaType'",
"]",
"if",
"media_type",
"==",
"COLLECTION_MIMETYPE",
":",
"content",
"=",
"tree_to_html",
"(",
"result",
"[",
"... | Retrieve content as HTML using the ident-hash (uuid@version). | [
"Retrieve",
"content",
"as",
"HTML",
"using",
"the",
"ident",
"-",
"hash",
"(",
"uuid"
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L151-L165 | train | 48,816 |
openstax/cnx-archive | cnxarchive/views/content.py | html_listify | def html_listify(tree, root_ul_element, parent_id=None):
"""Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined.
"""
request = get_current_request()
is_first_node = parent_id is None
if is_first_node:
parent_id = tree[0]['id']
for node in tree:
li_elm = etree.SubElement(root_ul_element, 'li')
a_elm = etree.SubElement(li_elm, 'a')
a_elm.text = node['title']
if node['id'] != 'subcol':
if is_first_node:
a_elm.set('href', request.route_path(
'content', ident_hash=node['id'], ext='.html'))
else:
a_elm.set('href', request.route_path(
'content',
separator=':',
ident_hash=parent_id,
page_ident_hash=node['id'],
ext='.html'))
if 'contents' in node:
elm = etree.SubElement(li_elm, 'ul')
html_listify(node['contents'], elm, parent_id) | python | def html_listify(tree, root_ul_element, parent_id=None):
"""Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined.
"""
request = get_current_request()
is_first_node = parent_id is None
if is_first_node:
parent_id = tree[0]['id']
for node in tree:
li_elm = etree.SubElement(root_ul_element, 'li')
a_elm = etree.SubElement(li_elm, 'a')
a_elm.text = node['title']
if node['id'] != 'subcol':
if is_first_node:
a_elm.set('href', request.route_path(
'content', ident_hash=node['id'], ext='.html'))
else:
a_elm.set('href', request.route_path(
'content',
separator=':',
ident_hash=parent_id,
page_ident_hash=node['id'],
ext='.html'))
if 'contents' in node:
elm = etree.SubElement(li_elm, 'ul')
html_listify(node['contents'], elm, parent_id) | [
"def",
"html_listify",
"(",
"tree",
",",
"root_ul_element",
",",
"parent_id",
"=",
"None",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"is_first_node",
"=",
"parent_id",
"is",
"None",
"if",
"is_first_node",
":",
"parent_id",
"=",
"tree",
"[",
... | Recursively construct HTML nested list version of book tree.
The original caller should not call this function with the
`parent_id` defined. | [
"Recursively",
"construct",
"HTML",
"nested",
"list",
"version",
"of",
"book",
"tree",
".",
"The",
"original",
"caller",
"should",
"not",
"call",
"this",
"function",
"with",
"the",
"parent_id",
"defined",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L168-L195 | train | 48,817 |
openstax/cnx-archive | cnxarchive/views/content.py | get_export_allowable_types | def get_export_allowable_types(cursor, exports_dirs, id, version):
"""Return export types."""
request = get_current_request()
type_settings = request.registry.settings['_type_info']
type_names = [k for k, v in type_settings]
type_infos = [v for k, v in type_settings]
# We took the type_names directly from the setting this function uses to
# check for valid types, so it should never raise an ExportError here
file_tuples = get_export_files(cursor, id, version, type_names,
exports_dirs, read_file=False)
type_settings = dict(type_settings)
for filename, mimetype, file_size, file_created, state, file_content \
in file_tuples:
type_name = filename.rsplit('.', 1)[-1]
type_info = type_settings[type_name]
yield {
'format': type_info['user_friendly_name'],
'filename': filename,
'size': file_size,
'created': file_created and file_created.isoformat() or None,
'state': state,
'details': type_info['description'],
'path': request.route_path(
'export', ident_hash=join_ident_hash(id, version),
type=type_name, ignore=u'/{}'.format(filename))
} | python | def get_export_allowable_types(cursor, exports_dirs, id, version):
"""Return export types."""
request = get_current_request()
type_settings = request.registry.settings['_type_info']
type_names = [k for k, v in type_settings]
type_infos = [v for k, v in type_settings]
# We took the type_names directly from the setting this function uses to
# check for valid types, so it should never raise an ExportError here
file_tuples = get_export_files(cursor, id, version, type_names,
exports_dirs, read_file=False)
type_settings = dict(type_settings)
for filename, mimetype, file_size, file_created, state, file_content \
in file_tuples:
type_name = filename.rsplit('.', 1)[-1]
type_info = type_settings[type_name]
yield {
'format': type_info['user_friendly_name'],
'filename': filename,
'size': file_size,
'created': file_created and file_created.isoformat() or None,
'state': state,
'details': type_info['description'],
'path': request.route_path(
'export', ident_hash=join_ident_hash(id, version),
type=type_name, ignore=u'/{}'.format(filename))
} | [
"def",
"get_export_allowable_types",
"(",
"cursor",
",",
"exports_dirs",
",",
"id",
",",
"version",
")",
":",
"request",
"=",
"get_current_request",
"(",
")",
"type_settings",
"=",
"request",
".",
"registry",
".",
"settings",
"[",
"'_type_info'",
"]",
"type_name... | Return export types. | [
"Return",
"export",
"types",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L222-L247 | train | 48,818 |
openstax/cnx-archive | cnxarchive/views/content.py | get_book_info | def get_book_info(cursor, real_dict_cursor, book_id,
book_version, page_id, page_version):
"""Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book.
"""
book_ident_hash = join_ident_hash(book_id, book_version)
page_ident_hash = join_ident_hash(page_id, page_version)
tree = get_tree(book_ident_hash, cursor)
# Check if the page appears in the book tree
if not tree or page_ident_hash not in flatten_tree_to_ident_hashes(tree):
# Return a 404 error if the page is not actually in the book tree
raise httpexceptions.HTTPNotFound()
sql_statement = """
SELECT m.name as title,
ident_hash(m.uuid, m.major_version, m.minor_version)
as ident_hash,
short_ident_hash(m.uuid, m.major_version, m.minor_version)
as shortId, ARRAY(
SELECT row_to_json(user_row)
FROM (
SELECT u.username, u.first_name as firstname,
u.last_name as surname, u.full_name as fullname,
u.title, u.suffix
) as user_row
) as authors,
m.revised
FROM modules m
JOIN users as u on u.username = ANY(m.authors)
WHERE ident_hash(m.uuid, m.major_version, m.minor_version) = %s
"""
real_dict_cursor.execute(sql_statement, vars=(book_ident_hash,))
return real_dict_cursor.fetchone() | python | def get_book_info(cursor, real_dict_cursor, book_id,
book_version, page_id, page_version):
"""Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book.
"""
# Build the "<id>@<version>" ident-hash keys used for the lookups below.
book_ident_hash = join_ident_hash(book_id, book_version)
page_ident_hash = join_ident_hash(page_id, page_version)
tree = get_tree(book_ident_hash, cursor)
# Check if the page appears in the book tree
if not tree or page_ident_hash not in flatten_tree_to_ident_hashes(tree):
# Return a 404 error if the page is not actually in the book tree
raise httpexceptions.HTTPNotFound()
# NOTE(review): %s is a psycopg2 bind parameter — book_ident_hash is
# passed safely via vars=(...), not interpolated into the SQL text.
sql_statement = """
SELECT m.name as title,
ident_hash(m.uuid, m.major_version, m.minor_version)
as ident_hash,
short_ident_hash(m.uuid, m.major_version, m.minor_version)
as shortId, ARRAY(
SELECT row_to_json(user_row)
FROM (
SELECT u.username, u.first_name as firstname,
u.last_name as surname, u.full_name as fullname,
u.title, u.suffix
) as user_row
) as authors,
m.revised
FROM modules m
JOIN users as u on u.username = ANY(m.authors)
WHERE ident_hash(m.uuid, m.major_version, m.minor_version) = %s
"""
real_dict_cursor.execute(sql_statement, vars=(book_ident_hash,))
return real_dict_cursor.fetchone()
"def",
"get_book_info",
"(",
"cursor",
",",
"real_dict_cursor",
",",
"book_id",
",",
"book_version",
",",
"page_id",
",",
"page_version",
")",
":",
"book_ident_hash",
"=",
"join_ident_hash",
"(",
"book_id",
",",
"book_version",
")",
"page_ident_hash",
"=",
"join_i... | Return information about a given book.
Return the book's title, id, shortId, authors and revised date.
Raise HTTPNotFound if the page is not in the book. | [
"Return",
"information",
"about",
"a",
"given",
"book",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L251-L286 | train | 48,819 |
openstax/cnx-archive | cnxarchive/views/content.py | get_portal_type | def get_portal_type(cursor, id, version):
"""Return the module's portal_type."""
args = join_ident_hash(id, version)
sql_statement = """
SELECT m.portal_type
FROM modules as m
WHERE ident_hash(uuid, major_version, minor_version) = %s
"""
cursor.execute(sql_statement, vars=(args,))
res = cursor.fetchone()
if res is None:
return None
else:
return res[0] | python | def get_portal_type(cursor, id, version):
"""Return the module's portal_type."""
args = join_ident_hash(id, version)
sql_statement = """
SELECT m.portal_type
FROM modules as m
WHERE ident_hash(uuid, major_version, minor_version) = %s
"""
cursor.execute(sql_statement, vars=(args,))
res = cursor.fetchone()
if res is None:
return None
else:
return res[0] | [
"def",
"get_portal_type",
"(",
"cursor",
",",
"id",
",",
"version",
")",
":",
"args",
"=",
"join_ident_hash",
"(",
"id",
",",
"version",
")",
"sql_statement",
"=",
"\"\"\"\n SELECT m.portal_type\n FROM modules as m\n WHERE ident_hash(uuid, major_version, minor_versio... | Return the module's portal_type. | [
"Return",
"the",
"module",
"s",
"portal_type",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L290-L304 | train | 48,820 |
openstax/cnx-archive | cnxarchive/views/content.py | get_books_containing_page | def get_books_containing_page(cursor, uuid, version,
context_uuid=None, context_version=None):
"""Return a list of book names and UUIDs
that contain a given module UUID."""
with db_connect() as db_connection:
# Uses a RealDictCursor instead of the regular cursor
with db_connection.cursor(
cursor_factory=psycopg2.extras.RealDictCursor
) as real_dict_cursor:
# In the future the books-containing-page SQL might handle
# all of these cases. For now we branch the code out in here.
if context_uuid and context_version:
return [get_book_info(cursor, real_dict_cursor, context_uuid,
context_version, uuid, version)]
else:
portal_type = get_portal_type(cursor, uuid, version)
if portal_type == 'Module':
real_dict_cursor.execute(SQL['get-books-containing-page'],
{'document_uuid': uuid,
'document_version': version})
return real_dict_cursor.fetchall()
else:
# Books are currently not in any other book
return [] | python | def get_books_containing_page(cursor, uuid, version,
def get_books_containing_page(cursor, uuid, version,
                              context_uuid=None, context_version=None):
    """Return a list of book names and UUIDs
    that contain a given module UUID."""
    with db_connect() as db_connection:
        # Uses a RealDictCursor instead of the regular cursor
        with db_connection.cursor(
                cursor_factory=psycopg2.extras.RealDictCursor
        ) as rd_cursor:
            # In the future the books-containing-page SQL might handle
            # all of these cases. For now we branch the code out in here.
            if context_uuid and context_version:
                # An explicit book context was supplied: describe only it.
                return [get_book_info(cursor, rd_cursor, context_uuid,
                                      context_version, uuid, version)]
            if get_portal_type(cursor, uuid, version) != 'Module':
                # Books are currently not in any other book
                return []
            rd_cursor.execute(SQL['get-books-containing-page'],
                              {'document_uuid': uuid,
                               'document_version': version})
            return rd_cursor.fetchall()
"def",
"get_books_containing_page",
"(",
"cursor",
",",
"uuid",
",",
"version",
",",
"context_uuid",
"=",
"None",
",",
"context_version",
"=",
"None",
")",
":",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"# Uses a RealDictCursor instead of the regul... | Return a list of book names and UUIDs
that contain a given module UUID. | [
"Return",
"a",
"list",
"of",
"book",
"names",
"and",
"UUIDs",
"that",
"contain",
"a",
"given",
"module",
"UUID",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L307-L330 | train | 48,821 |
openstax/cnx-archive | cnxarchive/views/content.py | get_canonical_url | def get_canonical_url(metadata, request):
"""Builds canonical in book url from a pages metadata."""
slug_title = u'/{}'.format('-'.join(metadata['title'].split()))
settings = get_current_registry().settings
canon_host = settings.get('canonical-hostname',
re.sub('archive.', '', request.host))
if metadata['canonical'] is None:
canon_url = request.route_url(
'content',
ident_hash=metadata['id'],
ignore=slug_title)
else:
canon_url = request.route_url(
'content',
ident_hash=metadata['canonical'],
separator=':',
page_ident_hash=metadata['id'],
ignore=slug_title)
return re.sub(request.host, canon_host, canon_url) | python | def get_canonical_url(metadata, request):
"""Builds canonical in book url from a pages metadata."""
slug_title = u'/{}'.format('-'.join(metadata['title'].split()))
settings = get_current_registry().settings
canon_host = settings.get('canonical-hostname',
re.sub('archive.', '', request.host))
if metadata['canonical'] is None:
canon_url = request.route_url(
'content',
ident_hash=metadata['id'],
ignore=slug_title)
else:
canon_url = request.route_url(
'content',
ident_hash=metadata['canonical'],
separator=':',
page_ident_hash=metadata['id'],
ignore=slug_title)
return re.sub(request.host, canon_host, canon_url) | [
"def",
"get_canonical_url",
"(",
"metadata",
",",
"request",
")",
":",
"slug_title",
"=",
"u'/{}'",
".",
"format",
"(",
"'-'",
".",
"join",
"(",
"metadata",
"[",
"'title'",
"]",
".",
"split",
"(",
")",
")",
")",
"settings",
"=",
"get_current_registry",
"... | Builds canonical in book url from a pages metadata. | [
"Builds",
"canonical",
"in",
"book",
"url",
"from",
"a",
"pages",
"metadata",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L333-L353 | train | 48,822 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | map_indices_child2parent | def map_indices_child2parent(child, child_indices):
"""Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices corresponding to all child events
idx = np.where(pf)[0] # True means present in the child
# indices corresponding to selected child events
parent_indices = idx[child_indices]
return parent_indices | python | def map_indices_child2parent(child, child_indices):
"""Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices corresponding to all child events
idx = np.where(pf)[0] # True means present in the child
# indices corresponding to selected child events
parent_indices = idx[child_indices]
return parent_indices | [
"def",
"map_indices_child2parent",
"(",
"child",
",",
"child_indices",
")",
":",
"parent",
"=",
"child",
".",
"hparent",
"# filters",
"pf",
"=",
"parent",
".",
"filter",
".",
"all",
"# indices corresponding to all child events",
"idx",
"=",
"np",
".",
"where",
"... | Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices | [
"Map",
"child",
"RTDCBase",
"event",
"indices",
"to",
"parent",
"RTDCBase"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L310-L332 | train | 48,823 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | map_indices_child2root | def map_indices_child2root(child, child_indices):
"""Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`)
"""
while True:
indices = map_indices_child2parent(child=child,
child_indices=child_indices)
if isinstance(child.hparent, RTDC_Hierarchy):
child = child.hparent
child_indices = indices
else:
break
return indices | python | def map_indices_child2root(child, child_indices):
"""Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`)
"""
while True:
indices = map_indices_child2parent(child=child,
child_indices=child_indices)
if isinstance(child.hparent, RTDC_Hierarchy):
child = child.hparent
child_indices = indices
else:
break
return indices | [
"def",
"map_indices_child2root",
"(",
"child",
",",
"child_indices",
")",
":",
"while",
"True",
":",
"indices",
"=",
"map_indices_child2parent",
"(",
"child",
"=",
"child",
",",
"child_indices",
"=",
"child_indices",
")",
"if",
"isinstance",
"(",
"child",
".",
... | Map RTDC_Hierarchy event indices to root RTDCBase
Parameters
----------
child: RTDC_Hierarchy
RTDCBase hierarchy child
child_indices: 1d ndarray
child indices to map
Returns
-------
root_indices: 1d ndarray
hierarchy root indices
(not necessarily the indices of `parent`) | [
"Map",
"RTDC_Hierarchy",
"event",
"indices",
"to",
"root",
"RTDCBase"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L335-L359 | train | 48,824 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | map_indices_parent2child | def map_indices_parent2child(child, parent_indices):
"""Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
parent: RTDC_Hierarchy
hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices in child
child_indices = []
count = 0
for ii in range(len(pf)):
if pf[ii]:
# only append indices if they exist in child
if ii in parent_indices:
# current child event count is the child index
child_indices.append(count)
# increment child event count
count += 1
return np.array(child_indices) | python | def map_indices_parent2child(child, parent_indices):
"""Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
parent: RTDC_Hierarchy
hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices in child
child_indices = []
count = 0
for ii in range(len(pf)):
if pf[ii]:
# only append indices if they exist in child
if ii in parent_indices:
# current child event count is the child index
child_indices.append(count)
# increment child event count
count += 1
return np.array(child_indices) | [
"def",
"map_indices_parent2child",
"(",
"child",
",",
"parent_indices",
")",
":",
"parent",
"=",
"child",
".",
"hparent",
"# filters",
"pf",
"=",
"parent",
".",
"filter",
".",
"all",
"# indices in child",
"child_indices",
"=",
"[",
"]",
"count",
"=",
"0",
"f... | Map parent RTDCBase event indices to RTDC_Hierarchy
Parameters
----------
parent: RTDC_Hierarchy
hierarchy child
parent_indices: 1d ndarray
hierarchy parent (`child.hparent`) indices to map
Returns
-------
child_indices: 1d ndarray
child indices | [
"Map",
"parent",
"RTDCBase",
"event",
"indices",
"to",
"RTDC_Hierarchy"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L362-L392 | train | 48,825 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | map_indices_root2child | def map_indices_root2child(child, root_indices):
"""Map root RTDCBase event indices to child RTDCBase
Parameters
----------
parent: RTDCBase
hierarchy parent of `child`.
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices
"""
# construct hierarchy tree containing only RTDC_Hierarchy instances
hierarchy = [child]
while True:
if isinstance(child.hparent, RTDC_Hierarchy):
# the parent is a hierarchy tree
hierarchy.append(child.hparent)
child = child.hparent
else:
break
indices = root_indices
for hp in hierarchy[::-1]: # reverse order
# For each hierarchy parent, map the indices down the
# hierarchy tree.
indices = map_indices_parent2child(child=hp,
parent_indices=indices)
return indices | python | def map_indices_root2child(child, root_indices):
"""Map root RTDCBase event indices to child RTDCBase
Parameters
----------
parent: RTDCBase
hierarchy parent of `child`.
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices
"""
# construct hierarchy tree containing only RTDC_Hierarchy instances
hierarchy = [child]
while True:
if isinstance(child.hparent, RTDC_Hierarchy):
# the parent is a hierarchy tree
hierarchy.append(child.hparent)
child = child.hparent
else:
break
indices = root_indices
for hp in hierarchy[::-1]: # reverse order
# For each hierarchy parent, map the indices down the
# hierarchy tree.
indices = map_indices_parent2child(child=hp,
parent_indices=indices)
return indices | [
"def",
"map_indices_root2child",
"(",
"child",
",",
"root_indices",
")",
":",
"# construct hierarchy tree containing only RTDC_Hierarchy instances",
"hierarchy",
"=",
"[",
"child",
"]",
"while",
"True",
":",
"if",
"isinstance",
"(",
"child",
".",
"hparent",
",",
"RTDC... | Map root RTDCBase event indices to child RTDCBase
Parameters
----------
parent: RTDCBase
hierarchy parent of `child`.
root_indices: 1d ndarray
hierarchy root indices to map
(not necessarily the indices of `parent`)
Returns
-------
child_indices: 1d ndarray
child indices | [
"Map",
"root",
"RTDCBase",
"event",
"indices",
"to",
"child",
"RTDCBase"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L395-L427 | train | 48,826 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | HierarchyFilter.apply_manual_indices | def apply_manual_indices(self, manual_indices):
"""Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work.
"""
if self.parent_changed:
msg = "Cannot apply filter, because parent changed: " \
+ "dataset {}. ".format(self.rtdc_ds) \
+ "Run `RTDC_Hierarchy.apply_filter()` first!"
raise HierarchyFilterError(msg)
else:
self._man_root_ids = list(manual_indices)
cidx = map_indices_root2child(child=self.rtdc_ds,
root_indices=manual_indices)
if len(cidx):
self.manual[cidx] = False | python | def apply_manual_indices(self, manual_indices):
"""Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work.
"""
if self.parent_changed:
msg = "Cannot apply filter, because parent changed: " \
+ "dataset {}. ".format(self.rtdc_ds) \
+ "Run `RTDC_Hierarchy.apply_filter()` first!"
raise HierarchyFilterError(msg)
else:
self._man_root_ids = list(manual_indices)
cidx = map_indices_root2child(child=self.rtdc_ds,
root_indices=manual_indices)
if len(cidx):
self.manual[cidx] = False | [
"def",
"apply_manual_indices",
"(",
"self",
",",
"manual_indices",
")",
":",
"if",
"self",
".",
"parent_changed",
":",
"msg",
"=",
"\"Cannot apply filter, because parent changed: \"",
"+",
"\"dataset {}. \"",
".",
"format",
"(",
"self",
".",
"rtdc_ds",
")",
"+",
"... | Write to `self.manual`
Write `manual_indices` to the boolean array `self.manual`
and also store the indices as `self._man_root_ids`.
Notes
-----
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
`HierarchyFilterError` is raised. This is important, because
the size of the current filter would not match the size of
the filtered events of the parent and thus index-mapping
would not work. | [
"Write",
"to",
"self",
".",
"manual"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L93-L118 | train | 48,827 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | HierarchyFilter.retrieve_manual_indices | def retrieve_manual_indices(self):
"""Read from self.manual
Read from the boolean array `self.manual`, index all
occurences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
hidden because a hierarchy parent filtered it out.
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
nothing is computed and `self._man_root_ids` as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work.
"""
if self.parent_changed:
# ignore
pass
else:
# indices from boolean array
pbool = map_indices_child2root(
child=self.rtdc_ds,
child_indices=np.where(~self.manual)[0]).tolist()
# retrieve all indices that are currently not visible
# previous indices
pold = self._man_root_ids
# all indices previously selected either via
# - self.manual or
# - self.apply_manual_indices
pall = sorted(list(set(pbool + pold)))
# visible indices (only available child indices are returned)
pvis_c = map_indices_root2child(child=self.rtdc_ds,
root_indices=pall).tolist()
# map visible child indices back to root indices
pvis_p = map_indices_child2root(child=self.rtdc_ds,
child_indices=pvis_c).tolist()
# hidden indices
phid = list(set(pall) - set(pvis_p))
# Why not set `all_idx` to `pall`:
# - pbool is considered to be correct
# - pold contains hidden indices, but also might contain
# excess indices from before, i.e. if self.apply_manual_indices
# is called, self.manual is also updated. If however,
# self.manual is updated, self._man_root_ids are not updated.
# Thus, we trust pbool (self.manual) and only use pold
# (self._man_root_ids) to determine hidden indices.
all_idx = list(set(pbool + phid))
self._man_root_ids = sorted(all_idx)
return self._man_root_ids | python | def retrieve_manual_indices(self):
"""Read from self.manual
Read from the boolean array `self.manual`, index all
occurences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
hidden because a hierarchy parent filtered it out.
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
nothing is computed and `self._man_root_ids` as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work.
"""
if self.parent_changed:
# ignore
pass
else:
# indices from boolean array
pbool = map_indices_child2root(
child=self.rtdc_ds,
child_indices=np.where(~self.manual)[0]).tolist()
# retrieve all indices that are currently not visible
# previous indices
pold = self._man_root_ids
# all indices previously selected either via
# - self.manual or
# - self.apply_manual_indices
pall = sorted(list(set(pbool + pold)))
# visible indices (only available child indices are returned)
pvis_c = map_indices_root2child(child=self.rtdc_ds,
root_indices=pall).tolist()
# map visible child indices back to root indices
pvis_p = map_indices_child2root(child=self.rtdc_ds,
child_indices=pvis_c).tolist()
# hidden indices
phid = list(set(pall) - set(pvis_p))
# Why not set `all_idx` to `pall`:
# - pbool is considered to be correct
# - pold contains hidden indices, but also might contain
# excess indices from before, i.e. if self.apply_manual_indices
# is called, self.manual is also updated. If however,
# self.manual is updated, self._man_root_ids are not updated.
# Thus, we trust pbool (self.manual) and only use pold
# (self._man_root_ids) to determine hidden indices.
all_idx = list(set(pbool + phid))
self._man_root_ids = sorted(all_idx)
return self._man_root_ids | [
"def",
"retrieve_manual_indices",
"(",
"self",
")",
":",
"if",
"self",
".",
"parent_changed",
":",
"# ignore",
"pass",
"else",
":",
"# indices from boolean array",
"pbool",
"=",
"map_indices_child2root",
"(",
"child",
"=",
"self",
".",
"rtdc_ds",
",",
"child_indic... | Read from self.manual
Read from the boolean array `self.manual`, index all
occurences of `False` and find the corresponding indices
in the root hierarchy parent, return those and store them
in `self._man_root_ids` as well.
Notes
-----
This method also retrieves hidden indices, i.e. events
that are not part of the current hierarchy child but
which have been manually excluded before and are now
hidden because a hierarchy parent filtered it out.
If `self.parent_changed` is `True`, i.e. the parent applied
a filter and the child did not yet hear about this, then
nothing is computed and `self._man_root_ids` as-is. This
is important, because the size of the current filter would
not match the size of the filtered events of the parent and
thus index-mapping would not work. | [
"Read",
"from",
"self",
".",
"manual"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L120-L175 | train | 48,828 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | RTDC_Hierarchy.apply_filter | def apply_filter(self, *args, **kwargs):
"""Overridden `apply_filter` to perform tasks for hierarchy child"""
if self.filter is not None:
# make sure self.filter knows about root manual indices
self.filter.retrieve_manual_indices()
# Copy event data from hierarchy parent
self.hparent.apply_filter()
# update event index
event_count = np.sum(self.hparent._filter)
self._events = {}
self._events["index"] = np.arange(1, event_count + 1)
# set non-scalar column data
if "contour" in self.hparent:
self._events["contour"] = ChildContour(self)
if "image" in self.hparent:
self._events["image"] = ChildImage(self)
if "mask" in self.hparent:
self._events["mask"] = ChildMask(self)
if "trace" in self.hparent:
trdict = {}
for flname in dfn.FLUOR_TRACES:
if flname in self.hparent["trace"]:
trdict[flname] = ChildTrace(self, flname)
self._events["trace"] = trdict
# update config
self.config["experiment"]["event count"] = event_count
self._init_filters()
super(RTDC_Hierarchy, self).apply_filter(*args, **kwargs) | python | def apply_filter(self, *args, **kwargs):
"""Overridden `apply_filter` to perform tasks for hierarchy child"""
if self.filter is not None:
# make sure self.filter knows about root manual indices
self.filter.retrieve_manual_indices()
# Copy event data from hierarchy parent
self.hparent.apply_filter()
# update event index
event_count = np.sum(self.hparent._filter)
self._events = {}
self._events["index"] = np.arange(1, event_count + 1)
# set non-scalar column data
if "contour" in self.hparent:
self._events["contour"] = ChildContour(self)
if "image" in self.hparent:
self._events["image"] = ChildImage(self)
if "mask" in self.hparent:
self._events["mask"] = ChildMask(self)
if "trace" in self.hparent:
trdict = {}
for flname in dfn.FLUOR_TRACES:
if flname in self.hparent["trace"]:
trdict[flname] = ChildTrace(self, flname)
self._events["trace"] = trdict
# update config
self.config["experiment"]["event count"] = event_count
self._init_filters()
super(RTDC_Hierarchy, self).apply_filter(*args, **kwargs) | [
"def",
"apply_filter",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"filter",
"is",
"not",
"None",
":",
"# make sure self.filter knows about root manual indices",
"self",
".",
"filter",
".",
"retrieve_manual_indices",
"(",... | Overridden `apply_filter` to perform tasks for hierarchy child | [
"Overridden",
"apply_filter",
"to",
"perform",
"tasks",
"for",
"hierarchy",
"child"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L269-L298 | train | 48,829 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_hierarchy.py | RTDC_Hierarchy.hash | def hash(self):
"""Hashes of a hierarchy child changes if the parent changes"""
# Do not apply filters here (speed)
hph = self.hparent.hash
hpfilt = hashobj(self.hparent._filter)
dhash = hashobj(hph + hpfilt)
return dhash | python | def hash(self):
"""Hashes of a hierarchy child changes if the parent changes"""
# Do not apply filters here (speed)
hph = self.hparent.hash
hpfilt = hashobj(self.hparent._filter)
dhash = hashobj(hph + hpfilt)
return dhash | [
"def",
"hash",
"(",
"self",
")",
":",
"# Do not apply filters here (speed)",
"hph",
"=",
"self",
".",
"hparent",
".",
"hash",
"hpfilt",
"=",
"hashobj",
"(",
"self",
".",
"hparent",
".",
"_filter",
")",
"dhash",
"=",
"hashobj",
"(",
"hph",
"+",
"hpfilt",
... | Hashes of a hierarchy child changes if the parent changes | [
"Hashes",
"of",
"a",
"hierarchy",
"child",
"changes",
"if",
"the",
"parent",
"changes"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_hierarchy.py#L301-L307 | train | 48,830 |
ZELLMECHANIK-DRESDEN/dclab | dclab/rtdc_dataset/fmt_tdms/event_image.py | ImageColumn.find_video_file | def find_video_file(rtdc_dataset):
"""Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found.
"""
video = None
if rtdc_dataset._fdir.exists():
# Cell images (video)
videos = [v.name for v in rtdc_dataset._fdir.rglob("*.avi")]
# Filter videos according to measurement number
meas_id = rtdc_dataset._mid
videos = [v for v in videos if v.split("_")[0] == meas_id]
videos.sort()
if len(videos) != 0:
# Defaults to first avi file
video = videos[0]
# g/q video file names. q comes first.
for v in videos:
if v.endswith("imag.avi"):
video = v
break
# add this here, because fRT-DC measurements also contain
# videos ..._proc.avi
elif v.endswith("imaq.avi"):
video = v
break
if video is None:
return None
else:
return rtdc_dataset._fdir / video | python | def find_video_file(rtdc_dataset):
"""Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found.
"""
video = None
if rtdc_dataset._fdir.exists():
# Cell images (video)
videos = [v.name for v in rtdc_dataset._fdir.rglob("*.avi")]
# Filter videos according to measurement number
meas_id = rtdc_dataset._mid
videos = [v for v in videos if v.split("_")[0] == meas_id]
videos.sort()
if len(videos) != 0:
# Defaults to first avi file
video = videos[0]
# g/q video file names. q comes first.
for v in videos:
if v.endswith("imag.avi"):
video = v
break
# add this here, because fRT-DC measurements also contain
# videos ..._proc.avi
elif v.endswith("imaq.avi"):
video = v
break
if video is None:
return None
else:
return rtdc_dataset._fdir / video | [
"def",
"find_video_file",
"(",
"rtdc_dataset",
")",
":",
"video",
"=",
"None",
"if",
"rtdc_dataset",
".",
"_fdir",
".",
"exists",
"(",
")",
":",
"# Cell images (video)",
"videos",
"=",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"rtdc_dataset",
".",
"_fdir",
... | Tries to find a video file that belongs to an RTDC dataset
Returns None if no video file is found. | [
"Tries",
"to",
"find",
"a",
"video",
"file",
"that",
"belongs",
"to",
"an",
"RTDC",
"dataset"
] | 79002c4356e7020c2ba73ab0a3819c9abd4affec | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/fmt_tdms/event_image.py#L66-L95 | train | 48,831 |
robmcmullen/atrcopy | atrcopy/ataridos.py | AtariDosDiskImage.as_new_format | def as_new_format(self, format="ATR"):
""" Create a new disk image in the specified format
"""
first_data = len(self.header)
raw = self.rawdata[first_data:]
data = add_atr_header(raw)
newraw = SegmentData(data)
image = self.__class__(newraw)
return image | python | def as_new_format(self, format="ATR"):
""" Create a new disk image in the specified format
"""
first_data = len(self.header)
raw = self.rawdata[first_data:]
data = add_atr_header(raw)
newraw = SegmentData(data)
image = self.__class__(newraw)
return image | [
"def",
"as_new_format",
"(",
"self",
",",
"format",
"=",
"\"ATR\"",
")",
":",
"first_data",
"=",
"len",
"(",
"self",
".",
"header",
")",
"raw",
"=",
"self",
".",
"rawdata",
"[",
"first_data",
":",
"]",
"data",
"=",
"add_atr_header",
"(",
"raw",
")",
... | Create a new disk image in the specified format | [
"Create",
"a",
"new",
"disk",
"image",
"in",
"the",
"specified",
"format"
] | dafba8e74c718e95cf81fd72c184fa193ecec730 | https://github.com/robmcmullen/atrcopy/blob/dafba8e74c718e95cf81fd72c184fa193ecec730/atrcopy/ataridos.py#L489-L497 | train | 48,832 |
simse/pymitv | pymitv/tv.py | TV.set_source | def set_source(self, source):
"""Selects and saves source."""
route = Navigator(source=self.source).navigate_to_source(source)
# Save new source
self.source = source
return self._send_keystroke(route, wait=True) | python | def set_source(self, source):
"""Selects and saves source."""
route = Navigator(source=self.source).navigate_to_source(source)
# Save new source
self.source = source
return self._send_keystroke(route, wait=True) | [
"def",
"set_source",
"(",
"self",
",",
"source",
")",
":",
"route",
"=",
"Navigator",
"(",
"source",
"=",
"self",
".",
"source",
")",
".",
"navigate_to_source",
"(",
"source",
")",
"# Save new source\r",
"self",
".",
"source",
"=",
"source",
"return",
"sel... | Selects and saves source. | [
"Selects",
"and",
"saves",
"source",
"."
] | 03213f591d70fbf90ba2b6af372e474c9bfb99f6 | https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/tv.py#L100-L107 | train | 48,833 |
openstax/cnx-archive | cnxarchive/__init__.py | find_migrations_directory | def find_migrations_directory():
"""Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
"""
here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'sql/migrations') | python | def find_migrations_directory():
"""Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator.
"""
here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'sql/migrations') | [
"def",
"find_migrations_directory",
"(",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"'sql/migrations'",
")"
] | Finds and returns the location of the database migrations directory.
This function is used from a setuptools entry-point for db-migrator. | [
"Finds",
"and",
"returns",
"the",
"location",
"of",
"the",
"database",
"migrations",
"directory",
".",
"This",
"function",
"is",
"used",
"from",
"a",
"setuptools",
"entry",
"-",
"point",
"for",
"db",
"-",
"migrator",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L34-L39 | train | 48,834 |
openstax/cnx-archive | cnxarchive/__init__.py | declare_api_routes | def declare_api_routes(config):
"""Declare routes, with a custom pregenerator."""
# The pregenerator makes sure we can generate a path using
# request.route_path even if we don't have all the variables.
#
# For example, instead of having to do this:
# request.route_path('resource', hash=hash, ignore='')
# it's possible to do this:
# request.route_path('resource', hash=hash)
def pregenerator(path):
# find all the variables in the path
variables = [(s.split(':')[0], '') for s in path.split('{')[1:]]
def wrapper(request, elements, kwargs):
modified_kwargs = dict(variables)
modified_kwargs.update(kwargs)
return elements, modified_kwargs
return wrapper
def add_route(name, path, *args, **kwargs):
return config.add_route(name, path, *args,
pregenerator=pregenerator(path), **kwargs)
add_route('content', '/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}') # noqa cnxarchive.views:get_content
add_route('resource', '/resources/{hash}{ignore:(/.*)?}') # noqa cnxarchive.views:get_resource
add_route('export', '/exports/{ident_hash}.{type}{ignore:(/.*)?}') # noqa cnxarchive.views:get_export
add_route('extras', '/extras{key:(/(featured|messages|licenses|subjects|languages))?}') # noqa cnxarchive.views:extras
add_route('content-extras', '/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}') # noqa cnxarchive.views:get_extra
add_route('search', '/search') # cnxarchive.views:search
add_route('in-book-search', '/search/{ident_hash:([^:/]+)}') # noqa cnxarchive.views:in-book-search
add_route('in-book-search-page', '/search/{ident_hash:([^:/]+)}:{page_ident_hash}') # noqa cnxarchive.views:in_book_search_highlighted_results
add_route('sitemap-index', '/sitemap_index.xml') # noqa cnxarchive.views:sitemap
add_route('sitemap', '/sitemap-{from_id}.xml') # noqa cnxarchive.views:sitemap
add_route('robots', '/robots.txt') # noqa cnxarchive.views:robots
add_route('legacy-redirect', '/content/{objid}{ignore:(/)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-latest', '/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-w-version', '/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('recent', '/feeds/recent.rss') # cnxarchive.views:recent
add_route('oai', '/feeds/oai') # cnxarchive.views:oai
add_route('xpath', '/xpath.html') # cnxarchive.views.xpath
add_route('xpath-json', '/xpath.json') | python | def declare_api_routes(config):
"""Declare routes, with a custom pregenerator."""
# The pregenerator makes sure we can generate a path using
# request.route_path even if we don't have all the variables.
#
# For example, instead of having to do this:
# request.route_path('resource', hash=hash, ignore='')
# it's possible to do this:
# request.route_path('resource', hash=hash)
def pregenerator(path):
# find all the variables in the path
variables = [(s.split(':')[0], '') for s in path.split('{')[1:]]
def wrapper(request, elements, kwargs):
modified_kwargs = dict(variables)
modified_kwargs.update(kwargs)
return elements, modified_kwargs
return wrapper
def add_route(name, path, *args, **kwargs):
return config.add_route(name, path, *args,
pregenerator=pregenerator(path), **kwargs)
add_route('content', '/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}') # noqa cnxarchive.views:get_content
add_route('resource', '/resources/{hash}{ignore:(/.*)?}') # noqa cnxarchive.views:get_resource
add_route('export', '/exports/{ident_hash}.{type}{ignore:(/.*)?}') # noqa cnxarchive.views:get_export
add_route('extras', '/extras{key:(/(featured|messages|licenses|subjects|languages))?}') # noqa cnxarchive.views:extras
add_route('content-extras', '/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}') # noqa cnxarchive.views:get_extra
add_route('search', '/search') # cnxarchive.views:search
add_route('in-book-search', '/search/{ident_hash:([^:/]+)}') # noqa cnxarchive.views:in-book-search
add_route('in-book-search-page', '/search/{ident_hash:([^:/]+)}:{page_ident_hash}') # noqa cnxarchive.views:in_book_search_highlighted_results
add_route('sitemap-index', '/sitemap_index.xml') # noqa cnxarchive.views:sitemap
add_route('sitemap', '/sitemap-{from_id}.xml') # noqa cnxarchive.views:sitemap
add_route('robots', '/robots.txt') # noqa cnxarchive.views:robots
add_route('legacy-redirect', '/content/{objid}{ignore:(/)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-latest', '/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('legacy-redirect-w-version', '/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}') # noqa cnxarchive.views:redirect_legacy_content
add_route('recent', '/feeds/recent.rss') # cnxarchive.views:recent
add_route('oai', '/feeds/oai') # cnxarchive.views:oai
add_route('xpath', '/xpath.html') # cnxarchive.views.xpath
add_route('xpath-json', '/xpath.json') | [
"def",
"declare_api_routes",
"(",
"config",
")",
":",
"# The pregenerator makes sure we can generate a path using",
"# request.route_path even if we don't have all the variables.",
"#",
"# For example, instead of having to do this:",
"# request.route_path('resource', hash=hash, ignore='')",
... | Declare routes, with a custom pregenerator. | [
"Declare",
"routes",
"with",
"a",
"custom",
"pregenerator",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L42-L82 | train | 48,835 |
openstax/cnx-archive | cnxarchive/__init__.py | declare_type_info | def declare_type_info(config):
"""Lookup type info from app configuration."""
settings = config.registry.settings
settings['_type_info'] = []
for line in settings['exports-allowable-types'].splitlines():
if not line.strip():
continue
type_name, type_info = line.strip().split(':', 1)
type_info = type_info.split(',', 3)
settings['_type_info'].append((type_name, {
'type_name': type_name,
'file_extension': type_info[0],
'mimetype': type_info[1],
'user_friendly_name': type_info[2],
'description': type_info[3],
})) | python | def declare_type_info(config):
"""Lookup type info from app configuration."""
settings = config.registry.settings
settings['_type_info'] = []
for line in settings['exports-allowable-types'].splitlines():
if not line.strip():
continue
type_name, type_info = line.strip().split(':', 1)
type_info = type_info.split(',', 3)
settings['_type_info'].append((type_name, {
'type_name': type_name,
'file_extension': type_info[0],
'mimetype': type_info[1],
'user_friendly_name': type_info[2],
'description': type_info[3],
})) | [
"def",
"declare_type_info",
"(",
"config",
")",
":",
"settings",
"=",
"config",
".",
"registry",
".",
"settings",
"settings",
"[",
"'_type_info'",
"]",
"=",
"[",
"]",
"for",
"line",
"in",
"settings",
"[",
"'exports-allowable-types'",
"]",
".",
"splitlines",
... | Lookup type info from app configuration. | [
"Lookup",
"type",
"info",
"from",
"app",
"configuration",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L85-L100 | train | 48,836 |
openstax/cnx-archive | cnxarchive/__init__.py | main | def main(global_config, **settings):
"""Main WSGI application factory."""
initialize_sentry_integration()
config = Configurator(settings=settings)
declare_api_routes(config)
declare_type_info(config)
# allowing the pyramid templates to render rss and xml
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.rss')
config.add_jinja2_renderer('.xml')
mandatory_settings = ['exports-directories', 'exports-allowable-types']
for setting in mandatory_settings:
if not settings.get(setting, None):
raise ValueError('Missing {} config setting.'.format(setting))
config.scan(ignore='.tests')
config.include('cnxarchive.events.main')
config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
return config.make_wsgi_app() | python | def main(global_config, **settings):
"""Main WSGI application factory."""
initialize_sentry_integration()
config = Configurator(settings=settings)
declare_api_routes(config)
declare_type_info(config)
# allowing the pyramid templates to render rss and xml
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.rss')
config.add_jinja2_renderer('.xml')
mandatory_settings = ['exports-directories', 'exports-allowable-types']
for setting in mandatory_settings:
if not settings.get(setting, None):
raise ValueError('Missing {} config setting.'.format(setting))
config.scan(ignore='.tests')
config.include('cnxarchive.events.main')
config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
return config.make_wsgi_app() | [
"def",
"main",
"(",
"global_config",
",",
"*",
"*",
"settings",
")",
":",
"initialize_sentry_integration",
"(",
")",
"config",
"=",
"Configurator",
"(",
"settings",
"=",
"settings",
")",
"declare_api_routes",
"(",
"config",
")",
"declare_type_info",
"(",
"config... | Main WSGI application factory. | [
"Main",
"WSGI",
"application",
"factory",
"."
] | d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4 | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L137-L159 | train | 48,837 |
lucasmaystre/choix | choix/lsr.py | _init_lsr | def _init_lsr(n_items, alpha, initial_params):
"""Initialize the LSR Markov chain and the weights."""
if initial_params is None:
weights = np.ones(n_items)
else:
weights = exp_transform(initial_params)
chain = alpha * np.ones((n_items, n_items), dtype=float)
return weights, chain | python | def _init_lsr(n_items, alpha, initial_params):
"""Initialize the LSR Markov chain and the weights."""
if initial_params is None:
weights = np.ones(n_items)
else:
weights = exp_transform(initial_params)
chain = alpha * np.ones((n_items, n_items), dtype=float)
return weights, chain | [
"def",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"initial_params",
")",
":",
"if",
"initial_params",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"n_items",
")",
"else",
":",
"weights",
"=",
"exp_transform",
"(",
"initial_params",
")",
... | Initialize the LSR Markov chain and the weights. | [
"Initialize",
"the",
"LSR",
"Markov",
"chain",
"and",
"the",
"weights",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L10-L17 | train | 48,838 |
lucasmaystre/choix | choix/lsr.py | _ilsr | def _ilsr(fun, params, max_iter, tol):
"""Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
converged = NormOfDifferenceTest(tol, order=1)
for _ in range(max_iter):
params = fun(initial_params=params)
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter)) | python | def _ilsr(fun, params, max_iter, tol):
"""Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations.
"""
converged = NormOfDifferenceTest(tol, order=1)
for _ in range(max_iter):
params = fun(initial_params=params)
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter)) | [
"def",
"_ilsr",
"(",
"fun",
",",
"params",
",",
"max_iter",
",",
"tol",
")",
":",
"converged",
"=",
"NormOfDifferenceTest",
"(",
"tol",
",",
"order",
"=",
"1",
")",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"params",
"=",
"fun",
"(",
"i... | Iteratively refine LSR estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after ``max_iter`` iterations. | [
"Iteratively",
"refine",
"LSR",
"estimates",
"until",
"convergence",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L20-L33 | train | 48,839 |
lucasmaystre/choix | choix/lsr.py | lsr_pairwise_dense | def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
"""
n_items = comp_mat.shape[0]
ws, chain = _init_lsr(n_items, alpha, initial_params)
denom = np.tile(ws, (n_items, 1))
chain += comp_mat.T / (denom + denom.T)
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain)) | python | def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
"""Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters.
"""
n_items = comp_mat.shape[0]
ws, chain = _init_lsr(n_items, alpha, initial_params)
denom = np.tile(ws, (n_items, 1))
chain += comp_mat.T / (denom + denom.T)
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain)) | [
"def",
"lsr_pairwise_dense",
"(",
"comp_mat",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
")",
":",
"n_items",
"=",
"comp_mat",
".",
"shape",
"[",
"0",
"]",
"ws",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"ini... | Compute the LSR estimate of model parameters given dense data.
This function implements the Luce Spectral Ranking inference algorithm
[MG15]_ for dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.lsr_pairwise`, this function is particularly
efficient for dense pairwise-comparison datasets (i.e., containing many
comparisons for a large fraction of item pairs).
The argument ``initial_params`` can be used to iteratively refine an
existing parameter estimate (see the implementation of
:func:`~choix.ilsr_pairwise` for an idea on how this works). If it is set
to `None` (the default), the all-ones vector is used.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to build the transition rates of the LSR Markov chain.
Returns
-------
params : np.array
An estimate of model parameters. | [
"Compute",
"the",
"LSR",
"estimate",
"of",
"model",
"parameters",
"given",
"dense",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L112-L154 | train | 48,840 |
lucasmaystre/choix | choix/lsr.py | ilsr_pairwise_dense | def ilsr_pairwise_dense(
comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol) | python | def ilsr_pairwise_dense(
comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
"""Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
"""
fun = functools.partial(
lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol) | [
"def",
"ilsr_pairwise_dense",
"(",
"comp_mat",
",",
"alpha",
"=",
"0.0",
",",
"initial_params",
"=",
"None",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"fun",
"=",
"functools",
".",
"partial",
"(",
"lsr_pairwise_dense",
",",
"comp_mat"... | Compute the ML estimate of model parameters given dense data.
This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.
The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.
In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
comp_mat : np.array
2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters. | [
"Compute",
"the",
"ML",
"estimate",
"of",
"model",
"parameters",
"given",
"dense",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L157-L197 | train | 48,841 |
lucasmaystre/choix | choix/lsr.py | rank_centrality | def rank_centrality(n_items, data, alpha=0.0):
"""Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
_, chain = _init_lsr(n_items, alpha, None)
for winner, loser in data:
chain[loser, winner] += 1.0
# Transform the counts into ratios.
idx = chain > 0 # Indices (i,j) of non-zero entries.
chain[idx] = chain[idx] / (chain + chain.T)[idx]
# Finalize the Markov chain by adding the self-transition rate.
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain)) | python | def rank_centrality(n_items, data, alpha=0.0):
"""Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters.
"""
_, chain = _init_lsr(n_items, alpha, None)
for winner, loser in data:
chain[loser, winner] += 1.0
# Transform the counts into ratios.
idx = chain > 0 # Indices (i,j) of non-zero entries.
chain[idx] = chain[idx] / (chain + chain.T)[idx]
# Finalize the Markov chain by adding the self-transition rate.
chain -= np.diag(chain.sum(axis=1))
return log_transform(statdist(chain)) | [
"def",
"rank_centrality",
"(",
"n_items",
",",
"data",
",",
"alpha",
"=",
"0.0",
")",
":",
"_",
",",
"chain",
"=",
"_init_lsr",
"(",
"n_items",
",",
"alpha",
",",
"None",
")",
"for",
"winner",
",",
"loser",
"in",
"data",
":",
"chain",
"[",
"loser",
... | Compute the Rank Centrality estimate of model parameters.
This function implements Negahban et al.'s Rank Centrality algorithm
[NOS12]_. The algorithm is similar to :func:`~choix.ilsr_pairwise`, but
considers the *ratio* of wins for each pair (instead of the total count).
The transition rates of the Rank Centrality Markov chain are initialized
with ``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Pairwise-comparison data.
alpha : float, optional
Regularization parameter.
Returns
-------
params : numpy.ndarray
An estimate of model parameters. | [
"Compute",
"the",
"Rank",
"Centrality",
"estimate",
"of",
"model",
"parameters",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/lsr.py#L200-L233 | train | 48,842 |
lucasmaystre/choix | choix/ep.py | _log_phi | def _log_phi(z):
"""Stable computation of the log of the Normal CDF and its derivative."""
# Adapted from the GPML function `logphi.m`.
if z * z < 0.0492:
# First case: z close to zero.
coef = -z / SQRT2PI
val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0)
res = -2 * val - log(2)
dres = exp(-(z * z) / 2 - res) / SQRT2PI
elif z < -11.3137:
# Second case: z very small.
num = functools.reduce(
lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741)
den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0)
res = log(num / (2 * den)) - (z * z) / 2
dres = abs(den / num) * sqrt(2.0 / pi)
else:
res = log(normal_cdf(z))
dres = exp(-(z * z) / 2 - res) / SQRT2PI
return res, dres | python | def _log_phi(z):
"""Stable computation of the log of the Normal CDF and its derivative."""
# Adapted from the GPML function `logphi.m`.
if z * z < 0.0492:
# First case: z close to zero.
coef = -z / SQRT2PI
val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0)
res = -2 * val - log(2)
dres = exp(-(z * z) / 2 - res) / SQRT2PI
elif z < -11.3137:
# Second case: z very small.
num = functools.reduce(
lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741)
den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0)
res = log(num / (2 * den)) - (z * z) / 2
dres = abs(den / num) * sqrt(2.0 / pi)
else:
res = log(normal_cdf(z))
dres = exp(-(z * z) / 2 - res) / SQRT2PI
return res, dres | [
"def",
"_log_phi",
"(",
"z",
")",
":",
"# Adapted from the GPML function `logphi.m`.",
"if",
"z",
"*",
"z",
"<",
"0.0492",
":",
"# First case: z close to zero.",
"coef",
"=",
"-",
"z",
"/",
"SQRT2PI",
"val",
"=",
"functools",
".",
"reduce",
"(",
"lambda",
"acc... | Stable computation of the log of the Normal CDF and its derivative. | [
"Stable",
"computation",
"of",
"the",
"log",
"of",
"the",
"Normal",
"CDF",
"and",
"its",
"derivative",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L157-L176 | train | 48,843 |
lucasmaystre/choix | choix/ep.py | _init_ws | def _init_ws(n_items, comparisons, prior_inv, tau, nu):
"""Initialize parameters in the weight space."""
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
for i, (a, b) in enumerate(comparisons):
prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
xs[a] += nu[i]
xs[b] -= nu[i]
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
return mean, cov, xs , prec | python | def _init_ws(n_items, comparisons, prior_inv, tau, nu):
"""Initialize parameters in the weight space."""
prec = np.zeros((n_items, n_items))
xs = np.zeros(n_items)
for i, (a, b) in enumerate(comparisons):
prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
xs[a] += nu[i]
xs[b] -= nu[i]
cov = inv_posdef(prior_inv + prec)
mean = cov.dot(xs)
return mean, cov, xs , prec | [
"def",
"_init_ws",
"(",
"n_items",
",",
"comparisons",
",",
"prior_inv",
",",
"tau",
",",
"nu",
")",
":",
"prec",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_items",
",",
"n_items",
")",
")",
"xs",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"for",
"... | Initialize parameters in the weight space. | [
"Initialize",
"parameters",
"in",
"the",
"weight",
"space",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/ep.py#L227-L237 | train | 48,844 |
lucasmaystre/choix | choix/utils.py | exp_transform | def exp_transform(params):
"""Transform parameters into exp-scale weights."""
weights = np.exp(np.asarray(params) - np.mean(params))
return (len(weights) / weights.sum()) * weights | python | def exp_transform(params):
"""Transform parameters into exp-scale weights."""
weights = np.exp(np.asarray(params) - np.mean(params))
return (len(weights) / weights.sum()) * weights | [
"def",
"exp_transform",
"(",
"params",
")",
":",
"weights",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"asarray",
"(",
"params",
")",
"-",
"np",
".",
"mean",
"(",
"params",
")",
")",
"return",
"(",
"len",
"(",
"weights",
")",
"/",
"weights",
".",
"su... | Transform parameters into exp-scale weights. | [
"Transform",
"parameters",
"into",
"exp",
"-",
"scale",
"weights",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L22-L25 | train | 48,845 |
lucasmaystre/choix | choix/utils.py | softmax | def softmax(xs):
"""Stable implementation of the softmax function."""
ys = xs - np.max(xs)
exps = np.exp(ys)
return exps / exps.sum(axis=0) | python | def softmax(xs):
"""Stable implementation of the softmax function."""
ys = xs - np.max(xs)
exps = np.exp(ys)
return exps / exps.sum(axis=0) | [
"def",
"softmax",
"(",
"xs",
")",
":",
"ys",
"=",
"xs",
"-",
"np",
".",
"max",
"(",
"xs",
")",
"exps",
"=",
"np",
".",
"exp",
"(",
"ys",
")",
"return",
"exps",
"/",
"exps",
".",
"sum",
"(",
"axis",
"=",
"0",
")"
] | Stable implementation of the softmax function. | [
"Stable",
"implementation",
"of",
"the",
"softmax",
"function",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L28-L32 | train | 48,846 |
lucasmaystre/choix | choix/utils.py | inv_posdef | def inv_posdef(mat):
"""Stable inverse of a positive definite matrix."""
# See:
# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf
# - http://scicomp.stackexchange.com/questions/3188
chol = np.linalg.cholesky(mat)
ident = np.eye(mat.shape[0])
res = solve_triangular(chol, ident, lower=True, overwrite_b=True)
return np.transpose(res).dot(res) | python | def inv_posdef(mat):
"""Stable inverse of a positive definite matrix."""
# See:
# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf
# - http://scicomp.stackexchange.com/questions/3188
chol = np.linalg.cholesky(mat)
ident = np.eye(mat.shape[0])
res = solve_triangular(chol, ident, lower=True, overwrite_b=True)
return np.transpose(res).dot(res) | [
"def",
"inv_posdef",
"(",
"mat",
")",
":",
"# See:",
"# - http://www.seas.ucla.edu/~vandenbe/103/lectures/chol.pdf",
"# - http://scicomp.stackexchange.com/questions/3188",
"chol",
"=",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"mat",
")",
"ident",
"=",
"np",
".",
"eye"... | Stable inverse of a positive definite matrix. | [
"Stable",
"inverse",
"of",
"a",
"positive",
"definite",
"matrix",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L46-L54 | train | 48,847 |
lucasmaystre/choix | choix/utils.py | footrule_dist | def footrule_dist(params1, params2=None):
r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="average")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="average")
return np.sum(np.abs(ranks1 - ranks2)) | python | def footrule_dist(params1, params2=None):
r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="average")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="average")
return np.sum(np.abs(ranks1 - ranks2)) | [
"def",
"footrule_dist",
"(",
"params1",
",",
"params2",
"=",
"None",
")",
":",
"assert",
"params2",
"is",
"None",
"or",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"ranks1",
"=",
"rankdata",
"(",
"params1",
",",
"method",
"=",
"\"aver... | r"""Compute Spearman's footrule distance between two models.
This function computes Spearman's footrule distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. Spearman's footrule distance is
defined by
.. math::
\sum_{i=1}^N | \sigma_i - \tau_i |
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Spearman's footrule distance. | [
"r",
"Compute",
"Spearman",
"s",
"footrule",
"distance",
"between",
"two",
"models",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L57-L95 | train | 48,848 |
lucasmaystre/choix | choix/utils.py | kendalltau_dist | def kendalltau_dist(params1, params2=None):
r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="ordinal")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="ordinal")
tau, _ = kendalltau(ranks1, ranks2)
n_items = len(params1)
n_pairs = n_items * (n_items - 1) / 2
return round((n_pairs - n_pairs * tau) / 2) | python | def kendalltau_dist(params1, params2=None):
r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance.
"""
assert params2 is None or len(params1) == len(params2)
ranks1 = rankdata(params1, method="ordinal")
if params2 is None:
ranks2 = np.arange(1, len(params1) + 1, dtype=float)
else:
ranks2 = rankdata(params2, method="ordinal")
tau, _ = kendalltau(ranks1, ranks2)
n_items = len(params1)
n_pairs = n_items * (n_items - 1) / 2
return round((n_pairs - n_pairs * tau) / 2) | [
"def",
"kendalltau_dist",
"(",
"params1",
",",
"params2",
"=",
"None",
")",
":",
"assert",
"params2",
"is",
"None",
"or",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"ranks1",
"=",
"rankdata",
"(",
"params1",
",",
"method",
"=",
"\"or... | r"""Compute the Kendall tau distance between two models.
This function computes the Kendall tau distance between the rankings
induced by two parameter vectors. Let :math:`\sigma_i` be the rank of item
``i`` in the model described by ``params1``, and :math:`\tau_i` be its rank
in the model described by ``params2``. The Kendall tau distance is defined
as the number of pairwise disagreements between the two rankings, i.e.,
.. math::
\sum_{i=1}^N \sum_{j=1}^N
\mathbf{1} \{ \sigma_i > \sigma_j \wedge \tau_i < \tau_j \}
By convention, items with the lowest parameters are ranked first (i.e.,
sorted using the natural order).
If the argument ``params2`` is ``None``, the second model is assumed to
rank the items by their index: item ``0`` has rank 1, item ``1`` has rank
2, etc.
If some values are equal within a parameter vector, all items are given a
distinct rank, corresponding to the order in which the values occur.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like, optional
Parameters of the second model.
Returns
-------
dist : float
Kendall tau distance. | [
"r",
"Compute",
"the",
"Kendall",
"tau",
"distance",
"between",
"two",
"models",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L98-L143 | train | 48,849 |
lucasmaystre/choix | choix/utils.py | rmse | def rmse(params1, params2):
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
"""
assert len(params1) == len(params2)
params1 = np.asarray(params1) - np.mean(params1)
params2 = np.asarray(params2) - np.mean(params2)
sqrt_n = math.sqrt(len(params1))
return np.linalg.norm(params1 - params2, ord=2) / sqrt_n | python | def rmse(params1, params2):
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
"""
assert len(params1) == len(params2)
params1 = np.asarray(params1) - np.mean(params1)
params2 = np.asarray(params2) - np.mean(params2)
sqrt_n = math.sqrt(len(params1))
return np.linalg.norm(params1 - params2, ord=2) / sqrt_n | [
"def",
"rmse",
"(",
"params1",
",",
"params2",
")",
":",
"assert",
"len",
"(",
"params1",
")",
"==",
"len",
"(",
"params2",
")",
"params1",
"=",
"np",
".",
"asarray",
"(",
"params1",
")",
"-",
"np",
".",
"mean",
"(",
"params1",
")",
"params2",
"=",... | r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error. | [
"r",
"Compute",
"the",
"root",
"-",
"mean",
"-",
"squared",
"error",
"between",
"two",
"models",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L146-L165 | train | 48,850 |
lucasmaystre/choix | choix/utils.py | statdist | def statdist(generator):
"""Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution.
"""
generator = np.asarray(generator)
n = generator.shape[0]
with warnings.catch_warnings():
# The LU decomposition raises a warning when the generator matrix is
# singular (which it, by construction, is!).
warnings.filterwarnings("ignore")
lu, piv = spl.lu_factor(generator.T, check_finite=False)
# The last row contains 0's only.
left = lu[:-1,:-1]
right = -lu[:-1,-1]
# Solves system `left * x = right`. Assumes that `left` is
# upper-triangular (ignores lower triangle).
try:
res = spl.solve_triangular(left, right, check_finite=False)
except:
# Ideally we would like to catch `spl.LinAlgError` only, but there seems
# to be a bug in scipy, in the code that raises the LinAlgError (!!).
raise ValueError(
"stationary distribution could not be computed. "
"Perhaps the Markov chain has more than one absorbing class?")
res = np.append(res, 1.0)
return (n / res.sum()) * res | python | def statdist(generator):
"""Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution.
"""
generator = np.asarray(generator)
n = generator.shape[0]
with warnings.catch_warnings():
# The LU decomposition raises a warning when the generator matrix is
# singular (which it, by construction, is!).
warnings.filterwarnings("ignore")
lu, piv = spl.lu_factor(generator.T, check_finite=False)
# The last row contains 0's only.
left = lu[:-1,:-1]
right = -lu[:-1,-1]
# Solves system `left * x = right`. Assumes that `left` is
# upper-triangular (ignores lower triangle).
try:
res = spl.solve_triangular(left, right, check_finite=False)
except:
# Ideally we would like to catch `spl.LinAlgError` only, but there seems
# to be a bug in scipy, in the code that raises the LinAlgError (!!).
raise ValueError(
"stationary distribution could not be computed. "
"Perhaps the Markov chain has more than one absorbing class?")
res = np.append(res, 1.0)
return (n / res.sum()) * res | [
"def",
"statdist",
"(",
"generator",
")",
":",
"generator",
"=",
"np",
".",
"asarray",
"(",
"generator",
")",
"n",
"=",
"generator",
".",
"shape",
"[",
"0",
"]",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# The LU decomposition raises a warni... | Compute the stationary distribution of a Markov chain.
Parameters
----------
generator : array_like
Infinitesimal generator matrix of the Markov chain.
Returns
-------
dist : numpy.ndarray
The unnormalized stationary distribution of the Markov chain.
Raises
------
ValueError
If the Markov chain does not have a unique stationary distribution. | [
"Compute",
"the",
"stationary",
"distribution",
"of",
"a",
"Markov",
"chain",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L218-L257 | train | 48,851 |
lucasmaystre/choix | choix/utils.py | generate_params | def generate_params(n_items, interval=5.0, ordered=False):
r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters.
"""
params = np.random.uniform(low=0, high=interval, size=n_items)
if ordered:
params.sort()
return params - params.mean() | python | def generate_params(n_items, interval=5.0, ordered=False):
r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters.
"""
params = np.random.uniform(low=0, high=interval, size=n_items)
if ordered:
params.sort()
return params - params.mean() | [
"def",
"generate_params",
"(",
"n_items",
",",
"interval",
"=",
"5.0",
",",
"ordered",
"=",
"False",
")",
":",
"params",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"interval",
",",
"size",
"=",
"n_items",
")",
... | r"""Generate random model parameters.
This function samples a parameter independently and uniformly for each
item. ``interval`` defines the width of the uniform distribution.
Parameters
----------
n_items : int
Number of distinct items.
interval : float
Sampling interval.
ordered : bool, optional
If true, the parameters are ordered from lowest to highest.
Returns
-------
params : numpy.ndarray
Model parameters. | [
"r",
"Generate",
"random",
"model",
"parameters",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L260-L283 | train | 48,852 |
lucasmaystre/choix | choix/utils.py | compare | def compare(items, params, rank=False):
"""Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``.
"""
probs = probabilities(items, params)
if rank:
return np.random.choice(items, size=len(items), replace=False, p=probs)
else:
return np.random.choice(items, p=probs) | python | def compare(items, params, rank=False):
"""Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``.
"""
probs = probabilities(items, params)
if rank:
return np.random.choice(items, size=len(items), replace=False, p=probs)
else:
return np.random.choice(items, p=probs) | [
"def",
"compare",
"(",
"items",
",",
"params",
",",
"rank",
"=",
"False",
")",
":",
"probs",
"=",
"probabilities",
"(",
"items",
",",
"params",
")",
"if",
"rank",
":",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"items",
",",
"size",
"=",
"l... | Generate a comparison outcome that follows Luce's axiom.
This function samples an outcome for the comparison of a subset of items,
from a model parametrized by ``params``. If ``rank`` is True, it returns a
ranking over the items, otherwise it returns a single item.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
rank : bool, optional
If true, returns a ranking over the items instead of a single item.
Returns
-------
outcome : int or list of int
The chosen item, or a ranking over ``items``. | [
"Generate",
"a",
"comparison",
"outcome",
"that",
"follows",
"Luce",
"s",
"axiom",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L354-L379 | train | 48,853 |
lucasmaystre/choix | choix/utils.py | probabilities | def probabilities(items, params):
"""Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``.
"""
params = np.asarray(params)
return softmax(params.take(items)) | python | def probabilities(items, params):
"""Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``.
"""
params = np.asarray(params)
return softmax(params.take(items)) | [
"def",
"probabilities",
"(",
"items",
",",
"params",
")",
":",
"params",
"=",
"np",
".",
"asarray",
"(",
"params",
")",
"return",
"softmax",
"(",
"params",
".",
"take",
"(",
"items",
")",
")"
] | Compute the comparison outcome probabilities given a subset of items.
This function computes, for each item in ``items``, the probability that it
would win (i.e., be chosen) in a comparison involving the items, given
model parameters.
Parameters
----------
items : list
Subset of items to compare.
params : array_like
Model parameters.
Returns
-------
probs : numpy.ndarray
A probability distribution over ``items``. | [
"Compute",
"the",
"comparison",
"outcome",
"probabilities",
"given",
"a",
"subset",
"of",
"items",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L382-L402 | train | 48,854 |
lucasmaystre/choix | choix/opt.py | Top1Fcts.from_rankings | def from_rankings(cls, data, penalty):
"""Alternative constructor for ranking data."""
top1 = list()
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
top1.append((winner, ranking[i+1:]))
return cls(top1, penalty) | python | def from_rankings(cls, data, penalty):
"""Alternative constructor for ranking data."""
top1 = list()
for ranking in data:
for i, winner in enumerate(ranking[:-1]):
top1.append((winner, ranking[i+1:]))
return cls(top1, penalty) | [
"def",
"from_rankings",
"(",
"cls",
",",
"data",
",",
"penalty",
")",
":",
"top1",
"=",
"list",
"(",
")",
"for",
"ranking",
"in",
"data",
":",
"for",
"i",
",",
"winner",
"in",
"enumerate",
"(",
"ranking",
"[",
":",
"-",
"1",
"]",
")",
":",
"top1"... | Alternative constructor for ranking data. | [
"Alternative",
"constructor",
"for",
"ranking",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/opt.py#L69-L75 | train | 48,855 |
lucasmaystre/choix | choix/mm.py | _mm | def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
"""
Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations.
"""
if initial_params is None:
params = np.zeros(n_items)
else:
params = initial_params
converged = NormOfDifferenceTest(tol=tol, order=1)
for _ in range(max_iter):
nums, denoms = mm_fun(n_items, data, params)
params = log_transform((nums + alpha) / (denoms + alpha))
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter)) | python | def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
"""
Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations.
"""
if initial_params is None:
params = np.zeros(n_items)
else:
params = initial_params
converged = NormOfDifferenceTest(tol=tol, order=1)
for _ in range(max_iter):
nums, denoms = mm_fun(n_items, data, params)
params = log_transform((nums + alpha) / (denoms + alpha))
if converged(params):
return params
raise RuntimeError("Did not converge after {} iterations".format(max_iter)) | [
"def",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"mm_fun",
")",
":",
"if",
"initial_params",
"is",
"None",
":",
"params",
"=",
"np",
".",
"zeros",
"(",
"n_items",
")",
"else",
":",
"par... | Iteratively refine MM estimates until convergence.
Raises
------
RuntimeError
If the algorithm does not converge after `max_iter` iterations. | [
"Iteratively",
"refine",
"MM",
"estimates",
"until",
"convergence",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L10-L29 | train | 48,856 |
lucasmaystre/choix | choix/mm.py | _mm_pairwise | def _mm_pairwise(n_items, data, params):
"""Inner loop of MM algorithm for pairwise data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, loser in data:
wins[winner] += 1.0
val = 1.0 / (weights[winner] + weights[loser])
denoms[winner] += val
denoms[loser] += val
return wins, denoms | python | def _mm_pairwise(n_items, data, params):
"""Inner loop of MM algorithm for pairwise data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, loser in data:
wins[winner] += 1.0
val = 1.0 / (weights[winner] + weights[loser])
denoms[winner] += val
denoms[loser] += val
return wins, denoms | [
"def",
"_mm_pairwise",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"("... | Inner loop of MM algorithm for pairwise data. | [
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"pairwise",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L32-L42 | train | 48,857 |
lucasmaystre/choix | choix/mm.py | _mm_rankings | def _mm_rankings(n_items, data, params):
"""Inner loop of MM algorithm for ranking data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
wins[winner] += 1
val = 1.0 / sum_
for item in ranking[i:]:
denoms[item] += val
sum_ -= weights[winner]
return wins, denoms | python | def _mm_rankings(n_items, data, params):
"""Inner loop of MM algorithm for ranking data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for ranking in data:
sum_ = weights.take(ranking).sum()
for i, winner in enumerate(ranking[:-1]):
wins[winner] += 1
val = 1.0 / sum_
for item in ranking[i:]:
denoms[item] += val
sum_ -= weights[winner]
return wins, denoms | [
"def",
"_mm_rankings",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"("... | Inner loop of MM algorithm for ranking data. | [
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"ranking",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L83-L96 | train | 48,858 |
lucasmaystre/choix | choix/mm.py | _mm_top1 | def _mm_top1(n_items, data, params):
"""Inner loop of MM algorithm for top1 data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, losers in data:
wins[winner] += 1
val = 1 / (weights.take(losers).sum() + weights[winner])
for item in itertools.chain([winner], losers):
denoms[item] += val
return wins, denoms | python | def _mm_top1(n_items, data, params):
"""Inner loop of MM algorithm for top1 data."""
weights = exp_transform(params)
wins = np.zeros(n_items, dtype=float)
denoms = np.zeros(n_items, dtype=float)
for winner, losers in data:
wins[winner] += 1
val = 1 / (weights.take(losers).sum() + weights[winner])
for item in itertools.chain([winner], losers):
denoms[item] += val
return wins, denoms | [
"def",
"_mm_top1",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"wins",
"=",
"np",
".",
"zeros",
"(",
"n_items",
",",
"dtype",
"=",
"float",
")",
"denoms",
"=",
"np",
".",
"zeros",
"(",
... | Inner loop of MM algorithm for top1 data. | [
"Inner",
"loop",
"of",
"MM",
"algorithm",
"for",
"top1",
"data",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L136-L146 | train | 48,859 |
lucasmaystre/choix | choix/mm.py | _choicerank | def _choicerank(n_items, data, params):
"""Inner loop of ChoiceRank algorithm."""
weights = exp_transform(params)
adj, adj_t, traffic_in, traffic_out = data
# First phase of message passing.
zs = adj.dot(weights)
# Second phase of message passing.
with np.errstate(invalid="ignore"):
denoms = adj_t.dot(traffic_out / zs)
return traffic_in, denoms | python | def _choicerank(n_items, data, params):
"""Inner loop of ChoiceRank algorithm."""
weights = exp_transform(params)
adj, adj_t, traffic_in, traffic_out = data
# First phase of message passing.
zs = adj.dot(weights)
# Second phase of message passing.
with np.errstate(invalid="ignore"):
denoms = adj_t.dot(traffic_out / zs)
return traffic_in, denoms | [
"def",
"_choicerank",
"(",
"n_items",
",",
"data",
",",
"params",
")",
":",
"weights",
"=",
"exp_transform",
"(",
"params",
")",
"adj",
",",
"adj_t",
",",
"traffic_in",
",",
"traffic_out",
"=",
"data",
"# First phase of message passing.",
"zs",
"=",
"adj",
"... | Inner loop of ChoiceRank algorithm. | [
"Inner",
"loop",
"of",
"ChoiceRank",
"algorithm",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L186-L195 | train | 48,860 |
lucasmaystre/choix | choix/mm.py | choicerank | def choicerank(
digraph, traffic_in, traffic_out, weight=None,
initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
"""Compute the MAP estimate of a network choice model's parameters.
This function computes the maximum-a-posteriori (MAP) estimate of model
parameters given a network structure and node-level traffic data (see
:ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
The nodes are assumed to be labeled using consecutive integers starting
from 0.
Parameters
----------
digraph : networkx.DiGraph
Directed graph representing the network.
traffic_in : array_like
Number of arrivals at each node.
traffic_out : array_like
Number of departures at each node.
weight : str, optional
The edge attribute that holds the numerical value used for the edge
weight. If None (default) then all edge weights are 1.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The MAP estimate of model parameters.
Raises
------
ImportError
If the NetworkX library cannot be imported.
"""
import networkx as nx
# Compute the (sparse) adjacency matrix.
n_items = len(digraph)
nodes = np.arange(n_items)
adj = nx.to_scipy_sparse_matrix(digraph, nodelist=nodes, weight=weight)
adj_t = adj.T.tocsr()
# Process the data into a standard form.
traffic_in = np.asarray(traffic_in)
traffic_out = np.asarray(traffic_out)
data = (adj, adj_t, traffic_in, traffic_out)
return _mm(
n_items, data, initial_params, alpha, max_iter, tol, _choicerank) | python | def choicerank(
digraph, traffic_in, traffic_out, weight=None,
initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
"""Compute the MAP estimate of a network choice model's parameters.
This function computes the maximum-a-posteriori (MAP) estimate of model
parameters given a network structure and node-level traffic data (see
:ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
The nodes are assumed to be labeled using consecutive integers starting
from 0.
Parameters
----------
digraph : networkx.DiGraph
Directed graph representing the network.
traffic_in : array_like
Number of arrivals at each node.
traffic_out : array_like
Number of departures at each node.
weight : str, optional
The edge attribute that holds the numerical value used for the edge
weight. If None (default) then all edge weights are 1.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The MAP estimate of model parameters.
Raises
------
ImportError
If the NetworkX library cannot be imported.
"""
import networkx as nx
# Compute the (sparse) adjacency matrix.
n_items = len(digraph)
nodes = np.arange(n_items)
adj = nx.to_scipy_sparse_matrix(digraph, nodelist=nodes, weight=weight)
adj_t = adj.T.tocsr()
# Process the data into a standard form.
traffic_in = np.asarray(traffic_in)
traffic_out = np.asarray(traffic_out)
data = (adj, adj_t, traffic_in, traffic_out)
return _mm(
n_items, data, initial_params, alpha, max_iter, tol, _choicerank) | [
"def",
"choicerank",
"(",
"digraph",
",",
"traffic_in",
",",
"traffic_out",
",",
"weight",
"=",
"None",
",",
"initial_params",
"=",
"None",
",",
"alpha",
"=",
"1.0",
",",
"max_iter",
"=",
"10000",
",",
"tol",
"=",
"1e-8",
")",
":",
"import",
"networkx",
... | Compute the MAP estimate of a network choice model's parameters.
This function computes the maximum-a-posteriori (MAP) estimate of model
parameters given a network structure and node-level traffic data (see
:ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
The nodes are assumed to be labeled using consecutive integers starting
from 0.
Parameters
----------
digraph : networkx.DiGraph
Directed graph representing the network.
traffic_in : array_like
Number of arrivals at each node.
traffic_out : array_like
Number of departures at each node.
weight : str, optional
The edge attribute that holds the numerical value used for the edge
weight. If None (default) then all edge weights are 1.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The MAP estimate of model parameters.
Raises
------
ImportError
If the NetworkX library cannot be imported. | [
"Compute",
"the",
"MAP",
"estimate",
"of",
"a",
"network",
"choice",
"model",
"s",
"parameters",
"."
] | 05a57a10bb707338113a9d91601ca528ead7a881 | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L198-L252 | train | 48,861 |
balabit/typesafety | typesafety/validator.py | Validator.decorate | def decorate(cls, function):
'''
Decorate a function so the function call is checked whenever
a call is made. The calls that do not need any checks are skipped.
The `function` argument is the function to be decorated.
The return value will be either
* the function itself, if there is nothing to validate, or
* a proxy function that will execute the validation.
'''
should_skip = getattr(function, 'typesafety_skip', False)
if cls.is_function_validated(function) or should_skip:
return function
validator = cls(function)
if not validator.need_validate_arguments and \
not validator.need_validate_return_value:
return function
@functools.wraps(function)
def __wrapper(*args, **kwargs):
return validator(*args, **kwargs)
__wrapper.__validator__ = validator
return __wrapper | python | def decorate(cls, function):
'''
Decorate a function so the function call is checked whenever
a call is made. The calls that do not need any checks are skipped.
The `function` argument is the function to be decorated.
The return value will be either
* the function itself, if there is nothing to validate, or
* a proxy function that will execute the validation.
'''
should_skip = getattr(function, 'typesafety_skip', False)
if cls.is_function_validated(function) or should_skip:
return function
validator = cls(function)
if not validator.need_validate_arguments and \
not validator.need_validate_return_value:
return function
@functools.wraps(function)
def __wrapper(*args, **kwargs):
return validator(*args, **kwargs)
__wrapper.__validator__ = validator
return __wrapper | [
"def",
"decorate",
"(",
"cls",
",",
"function",
")",
":",
"should_skip",
"=",
"getattr",
"(",
"function",
",",
"'typesafety_skip'",
",",
"False",
")",
"if",
"cls",
".",
"is_function_validated",
"(",
"function",
")",
"or",
"should_skip",
":",
"return",
"funct... | Decorate a function so the function call is checked whenever
a call is made. The calls that do not need any checks are skipped.
The `function` argument is the function to be decorated.
The return value will be either
* the function itself, if there is nothing to validate, or
* a proxy function that will execute the validation. | [
"Decorate",
"a",
"function",
"so",
"the",
"function",
"call",
"is",
"checked",
"whenever",
"a",
"call",
"is",
"made",
".",
"The",
"calls",
"that",
"do",
"not",
"need",
"any",
"checks",
"are",
"skipped",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/validator.py#L75-L104 | train | 48,862 |
balabit/typesafety | typesafety/validator.py | Validator.undecorate | def undecorate(cls, function):
'''
Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator.
'''
if cls.is_function_validated(function):
return cls.get_function_validator(function).function
return function | python | def undecorate(cls, function):
'''
Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator.
'''
if cls.is_function_validated(function):
return cls.get_function_validator(function).function
return function | [
"def",
"undecorate",
"(",
"cls",
",",
"function",
")",
":",
"if",
"cls",
".",
"is_function_validated",
"(",
"function",
")",
":",
"return",
"cls",
".",
"get_function_validator",
"(",
"function",
")",
".",
"function",
"return",
"function"
] | Remove validator decoration from a function.
The `function` argument is the function to be cleaned from
the validator decorator. | [
"Remove",
"validator",
"decoration",
"from",
"a",
"function",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/validator.py#L107-L118 | train | 48,863 |
balabit/typesafety | typesafety/validator.py | Validator.validate_arguments | def validate_arguments(self, locals_dict):
'''
Validate the arguments passed to a function. If an error occurred,
the function will throw a TypesafetyError.
The `locals_dict` argument should be the local value dictionary of
the function. An example call would be like:
'''
for key, value, validator in self.__map_arguments(locals_dict):
if not self.__is_valid(value, validator):
key_name = repr(key)
func_name = self.__function.__name__
annotation = self.__argument_annotation.get(key)
message = self.ARG_TYPE_ERROR_MESSAGE.format(
key_name,
func_name,
self.__format_expectation(annotation),
value.__class__.__name__)
raise TypesafetyError(message) | python | def validate_arguments(self, locals_dict):
'''
Validate the arguments passed to a function. If an error occurred,
the function will throw a TypesafetyError.
The `locals_dict` argument should be the local value dictionary of
the function. An example call would be like:
'''
for key, value, validator in self.__map_arguments(locals_dict):
if not self.__is_valid(value, validator):
key_name = repr(key)
func_name = self.__function.__name__
annotation = self.__argument_annotation.get(key)
message = self.ARG_TYPE_ERROR_MESSAGE.format(
key_name,
func_name,
self.__format_expectation(annotation),
value.__class__.__name__)
raise TypesafetyError(message) | [
"def",
"validate_arguments",
"(",
"self",
",",
"locals_dict",
")",
":",
"for",
"key",
",",
"value",
",",
"validator",
"in",
"self",
".",
"__map_arguments",
"(",
"locals_dict",
")",
":",
"if",
"not",
"self",
".",
"__is_valid",
"(",
"value",
",",
"validator"... | Validate the arguments passed to a function. If an error occurred,
the function will throw a TypesafetyError.
The `locals_dict` argument should be the local value dictionary of
the function. An example call would be like: | [
"Validate",
"the",
"arguments",
"passed",
"to",
"a",
"function",
".",
"If",
"an",
"error",
"occurred",
"the",
"function",
"will",
"throw",
"a",
"TypesafetyError",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/validator.py#L155-L174 | train | 48,864 |
balabit/typesafety | typesafety/validator.py | Validator.validate_return_value | def validate_return_value(self, retval):
'''
Validate the return value of a function call. If an error occurred,
the function will throw a TypesafetyError.
The `retval` should contain the return value of the function call.
'''
if self.__return_annotation is None:
return
if not self.__is_valid(retval, self.__return_annotation):
func_name = self.__function.__name__
msg = self.RET_TYPE_ERROR_MESSAGE.format(
func_name,
self.__format_expectation(self.__return_annotation),
retval.__class__.__name__
)
raise TypesafetyError(msg) | python | def validate_return_value(self, retval):
'''
Validate the return value of a function call. If an error occurred,
the function will throw a TypesafetyError.
The `retval` should contain the return value of the function call.
'''
if self.__return_annotation is None:
return
if not self.__is_valid(retval, self.__return_annotation):
func_name = self.__function.__name__
msg = self.RET_TYPE_ERROR_MESSAGE.format(
func_name,
self.__format_expectation(self.__return_annotation),
retval.__class__.__name__
)
raise TypesafetyError(msg) | [
"def",
"validate_return_value",
"(",
"self",
",",
"retval",
")",
":",
"if",
"self",
".",
"__return_annotation",
"is",
"None",
":",
"return",
"if",
"not",
"self",
".",
"__is_valid",
"(",
"retval",
",",
"self",
".",
"__return_annotation",
")",
":",
"func_name"... | Validate the return value of a function call. If an error occurred,
the function will throw a TypesafetyError.
The `retval` should contain the return value of the function call. | [
"Validate",
"the",
"return",
"value",
"of",
"a",
"function",
"call",
".",
"If",
"an",
"error",
"occurred",
"the",
"function",
"will",
"throw",
"a",
"TypesafetyError",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/validator.py#L190-L208 | train | 48,865 |
balabit/typesafety | typesafety/finder.py | ModuleFinder.find_module | def find_module(self, fullname, path=None):
'''
Find the module. Required for the Python meta-loading mechanism.
This will do nothing, since we use the system to locate a module.
'''
loader = None
if self.__filter is None or self.__filter(fullname):
loader = ModuleLoader(self, fullname, path)
return loader | python | def find_module(self, fullname, path=None):
'''
Find the module. Required for the Python meta-loading mechanism.
This will do nothing, since we use the system to locate a module.
'''
loader = None
if self.__filter is None or self.__filter(fullname):
loader = ModuleLoader(self, fullname, path)
return loader | [
"def",
"find_module",
"(",
"self",
",",
"fullname",
",",
"path",
"=",
"None",
")",
":",
"loader",
"=",
"None",
"if",
"self",
".",
"__filter",
"is",
"None",
"or",
"self",
".",
"__filter",
"(",
"fullname",
")",
":",
"loader",
"=",
"ModuleLoader",
"(",
... | Find the module. Required for the Python meta-loading mechanism.
This will do nothing, since we use the system to locate a module. | [
"Find",
"the",
"module",
".",
"Required",
"for",
"the",
"Python",
"meta",
"-",
"loading",
"mechanism",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/finder.py#L137-L148 | train | 48,866 |
balabit/typesafety | typesafety/finder.py | ModuleFinder.load_module | def load_module(self, loader):
'''
Load the module. Required for the Python meta-loading mechanism.
'''
modfile, pathname, description = loader.info
module = imp.load_module(
loader.fullname,
modfile,
pathname,
description
)
sys.modules[loader.fullname] = module
self.__loaded_modules.add(loader.fullname)
autodecorator.decorate_module(module, decorator=self.__decorator)
return module | python | def load_module(self, loader):
'''
Load the module. Required for the Python meta-loading mechanism.
'''
modfile, pathname, description = loader.info
module = imp.load_module(
loader.fullname,
modfile,
pathname,
description
)
sys.modules[loader.fullname] = module
self.__loaded_modules.add(loader.fullname)
autodecorator.decorate_module(module, decorator=self.__decorator)
return module | [
"def",
"load_module",
"(",
"self",
",",
"loader",
")",
":",
"modfile",
",",
"pathname",
",",
"description",
"=",
"loader",
".",
"info",
"module",
"=",
"imp",
".",
"load_module",
"(",
"loader",
".",
"fullname",
",",
"modfile",
",",
"pathname",
",",
"descr... | Load the module. Required for the Python meta-loading mechanism. | [
"Load",
"the",
"module",
".",
"Required",
"for",
"the",
"Python",
"meta",
"-",
"loading",
"mechanism",
"."
] | 452242dd93da9ebd53c173c243156d1351cd96fd | https://github.com/balabit/typesafety/blob/452242dd93da9ebd53c173c243156d1351cd96fd/typesafety/finder.py#L150-L167 | train | 48,867 |
OnroerendErfgoed/oe_utils | oe_utils/search/indexer.py | Indexer.after_commit_listener | def after_commit_listener(self, session):
"""
Processing the changes.
All new or changed items are now indexed. All deleted items are now removed from the index.
"""
log.info('Commiting indexing orders for session %s' % session)
try:
if not any((session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name])):
return
if session.redis is not None:
self._queue_job(session.redis,
self.settings['redis.queue_name'],
self.index_operation_name,
session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name],
self.settings)
else:
log.info('Redis not found, falling back to indexing synchronously without redis')
self.index_operation(
session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name],
self.settings
)
session.index_new[self.cls_name].clear()
session.index_dirty[self.cls_name].clear()
session.index_deleted[self.cls_name].clear()
except AttributeError:
log.warning('Trying to commit indexing orders, but indexing sets are not present.') | python | def after_commit_listener(self, session):
"""
Processing the changes.
All new or changed items are now indexed. All deleted items are now removed from the index.
"""
log.info('Commiting indexing orders for session %s' % session)
try:
if not any((session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name])):
return
if session.redis is not None:
self._queue_job(session.redis,
self.settings['redis.queue_name'],
self.index_operation_name,
session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name],
self.settings)
else:
log.info('Redis not found, falling back to indexing synchronously without redis')
self.index_operation(
session.index_new[self.cls_name],
session.index_dirty[self.cls_name],
session.index_deleted[self.cls_name],
self.settings
)
session.index_new[self.cls_name].clear()
session.index_dirty[self.cls_name].clear()
session.index_deleted[self.cls_name].clear()
except AttributeError:
log.warning('Trying to commit indexing orders, but indexing sets are not present.') | [
"def",
"after_commit_listener",
"(",
"self",
",",
"session",
")",
":",
"log",
".",
"info",
"(",
"'Commiting indexing orders for session %s'",
"%",
"session",
")",
"try",
":",
"if",
"not",
"any",
"(",
"(",
"session",
".",
"index_new",
"[",
"self",
".",
"cls_n... | Processing the changes.
All new or changed items are now indexed. All deleted items are now removed from the index. | [
"Processing",
"the",
"changes",
".",
"All",
"new",
"or",
"changed",
"items",
"are",
"now",
"indexed",
".",
"All",
"deleted",
"items",
"are",
"now",
"removed",
"from",
"the",
"index",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/indexer.py#L63-L94 | train | 48,868 |
OnroerendErfgoed/oe_utils | oe_utils/search/indexer.py | Indexer.after_rollback_listener | def after_rollback_listener(self, session):
"""
Rollback of the transaction, undo the indexes.
If our transaction is terminated, we will reset the
indexing assignments.
"""
log.info('Removing indexing orders.')
try:
session.index_new[self.cls_name].clear()
session.index_dirty[self.cls_name].clear()
session.index_deleted[self.cls_name].clear()
except (AttributeError, KeyError):
log.warning('Trying to remove indexing orders, but indexing sets are not present.') | python | def after_rollback_listener(self, session):
"""
Rollback of the transaction, undo the indexes.
If our transaction is terminated, we will reset the
indexing assignments.
"""
log.info('Removing indexing orders.')
try:
session.index_new[self.cls_name].clear()
session.index_dirty[self.cls_name].clear()
session.index_deleted[self.cls_name].clear()
except (AttributeError, KeyError):
log.warning('Trying to remove indexing orders, but indexing sets are not present.') | [
"def",
"after_rollback_listener",
"(",
"self",
",",
"session",
")",
":",
"log",
".",
"info",
"(",
"'Removing indexing orders.'",
")",
"try",
":",
"session",
".",
"index_new",
"[",
"self",
".",
"cls_name",
"]",
".",
"clear",
"(",
")",
"session",
".",
"index... | Rollback of the transaction, undo the indexes.
If our transaction is terminated, we will reset the
indexing assignments. | [
"Rollback",
"of",
"the",
"transaction",
"undo",
"the",
"indexes",
".",
"If",
"our",
"transaction",
"is",
"terminated",
"we",
"will",
"reset",
"the",
"indexing",
"assignments",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/indexer.py#L111-L123 | train | 48,869 |
OnroerendErfgoed/oe_utils | oe_utils/search/__init__.py | parse_sort_string | def parse_sort_string(sort):
"""
Parse a sort string for use with elasticsearch
:param: sort: the sort string
"""
if sort is None:
return ['_score']
l = sort.rsplit(',')
sortlist = []
for se in l:
se = se.strip()
order = 'desc' if se[0:1] == '-' else 'asc'
field = se[1:] if se[0:1] in ['-', '+'] else se
field = field.strip()
sortlist.append({field: {"order": order, "unmapped_type": "string", "missing": "_last"}})
sortlist.append('_score')
return sortlist | python | def parse_sort_string(sort):
"""
Parse a sort string for use with elasticsearch
:param: sort: the sort string
"""
if sort is None:
return ['_score']
l = sort.rsplit(',')
sortlist = []
for se in l:
se = se.strip()
order = 'desc' if se[0:1] == '-' else 'asc'
field = se[1:] if se[0:1] in ['-', '+'] else se
field = field.strip()
sortlist.append({field: {"order": order, "unmapped_type": "string", "missing": "_last"}})
sortlist.append('_score')
return sortlist | [
"def",
"parse_sort_string",
"(",
"sort",
")",
":",
"if",
"sort",
"is",
"None",
":",
"return",
"[",
"'_score'",
"]",
"l",
"=",
"sort",
".",
"rsplit",
"(",
"','",
")",
"sortlist",
"=",
"[",
"]",
"for",
"se",
"in",
"l",
":",
"se",
"=",
"se",
".",
... | Parse a sort string for use with elasticsearch
:param: sort: the sort string | [
"Parse",
"a",
"sort",
"string",
"for",
"use",
"with",
"elasticsearch"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/__init__.py#L22-L39 | train | 48,870 |
OnroerendErfgoed/oe_utils | oe_utils/search/__init__.py | parse_filter_params | def parse_filter_params(query_params, filterable):
"""
Parse query_params to a filter params dict. Merge multiple values for one key to a list.
Filter out keys that aren't filterable.
:param query_params: query params
:param filterable: list of filterable keys
:return: dict of filter values
"""
if query_params is not None:
filter_params = {}
for fq in query_params.mixed():
if fq in filterable:
filter_params[fq] = query_params.mixed().get(fq)
return filter_params
else:
return {} | python | def parse_filter_params(query_params, filterable):
"""
Parse query_params to a filter params dict. Merge multiple values for one key to a list.
Filter out keys that aren't filterable.
:param query_params: query params
:param filterable: list of filterable keys
:return: dict of filter values
"""
if query_params is not None:
filter_params = {}
for fq in query_params.mixed():
if fq in filterable:
filter_params[fq] = query_params.mixed().get(fq)
return filter_params
else:
return {} | [
"def",
"parse_filter_params",
"(",
"query_params",
",",
"filterable",
")",
":",
"if",
"query_params",
"is",
"not",
"None",
":",
"filter_params",
"=",
"{",
"}",
"for",
"fq",
"in",
"query_params",
".",
"mixed",
"(",
")",
":",
"if",
"fq",
"in",
"filterable",
... | Parse query_params to a filter params dict. Merge multiple values for one key to a list.
Filter out keys that aren't filterable.
:param query_params: query params
:param filterable: list of filterable keys
:return: dict of filter values | [
"Parse",
"query_params",
"to",
"a",
"filter",
"params",
"dict",
".",
"Merge",
"multiple",
"values",
"for",
"one",
"key",
"to",
"a",
"list",
".",
"Filter",
"out",
"keys",
"that",
"aren",
"t",
"filterable",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/__init__.py#L42-L58 | train | 48,871 |
OnroerendErfgoed/oe_utils | oe_utils/views/atom.py | AtomFeedView.init_atom_feed | def init_atom_feed(self, feed):
"""
Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = FeedGenerator()
atom_feed.id(id=self.request.route_url(self.get_atom_feed_url, id=feed.id))
atom_feed.link(href=self.request.route_url(self.get_atom_feed_url, id=feed.id), rel='self')
atom_feed.language('nl-BE')
self.link_to_sibling(feed, 'previous', atom_feed)
self.link_to_sibling(feed, 'next', atom_feed)
return atom_feed | python | def init_atom_feed(self, feed):
"""
Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = FeedGenerator()
atom_feed.id(id=self.request.route_url(self.get_atom_feed_url, id=feed.id))
atom_feed.link(href=self.request.route_url(self.get_atom_feed_url, id=feed.id), rel='self')
atom_feed.language('nl-BE')
self.link_to_sibling(feed, 'previous', atom_feed)
self.link_to_sibling(feed, 'next', atom_feed)
return atom_feed | [
"def",
"init_atom_feed",
"(",
"self",
",",
"feed",
")",
":",
"atom_feed",
"=",
"FeedGenerator",
"(",
")",
"atom_feed",
".",
"id",
"(",
"id",
"=",
"self",
".",
"request",
".",
"route_url",
"(",
"self",
".",
"get_atom_feed_url",
",",
"id",
"=",
"feed",
"... | Initializing an atom feed `feedgen.feed.FeedGenerator` given a feed object
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator` | [
"Initializing",
"an",
"atom",
"feed",
"feedgen",
".",
"feed",
".",
"FeedGenerator",
"given",
"a",
"feed",
"object"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/views/atom.py#L132-L145 | train | 48,872 |
OnroerendErfgoed/oe_utils | oe_utils/views/atom.py | AtomFeedView._generate_atom_feed | def _generate_atom_feed(self, feed):
"""
A function returning a feed like `feedgen.feed.FeedGenerator`.
The function can be overwritten when used in other applications.
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = self.init_atom_feed(feed)
atom_feed.title("Feed")
return atom_feed | python | def _generate_atom_feed(self, feed):
"""
A function returning a feed like `feedgen.feed.FeedGenerator`.
The function can be overwritten when used in other applications.
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator`
"""
atom_feed = self.init_atom_feed(feed)
atom_feed.title("Feed")
return atom_feed | [
"def",
"_generate_atom_feed",
"(",
"self",
",",
"feed",
")",
":",
"atom_feed",
"=",
"self",
".",
"init_atom_feed",
"(",
"feed",
")",
"atom_feed",
".",
"title",
"(",
"\"Feed\"",
")",
"return",
"atom_feed"
] | A function returning a feed like `feedgen.feed.FeedGenerator`.
The function can be overwritten when used in other applications.
:param feed: a feed object
:return: an atom feed `feedgen.feed.FeedGenerator` | [
"A",
"function",
"returning",
"a",
"feed",
"like",
"feedgen",
".",
"feed",
".",
"FeedGenerator",
".",
"The",
"function",
"can",
"be",
"overwritten",
"when",
"used",
"in",
"other",
"applications",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/views/atom.py#L147-L157 | train | 48,873 |
alphagov/gapy | gapy/client.py | from_private_key | def from_private_key(account_name, private_key=None, private_key_path=None,
storage=None, storage_path=None, api_version="v3",
readonly=False, http_client=None, ga_hook=None):
"""Create a client for a service account.
Create a client with an account name and a private key.
Args:
account_name: str, the account identifier (probably the account email).
private_key: str, the private key as a string.
private_key_path: str, path to a file with the private key in.
storage: oauth2client.client.Storage, a Storage implementation to store
credentials.
storage_path: str, path to a file storage.
readonly: bool, default False, if True only readonly access is requested
from GA.
http_client: httplib2.Http, Override the default http client used.
ga_hook: function, a hook that is called every time a query is made
against GA.
"""
if not private_key:
if not private_key_path:
raise GapyError(
"Must provide either a private_key or a private_key_file")
if isinstance(private_key_path, basestring):
private_key_path = open(private_key_path)
private_key = private_key_path.read()
storage = _get_storage(storage, storage_path)
scope = GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE
credentials = SignedJwtAssertionCredentials(account_name, private_key,
scope)
credentials.set_store(storage)
return Client(_build(credentials, api_version, http_client), ga_hook) | python | def from_private_key(account_name, private_key=None, private_key_path=None,
storage=None, storage_path=None, api_version="v3",
readonly=False, http_client=None, ga_hook=None):
"""Create a client for a service account.
Create a client with an account name and a private key.
Args:
account_name: str, the account identifier (probably the account email).
private_key: str, the private key as a string.
private_key_path: str, path to a file with the private key in.
storage: oauth2client.client.Storage, a Storage implementation to store
credentials.
storage_path: str, path to a file storage.
readonly: bool, default False, if True only readonly access is requested
from GA.
http_client: httplib2.Http, Override the default http client used.
ga_hook: function, a hook that is called every time a query is made
against GA.
"""
if not private_key:
if not private_key_path:
raise GapyError(
"Must provide either a private_key or a private_key_file")
if isinstance(private_key_path, basestring):
private_key_path = open(private_key_path)
private_key = private_key_path.read()
storage = _get_storage(storage, storage_path)
scope = GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE
credentials = SignedJwtAssertionCredentials(account_name, private_key,
scope)
credentials.set_store(storage)
return Client(_build(credentials, api_version, http_client), ga_hook) | [
"def",
"from_private_key",
"(",
"account_name",
",",
"private_key",
"=",
"None",
",",
"private_key_path",
"=",
"None",
",",
"storage",
"=",
"None",
",",
"storage_path",
"=",
"None",
",",
"api_version",
"=",
"\"v3\"",
",",
"readonly",
"=",
"False",
",",
"http... | Create a client for a service account.
Create a client with an account name and a private key.
Args:
account_name: str, the account identifier (probably the account email).
private_key: str, the private key as a string.
private_key_path: str, path to a file with the private key in.
storage: oauth2client.client.Storage, a Storage implementation to store
credentials.
storage_path: str, path to a file storage.
readonly: bool, default False, if True only readonly access is requested
from GA.
http_client: httplib2.Http, Override the default http client used.
ga_hook: function, a hook that is called every time a query is made
against GA. | [
"Create",
"a",
"client",
"for",
"a",
"service",
"account",
"."
] | 5e8cc058c54d6034fa0f5177d5a6d3d2e71fa5ea | https://github.com/alphagov/gapy/blob/5e8cc058c54d6034fa0f5177d5a6d3d2e71fa5ea/gapy/client.py#L24-L59 | train | 48,874 |
alphagov/gapy | gapy/client.py | _build | def _build(credentials, api_version, http_client=None):
"""Build the client object."""
if not http_client:
http_client = httplib2.Http()
authorised_client = credentials.authorize(http_client)
return build("analytics", api_version, http=authorised_client) | python | def _build(credentials, api_version, http_client=None):
"""Build the client object."""
if not http_client:
http_client = httplib2.Http()
authorised_client = credentials.authorize(http_client)
return build("analytics", api_version, http=authorised_client) | [
"def",
"_build",
"(",
"credentials",
",",
"api_version",
",",
"http_client",
"=",
"None",
")",
":",
"if",
"not",
"http_client",
":",
"http_client",
"=",
"httplib2",
".",
"Http",
"(",
")",
"authorised_client",
"=",
"credentials",
".",
"authorize",
"(",
"http_... | Build the client object. | [
"Build",
"the",
"client",
"object",
"."
] | 5e8cc058c54d6034fa0f5177d5a6d3d2e71fa5ea | https://github.com/alphagov/gapy/blob/5e8cc058c54d6034fa0f5177d5a6d3d2e71fa5ea/gapy/client.py#L114-L121 | train | 48,875 |
wrwrwr/scikit-gof | skgof/addist.py | ad_unif_inf | def ad_unif_inf(statistic):
"""
Approximates the limiting distribution to about 5 decimal digits.
"""
z = statistic
if z < 2:
return (exp(-1.2337141 / z) / sqrt(z) *
(2.00012 + (.247105 - (.0649821 - (.0347962 -
(.011672 - .00168691 * z) * z) * z) * z) * z))
else:
return exp(-exp(1.0776 - (2.30695 - (.43424 - (.082433 -
(.008056 - .0003146 * z) * z) * z) * z) * z)) | python | def ad_unif_inf(statistic):
"""
Approximates the limiting distribution to about 5 decimal digits.
"""
z = statistic
if z < 2:
return (exp(-1.2337141 / z) / sqrt(z) *
(2.00012 + (.247105 - (.0649821 - (.0347962 -
(.011672 - .00168691 * z) * z) * z) * z) * z))
else:
return exp(-exp(1.0776 - (2.30695 - (.43424 - (.082433 -
(.008056 - .0003146 * z) * z) * z) * z) * z)) | [
"def",
"ad_unif_inf",
"(",
"statistic",
")",
":",
"z",
"=",
"statistic",
"if",
"z",
"<",
"2",
":",
"return",
"(",
"exp",
"(",
"-",
"1.2337141",
"/",
"z",
")",
"/",
"sqrt",
"(",
"z",
")",
"*",
"(",
"2.00012",
"+",
"(",
".247105",
"-",
"(",
".064... | Approximates the limiting distribution to about 5 decimal digits. | [
"Approximates",
"the",
"limiting",
"distribution",
"to",
"about",
"5",
"decimal",
"digits",
"."
] | b950572758b9ebe38b9ea954ccc360d55cdf9c39 | https://github.com/wrwrwr/scikit-gof/blob/b950572758b9ebe38b9ea954ccc360d55cdf9c39/skgof/addist.py#L38-L49 | train | 48,876 |
wrwrwr/scikit-gof | skgof/addist.py | ad_unif_fix | def ad_unif_fix(samples, pinf):
"""
Corrects the limiting distribution for a finite sample size.
"""
n = samples
c = .01265 + .1757 / n
if pinf < c:
return (((.0037 / n + .00078) / n + .00006) / n) * g1(pinf / c)
elif pinf < .8:
return ((.01365 / n + .04213) / n) * g2((pinf - c) / (.8 - c))
else:
return g3(pinf) / n | python | def ad_unif_fix(samples, pinf):
"""
Corrects the limiting distribution for a finite sample size.
"""
n = samples
c = .01265 + .1757 / n
if pinf < c:
return (((.0037 / n + .00078) / n + .00006) / n) * g1(pinf / c)
elif pinf < .8:
return ((.01365 / n + .04213) / n) * g2((pinf - c) / (.8 - c))
else:
return g3(pinf) / n | [
"def",
"ad_unif_fix",
"(",
"samples",
",",
"pinf",
")",
":",
"n",
"=",
"samples",
"c",
"=",
".01265",
"+",
".1757",
"/",
"n",
"if",
"pinf",
"<",
"c",
":",
"return",
"(",
"(",
"(",
".0037",
"/",
"n",
"+",
".00078",
")",
"/",
"n",
"+",
".00006",
... | Corrects the limiting distribution for a finite sample size. | [
"Corrects",
"the",
"limiting",
"distribution",
"for",
"a",
"finite",
"sample",
"size",
"."
] | b950572758b9ebe38b9ea954ccc360d55cdf9c39 | https://github.com/wrwrwr/scikit-gof/blob/b950572758b9ebe38b9ea954ccc360d55cdf9c39/skgof/addist.py#L59-L70 | train | 48,877 |
jambonrose/markdown_superscript_extension | setup.py | CustomCheckCommand.check_metadata | def check_metadata(self):
"""Ensure all required meta-data are supplied.
Specifically: name, version, URL, author or maintainer
Warns if any are missing.
If enforce-email option is true, author and/or maintainer must
specify an email.
"""
metadata = self.distribution.metadata
missing = []
for attr in ("name", "version", "url"):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
# https://www.python.org/dev/peps/pep-0345/
# author or maintainer must be specified
# author is preferred; if identifcal, specify only author
if not metadata.author and not metadata.maintainer:
missing.append("author")
if self.enforce_email:
missing.append("author_email")
else:
# one or both of author or maintainer specified
if (
metadata.author
and self.enforce_email
and not metadata.author_email
):
missing.append("author_email")
if (
metadata.maintainer
and self.enforce_email
and not metadata.maintainer_email
):
missing.append("maintainer_email")
if (
metadata.author
and metadata.maintainer
and metadata.author == metadata.maintainer
):
self.warn(
"Maintainer should be omitted if identical to Author.\n"
"See https://www.python.org/dev/peps/pep-0345/"
"#maintainer-email-optional"
)
if (
metadata.author_email
and metadata.maintainer_email
and metadata.author_email == metadata.maintainer_email
):
self.warn(
"Maintainer Email should be omitted if"
"identical to Author's.\n"
"See https://www.python.org/dev/peps/pep-0345/"
"#maintainer-email-optional"
)
if missing:
self.warn("missing required meta-data: %s" % ", ".join(missing)) | python | def check_metadata(self):
"""Ensure all required meta-data are supplied.
Specifically: name, version, URL, author or maintainer
Warns if any are missing.
If enforce-email option is true, author and/or maintainer must
specify an email.
"""
metadata = self.distribution.metadata
missing = []
for attr in ("name", "version", "url"):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
# https://www.python.org/dev/peps/pep-0345/
# author or maintainer must be specified
# author is preferred; if identifcal, specify only author
if not metadata.author and not metadata.maintainer:
missing.append("author")
if self.enforce_email:
missing.append("author_email")
else:
# one or both of author or maintainer specified
if (
metadata.author
and self.enforce_email
and not metadata.author_email
):
missing.append("author_email")
if (
metadata.maintainer
and self.enforce_email
and not metadata.maintainer_email
):
missing.append("maintainer_email")
if (
metadata.author
and metadata.maintainer
and metadata.author == metadata.maintainer
):
self.warn(
"Maintainer should be omitted if identical to Author.\n"
"See https://www.python.org/dev/peps/pep-0345/"
"#maintainer-email-optional"
)
if (
metadata.author_email
and metadata.maintainer_email
and metadata.author_email == metadata.maintainer_email
):
self.warn(
"Maintainer Email should be omitted if"
"identical to Author's.\n"
"See https://www.python.org/dev/peps/pep-0345/"
"#maintainer-email-optional"
)
if missing:
self.warn("missing required meta-data: %s" % ", ".join(missing)) | [
"def",
"check_metadata",
"(",
"self",
")",
":",
"metadata",
"=",
"self",
".",
"distribution",
".",
"metadata",
"missing",
"=",
"[",
"]",
"for",
"attr",
"in",
"(",
"\"name\"",
",",
"\"version\"",
",",
"\"url\"",
")",
":",
"if",
"not",
"(",
"hasattr",
"(... | Ensure all required meta-data are supplied.
Specifically: name, version, URL, author or maintainer
Warns if any are missing.
If enforce-email option is true, author and/or maintainer must
specify an email. | [
"Ensure",
"all",
"required",
"meta",
"-",
"data",
"are",
"supplied",
"."
] | 82e500182036fd754cd12cb1a3a7f71e2eeb05b1 | https://github.com/jambonrose/markdown_superscript_extension/blob/82e500182036fd754cd12cb1a3a7f71e2eeb05b1/setup.py#L61-L122 | train | 48,878 |
quantopian/serializable-traitlets | straitlets/builtin_models.py | PostgresConfig.from_url | def from_url(cls, url):
"""
Construct a PostgresConfig from a URL.
"""
parsed = urlparse(url)
return cls(
username=parsed.username,
password=parsed.password,
hostname=parsed.hostname,
port=parsed.port,
database=parsed.path.lstrip('/'),
# Like parse_qs, but produces a scalar per key, instead of a list:
query_params=dict(param.split('=')
for param in parsed.query.split('&'))
if parsed.query else {},
) | python | def from_url(cls, url):
"""
Construct a PostgresConfig from a URL.
"""
parsed = urlparse(url)
return cls(
username=parsed.username,
password=parsed.password,
hostname=parsed.hostname,
port=parsed.port,
database=parsed.path.lstrip('/'),
# Like parse_qs, but produces a scalar per key, instead of a list:
query_params=dict(param.split('=')
for param in parsed.query.split('&'))
if parsed.query else {},
) | [
"def",
"from_url",
"(",
"cls",
",",
"url",
")",
":",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"return",
"cls",
"(",
"username",
"=",
"parsed",
".",
"username",
",",
"password",
"=",
"parsed",
".",
"password",
",",
"hostname",
"=",
"parsed",
".",
"... | Construct a PostgresConfig from a URL. | [
"Construct",
"a",
"PostgresConfig",
"from",
"a",
"URL",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/builtin_models.py#L78-L93 | train | 48,879 |
OnroerendErfgoed/oe_utils | oe_utils/utils/file_utils.py | get_last_modified_date | def get_last_modified_date(filename):
"""
Get the last modified date of a given file
:param filename: string: pathname of a file
:return: Date
"""
if os.path.isfile(filename):
t = os.path.getmtime(filename)
return datetime.date.fromtimestamp(t).strftime('%d/%m/%Y')
return None | python | def get_last_modified_date(filename):
"""
Get the last modified date of a given file
:param filename: string: pathname of a file
:return: Date
"""
if os.path.isfile(filename):
t = os.path.getmtime(filename)
return datetime.date.fromtimestamp(t).strftime('%d/%m/%Y')
return None | [
"def",
"get_last_modified_date",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"t",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"return",
"datetime",
".",
"date",
".",
"fromtimestamp",
... | Get the last modified date of a given file
:param filename: string: pathname of a file
:return: Date | [
"Get",
"the",
"last",
"modified",
"date",
"of",
"a",
"given",
"file"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/utils/file_utils.py#L7-L17 | train | 48,880 |
OnroerendErfgoed/oe_utils | oe_utils/utils/file_utils.py | get_file_size | def get_file_size(filename):
"""
Get the file size of a given file
:param filename: string: pathname of a file
:return: human readable filesize
"""
if os.path.isfile(filename):
return convert_size(os.path.getsize(filename))
return None | python | def get_file_size(filename):
"""
Get the file size of a given file
:param filename: string: pathname of a file
:return: human readable filesize
"""
if os.path.isfile(filename):
return convert_size(os.path.getsize(filename))
return None | [
"def",
"get_file_size",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"return",
"convert_size",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
")",
"return",
"None"
] | Get the file size of a given file
:param filename: string: pathname of a file
:return: human readable filesize | [
"Get",
"the",
"file",
"size",
"of",
"a",
"given",
"file"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/utils/file_utils.py#L20-L29 | train | 48,881 |
OnroerendErfgoed/oe_utils | oe_utils/utils/file_utils.py | convert_size | def convert_size(size_bytes):
"""
Transform bytesize to a human readable filesize
:param size_bytes: bytesize
:return: human readable filesize
"""
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i]) | python | def convert_size(size_bytes):
"""
Transform bytesize to a human readable filesize
:param size_bytes: bytesize
:return: human readable filesize
"""
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i]) | [
"def",
"convert_size",
"(",
"size_bytes",
")",
":",
"if",
"size_bytes",
"==",
"0",
":",
"return",
"\"0B\"",
"size_name",
"=",
"(",
"\"B\"",
",",
"\"KB\"",
",",
"\"MB\"",
",",
"\"GB\"",
",",
"\"TB\"",
",",
"\"PB\"",
",",
"\"EB\"",
",",
"\"ZB\"",
",",
"\... | Transform bytesize to a human readable filesize
:param size_bytes: bytesize
:return: human readable filesize | [
"Transform",
"bytesize",
"to",
"a",
"human",
"readable",
"filesize"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/utils/file_utils.py#L32-L45 | train | 48,882 |
quantopian/serializable-traitlets | straitlets/serializable.py | Serializable.validate_all_attributes | def validate_all_attributes(self):
"""
Force validation of all traits.
Useful for circumstances where an attribute won't be accessed until
well after construction, but we want to fail eagerly if that attribute
is passed incorrectly.
Consider using ``StrictSerializable`` for classes where you always want
this called on construction.
See Also
--------
StrictSerializable
"""
errors = {}
for name in self.trait_names():
try:
getattr(self, name)
except TraitError as e:
errors[name] = e
if errors:
raise MultipleTraitErrors(errors) | python | def validate_all_attributes(self):
"""
Force validation of all traits.
Useful for circumstances where an attribute won't be accessed until
well after construction, but we want to fail eagerly if that attribute
is passed incorrectly.
Consider using ``StrictSerializable`` for classes where you always want
this called on construction.
See Also
--------
StrictSerializable
"""
errors = {}
for name in self.trait_names():
try:
getattr(self, name)
except TraitError as e:
errors[name] = e
if errors:
raise MultipleTraitErrors(errors) | [
"def",
"validate_all_attributes",
"(",
"self",
")",
":",
"errors",
"=",
"{",
"}",
"for",
"name",
"in",
"self",
".",
"trait_names",
"(",
")",
":",
"try",
":",
"getattr",
"(",
"self",
",",
"name",
")",
"except",
"TraitError",
"as",
"e",
":",
"errors",
... | Force validation of all traits.
Useful for circumstances where an attribute won't be accessed until
well after construction, but we want to fail eagerly if that attribute
is passed incorrectly.
Consider using ``StrictSerializable`` for classes where you always want
this called on construction.
See Also
--------
StrictSerializable | [
"Force",
"validation",
"of",
"all",
"traits",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L84-L106 | train | 48,883 |
quantopian/serializable-traitlets | straitlets/serializable.py | Serializable.example_instance | def example_instance(cls, skip=()):
"""
Generate an example instance of a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance.
Traits with names in ``skip`` will not have example values set.
"""
kwargs = {}
for name, trait in iteritems(cls.class_traits()):
if name in skip:
continue
value = trait.example_value
if value is Undefined:
continue
kwargs[name] = value
return cls(**kwargs) | python | def example_instance(cls, skip=()):
"""
Generate an example instance of a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance.
Traits with names in ``skip`` will not have example values set.
"""
kwargs = {}
for name, trait in iteritems(cls.class_traits()):
if name in skip:
continue
value = trait.example_value
if value is Undefined:
continue
kwargs[name] = value
return cls(**kwargs) | [
"def",
"example_instance",
"(",
"cls",
",",
"skip",
"=",
"(",
")",
")",
":",
"kwargs",
"=",
"{",
"}",
"for",
"name",
",",
"trait",
"in",
"iteritems",
"(",
"cls",
".",
"class_traits",
"(",
")",
")",
":",
"if",
"name",
"in",
"skip",
":",
"continue",
... | Generate an example instance of a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance.
Traits with names in ``skip`` will not have example values set. | [
"Generate",
"an",
"example",
"instance",
"of",
"a",
"Serializable",
"subclass",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L136-L154 | train | 48,884 |
quantopian/serializable-traitlets | straitlets/serializable.py | Serializable.example_yaml | def example_yaml(cls, skip=()):
"""
Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance.
"""
return cls.example_instance(skip=skip).to_yaml(skip=skip) | python | def example_yaml(cls, skip=()):
"""
Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance.
"""
return cls.example_instance(skip=skip).to_yaml(skip=skip) | [
"def",
"example_yaml",
"(",
"cls",
",",
"skip",
"=",
"(",
")",
")",
":",
"return",
"cls",
".",
"example_instance",
"(",
"skip",
"=",
"skip",
")",
".",
"to_yaml",
"(",
"skip",
"=",
"skip",
")"
] | Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back the default_value for the instance. | [
"Generate",
"an",
"example",
"yaml",
"string",
"for",
"a",
"Serializable",
"subclass",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L157-L164 | train | 48,885 |
quantopian/serializable-traitlets | straitlets/serializable.py | Serializable.write_example_yaml | def write_example_yaml(cls, dest, skip=()):
"""
Write a file containing an example yaml string for a Serializable
subclass.
"""
# Make sure we can make an instance before we open a file.
inst = cls.example_instance(skip=skip)
with open(dest, 'w') as f:
inst.to_yaml(stream=f, skip=skip) | python | def write_example_yaml(cls, dest, skip=()):
"""
Write a file containing an example yaml string for a Serializable
subclass.
"""
# Make sure we can make an instance before we open a file.
inst = cls.example_instance(skip=skip)
with open(dest, 'w') as f:
inst.to_yaml(stream=f, skip=skip) | [
"def",
"write_example_yaml",
"(",
"cls",
",",
"dest",
",",
"skip",
"=",
"(",
")",
")",
":",
"# Make sure we can make an instance before we open a file.",
"inst",
"=",
"cls",
".",
"example_instance",
"(",
"skip",
"=",
"skip",
")",
"with",
"open",
"(",
"dest",
"... | Write a file containing an example yaml string for a Serializable
subclass. | [
"Write",
"a",
"file",
"containing",
"an",
"example",
"yaml",
"string",
"for",
"a",
"Serializable",
"subclass",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L167-L175 | train | 48,886 |
quantopian/serializable-traitlets | straitlets/serializable.py | Serializable.to_base64 | def to_base64(self, skip=()):
"""
Construct from base64-encoded JSON.
"""
return base64.b64encode(
ensure_bytes(
self.to_json(skip=skip),
encoding='utf-8',
)
) | python | def to_base64(self, skip=()):
"""
Construct from base64-encoded JSON.
"""
return base64.b64encode(
ensure_bytes(
self.to_json(skip=skip),
encoding='utf-8',
)
) | [
"def",
"to_base64",
"(",
"self",
",",
"skip",
"=",
"(",
")",
")",
":",
"return",
"base64",
".",
"b64encode",
"(",
"ensure_bytes",
"(",
"self",
".",
"to_json",
"(",
"skip",
"=",
"skip",
")",
",",
"encoding",
"=",
"'utf-8'",
",",
")",
")"
] | Construct from base64-encoded JSON. | [
"Construct",
"from",
"base64",
"-",
"encoded",
"JSON",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/serializable.py#L219-L228 | train | 48,887 |
OnroerendErfgoed/oe_utils | oe_utils/data/data_managers.py | DataManager.delete | def delete(self, object_id):
"""
Delete an object by its id
:param object_id: the objects id.
:return: the deleted object
:raises: :class: NoResultFound when the object could not be found
"""
obj = self.session.query(self.cls).filter_by(id=object_id).one()
self.session.delete(obj)
return obj | python | def delete(self, object_id):
"""
Delete an object by its id
:param object_id: the objects id.
:return: the deleted object
:raises: :class: NoResultFound when the object could not be found
"""
obj = self.session.query(self.cls).filter_by(id=object_id).one()
self.session.delete(obj)
return obj | [
"def",
"delete",
"(",
"self",
",",
"object_id",
")",
":",
"obj",
"=",
"self",
".",
"session",
".",
"query",
"(",
"self",
".",
"cls",
")",
".",
"filter_by",
"(",
"id",
"=",
"object_id",
")",
".",
"one",
"(",
")",
"self",
".",
"session",
".",
"dele... | Delete an object by its id
:param object_id: the objects id.
:return: the deleted object
:raises: :class: NoResultFound when the object could not be found | [
"Delete",
"an",
"object",
"by",
"its",
"id"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/data/data_managers.py#L95-L105 | train | 48,888 |
OnroerendErfgoed/oe_utils | oe_utils/data/data_managers.py | DataManager.save | def save(self, obj):
"""
save an object
:param obj: the object
:return: the saved object
"""
if obj not in self.session:
self.session.add(obj)
else:
obj = self.session.merge(obj)
self.session.flush()
self.session.refresh(obj)
return obj | python | def save(self, obj):
"""
save an object
:param obj: the object
:return: the saved object
"""
if obj not in self.session:
self.session.add(obj)
else:
obj = self.session.merge(obj)
self.session.flush()
self.session.refresh(obj)
return obj | [
"def",
"save",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"not",
"in",
"self",
".",
"session",
":",
"self",
".",
"session",
".",
"add",
"(",
"obj",
")",
"else",
":",
"obj",
"=",
"self",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"self"... | save an object
:param obj: the object
:return: the saved object | [
"save",
"an",
"object"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/data/data_managers.py#L107-L120 | train | 48,889 |
OnroerendErfgoed/oe_utils | oe_utils/search/query_builder.py | QueryBuilder.add_named_concept_filters | def add_named_concept_filters(self, named_filter_concepts):
"""
Adds named concept filters
:param named_filter_concepts: dict with named filter concepts which will be mapped as the key as query param and the value as search string
"""
for concept_key, concept_name in named_filter_concepts.items():
self.add_concept_filter(concept_key, concept_name=concept_name) | python | def add_named_concept_filters(self, named_filter_concepts):
"""
Adds named concept filters
:param named_filter_concepts: dict with named filter concepts which will be mapped as the key as query param and the value as search string
"""
for concept_key, concept_name in named_filter_concepts.items():
self.add_concept_filter(concept_key, concept_name=concept_name) | [
"def",
"add_named_concept_filters",
"(",
"self",
",",
"named_filter_concepts",
")",
":",
"for",
"concept_key",
",",
"concept_name",
"in",
"named_filter_concepts",
".",
"items",
"(",
")",
":",
"self",
".",
"add_concept_filter",
"(",
"concept_key",
",",
"concept_name"... | Adds named concept filters
:param named_filter_concepts: dict with named filter concepts which will be mapped as the key as query param and the value as search string | [
"Adds",
"named",
"concept",
"filters"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/query_builder.py#L27-L34 | train | 48,890 |
OnroerendErfgoed/oe_utils | oe_utils/search/query_builder.py | QueryBuilder.add_concept_filter | def add_concept_filter(self, concept, concept_name=None):
"""
Add a concept filter
:param concept: concept which will be used as lowercase string in a search term
:param concept_name: name of the place where there will be searched for
"""
if concept in self.query_params.keys():
if not concept_name:
concept_name = concept
if isinstance(self.query_params[concept], list):
if self.es_version == '1':
es_filter = {'or': []}
for or_filter in self.query_params[concept]:
es_filter['or'].append(self._build_concept_term(concept_name, or_filter))
else:
es_filter = {"bool": {"should": []}}
for or_filter in self.query_params[concept]:
es_filter["bool"]["should"].append(self._build_concept_term(concept_name, or_filter))
else:
es_filter = self._build_concept_term(concept_name, self.query_params[concept])
self.filters.append(es_filter) | python | def add_concept_filter(self, concept, concept_name=None):
"""
Add a concept filter
:param concept: concept which will be used as lowercase string in a search term
:param concept_name: name of the place where there will be searched for
"""
if concept in self.query_params.keys():
if not concept_name:
concept_name = concept
if isinstance(self.query_params[concept], list):
if self.es_version == '1':
es_filter = {'or': []}
for or_filter in self.query_params[concept]:
es_filter['or'].append(self._build_concept_term(concept_name, or_filter))
else:
es_filter = {"bool": {"should": []}}
for or_filter in self.query_params[concept]:
es_filter["bool"]["should"].append(self._build_concept_term(concept_name, or_filter))
else:
es_filter = self._build_concept_term(concept_name, self.query_params[concept])
self.filters.append(es_filter) | [
"def",
"add_concept_filter",
"(",
"self",
",",
"concept",
",",
"concept_name",
"=",
"None",
")",
":",
"if",
"concept",
"in",
"self",
".",
"query_params",
".",
"keys",
"(",
")",
":",
"if",
"not",
"concept_name",
":",
"concept_name",
"=",
"concept",
"if",
... | Add a concept filter
:param concept: concept which will be used as lowercase string in a search term
:param concept_name: name of the place where there will be searched for | [
"Add",
"a",
"concept",
"filter"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/query_builder.py#L45-L66 | train | 48,891 |
OnroerendErfgoed/oe_utils | oe_utils/search/query_builder.py | QueryBuilder.build | def build(self):
"""
Builds the query string, which can be used for a search query
:return: the query string
"""
if self.es_version == '1':
if len(self.filters) > 0:
return {
'filtered': {
'query': self.query,
'filter': {
'and': self.filters
}
}
}
else:
return self.query
else:
query = {
'bool': {
'must': self.query
}
}
if len(self.filters) > 0:
query["bool"]["filter"] = self.filters
return query | python | def build(self):
"""
Builds the query string, which can be used for a search query
:return: the query string
"""
if self.es_version == '1':
if len(self.filters) > 0:
return {
'filtered': {
'query': self.query,
'filter': {
'and': self.filters
}
}
}
else:
return self.query
else:
query = {
'bool': {
'must': self.query
}
}
if len(self.filters) > 0:
query["bool"]["filter"] = self.filters
return query | [
"def",
"build",
"(",
"self",
")",
":",
"if",
"self",
".",
"es_version",
"==",
"'1'",
":",
"if",
"len",
"(",
"self",
".",
"filters",
")",
">",
"0",
":",
"return",
"{",
"'filtered'",
":",
"{",
"'query'",
":",
"self",
".",
"query",
",",
"'filter'",
... | Builds the query string, which can be used for a search query
:return: the query string | [
"Builds",
"the",
"query",
"string",
"which",
"can",
"be",
"used",
"for",
"a",
"search",
"query"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/search/query_builder.py#L74-L100 | train | 48,892 |
OnroerendErfgoed/oe_utils | oe_utils/range_parser.py | Range.parse | def parse(cls, request, default_start=0, default_end=9, max_end=50):
'''
Parse the range headers into a range object. When there are no range headers,
check for a page 'pagina' parameter, otherwise use the defaults defaults
:param request: a request object
:param default_start: default start for paging (optional, default is 0)
:param default_end: default end for paging (optional, default is 9)
:param max_end: maximum end for paging (optional, default is 50,
no limits in case of None)
:return: :class: 'oe_utils.range_parser.Range'
'''
settings = request.registry.settings
page_param = settings.get('oe.paging.page.queryparam', 'pagina')
if 'Range' in request.headers and request.headers['Range'] is not '':
match = re.match('^items=([0-9]+)-([0-9]+)$', request.headers['Range'])
if match:
start = int(match.group(1))
end = int(match.group(2))
if end < start:
end = start
if max_end and end > start + max_end:
end = start + max_end
return cls(start, end)
else:
raise RangeParseException('range header does not match expected format')
elif page_param in request.params:
per_page_param = settings.get('oe.paging.per_page.queryparam',
'per_pagina')
page = int(request.params.get(page_param))
items_per_page = int(
request.params.get(per_page_param,
default_end - default_start + 1)
)
start = default_start + items_per_page * (page - 1)
end = start + items_per_page - 1
return cls(start, end, page)
else:
return cls(default_start, default_end) | python | def parse(cls, request, default_start=0, default_end=9, max_end=50):
'''
Parse the range headers into a range object. When there are no range headers,
check for a page 'pagina' parameter, otherwise use the defaults defaults
:param request: a request object
:param default_start: default start for paging (optional, default is 0)
:param default_end: default end for paging (optional, default is 9)
:param max_end: maximum end for paging (optional, default is 50,
no limits in case of None)
:return: :class: 'oe_utils.range_parser.Range'
'''
settings = request.registry.settings
page_param = settings.get('oe.paging.page.queryparam', 'pagina')
if 'Range' in request.headers and request.headers['Range'] is not '':
match = re.match('^items=([0-9]+)-([0-9]+)$', request.headers['Range'])
if match:
start = int(match.group(1))
end = int(match.group(2))
if end < start:
end = start
if max_end and end > start + max_end:
end = start + max_end
return cls(start, end)
else:
raise RangeParseException('range header does not match expected format')
elif page_param in request.params:
per_page_param = settings.get('oe.paging.per_page.queryparam',
'per_pagina')
page = int(request.params.get(page_param))
items_per_page = int(
request.params.get(per_page_param,
default_end - default_start + 1)
)
start = default_start + items_per_page * (page - 1)
end = start + items_per_page - 1
return cls(start, end, page)
else:
return cls(default_start, default_end) | [
"def",
"parse",
"(",
"cls",
",",
"request",
",",
"default_start",
"=",
"0",
",",
"default_end",
"=",
"9",
",",
"max_end",
"=",
"50",
")",
":",
"settings",
"=",
"request",
".",
"registry",
".",
"settings",
"page_param",
"=",
"settings",
".",
"get",
"(",... | Parse the range headers into a range object. When there are no range headers,
check for a page 'pagina' parameter, otherwise use the defaults defaults
:param request: a request object
:param default_start: default start for paging (optional, default is 0)
:param default_end: default end for paging (optional, default is 9)
:param max_end: maximum end for paging (optional, default is 50,
no limits in case of None)
:return: :class: 'oe_utils.range_parser.Range' | [
"Parse",
"the",
"range",
"headers",
"into",
"a",
"range",
"object",
".",
"When",
"there",
"are",
"no",
"range",
"headers",
"check",
"for",
"a",
"page",
"pagina",
"parameter",
"otherwise",
"use",
"the",
"defaults",
"defaults"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/range_parser.py#L42-L82 | train | 48,893 |
OnroerendErfgoed/oe_utils | oe_utils/range_parser.py | Range.set_response_headers | def set_response_headers(self, request, total_count):
'''
Set the correct range headers on the response
:param request: a request object
:param total_count: the total number of results
'''
response = request.response
response.headerlist.append(('Access-Control-Expose-Headers', 'Content-Range, X-Content-Range'))
response.accept_ranges = 'items'
if total_count is None:
raise RangeParseException('Provided length value is null')
if total_count > 0:
response.content_range = self.content_range(total_count)
self.set_link_headers(request, total_count) | python | def set_response_headers(self, request, total_count):
'''
Set the correct range headers on the response
:param request: a request object
:param total_count: the total number of results
'''
response = request.response
response.headerlist.append(('Access-Control-Expose-Headers', 'Content-Range, X-Content-Range'))
response.accept_ranges = 'items'
if total_count is None:
raise RangeParseException('Provided length value is null')
if total_count > 0:
response.content_range = self.content_range(total_count)
self.set_link_headers(request, total_count) | [
"def",
"set_response_headers",
"(",
"self",
",",
"request",
",",
"total_count",
")",
":",
"response",
"=",
"request",
".",
"response",
"response",
".",
"headerlist",
".",
"append",
"(",
"(",
"'Access-Control-Expose-Headers'",
",",
"'Content-Range, X-Content-Range'",
... | Set the correct range headers on the response
:param request: a request object
:param total_count: the total number of results | [
"Set",
"the",
"correct",
"range",
"headers",
"on",
"the",
"response"
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/range_parser.py#L84-L99 | train | 48,894 |
OnroerendErfgoed/oe_utils | oe_utils/range_parser.py | Range.set_link_headers | def set_link_headers(self, request, total_count):
"""
Sets Link headers on the response.
When the Range header is present in the request no Link headers will
be added.
4 links will be added: first, prev, next, last.
If the current page is already the first page, the prev link will
not be present.
If the current page is already the last page, the next link will
not be present.
:param request: A request object
:param total_count: The total amount of items available before paging
"""
response = request.response
if request.headers.get('Range'):
# Don't set the Link headers when custom ranges were used.
return
settings = request.registry.settings
page_param = settings.get('oe.paging.page.queryparam', 'pagina')
per_page_param = settings.get('oe.paging.per_page.queryparam',
'per_pagina')
url = request.path_url
try:
queryparams = request.params.mixed()
except AttributeError:
queryparams = request.params
page_size = self.get_page_size()
current_page = self.start // page_size + 1
queryparams[per_page_param] = page_size
links = {
'first': 1,
'last': int(math.ceil(float(total_count) / page_size))
}
if current_page != links['first']:
links['prev'] = current_page - 1
if current_page != links['last']:
links['next'] = current_page + 1
response.headers['Link'] = self._make_link_headers(links, page_param,
queryparams, url) | python | def set_link_headers(self, request, total_count):
"""
Sets Link headers on the response.
When the Range header is present in the request no Link headers will
be added.
4 links will be added: first, prev, next, last.
If the current page is already the first page, the prev link will
not be present.
If the current page is already the last page, the next link will
not be present.
:param request: A request object
:param total_count: The total amount of items available before paging
"""
response = request.response
if request.headers.get('Range'):
# Don't set the Link headers when custom ranges were used.
return
settings = request.registry.settings
page_param = settings.get('oe.paging.page.queryparam', 'pagina')
per_page_param = settings.get('oe.paging.per_page.queryparam',
'per_pagina')
url = request.path_url
try:
queryparams = request.params.mixed()
except AttributeError:
queryparams = request.params
page_size = self.get_page_size()
current_page = self.start // page_size + 1
queryparams[per_page_param] = page_size
links = {
'first': 1,
'last': int(math.ceil(float(total_count) / page_size))
}
if current_page != links['first']:
links['prev'] = current_page - 1
if current_page != links['last']:
links['next'] = current_page + 1
response.headers['Link'] = self._make_link_headers(links, page_param,
queryparams, url) | [
"def",
"set_link_headers",
"(",
"self",
",",
"request",
",",
"total_count",
")",
":",
"response",
"=",
"request",
".",
"response",
"if",
"request",
".",
"headers",
".",
"get",
"(",
"'Range'",
")",
":",
"# Don't set the Link headers when custom ranges were used.",
... | Sets Link headers on the response.
When the Range header is present in the request no Link headers will
be added.
4 links will be added: first, prev, next, last.
If the current page is already the first page, the prev link will
not be present.
If the current page is already the last page, the next link will
not be present.
:param request: A request object
:param total_count: The total amount of items available before paging | [
"Sets",
"Link",
"headers",
"on",
"the",
"response",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/range_parser.py#L101-L143 | train | 48,895 |
OnroerendErfgoed/oe_utils | oe_utils/validation/validators_actor.py | KBOSchemaNode.preparer | def preparer(value):
'''
Edit a value to a value that can be validated as a
kbo number.
'''
if value is None or value == colander.null:
return colander.null
return value.strip().replace('.', '') | python | def preparer(value):
'''
Edit a value to a value that can be validated as a
kbo number.
'''
if value is None or value == colander.null:
return colander.null
return value.strip().replace('.', '') | [
"def",
"preparer",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"value",
"==",
"colander",
".",
"null",
":",
"return",
"colander",
".",
"null",
"return",
"value",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"'.'",
",",
"''",
")"
] | Edit a value to a value that can be validated as a
kbo number. | [
"Edit",
"a",
"value",
"to",
"a",
"value",
"that",
"can",
"be",
"validated",
"as",
"a",
"kbo",
"number",
"."
] | 7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d | https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/validation/validators_actor.py#L57-L64 | train | 48,896 |
wrwrwr/scikit-gof | skgof/cvmdist.py | cvm_unif_inf | def cvm_unif_inf(statistic):
"""
Calculates the limiting distribution of the Cramer-von Mises statistic.
After the second line of equation 1.3 from the Csorgo and Faraway paper.
"""
args = inf_args / statistic
return (inf_cs * exp(-args) * kv(.25, args)).sum() / statistic ** .5 | python | def cvm_unif_inf(statistic):
"""
Calculates the limiting distribution of the Cramer-von Mises statistic.
After the second line of equation 1.3 from the Csorgo and Faraway paper.
"""
args = inf_args / statistic
return (inf_cs * exp(-args) * kv(.25, args)).sum() / statistic ** .5 | [
"def",
"cvm_unif_inf",
"(",
"statistic",
")",
":",
"args",
"=",
"inf_args",
"/",
"statistic",
"return",
"(",
"inf_cs",
"*",
"exp",
"(",
"-",
"args",
")",
"*",
"kv",
"(",
".25",
",",
"args",
")",
")",
".",
"sum",
"(",
")",
"/",
"statistic",
"**",
... | Calculates the limiting distribution of the Cramer-von Mises statistic.
After the second line of equation 1.3 from the Csorgo and Faraway paper. | [
"Calculates",
"the",
"limiting",
"distribution",
"of",
"the",
"Cramer",
"-",
"von",
"Mises",
"statistic",
"."
] | b950572758b9ebe38b9ea954ccc360d55cdf9c39 | https://github.com/wrwrwr/scikit-gof/blob/b950572758b9ebe38b9ea954ccc360d55cdf9c39/skgof/cvmdist.py#L48-L55 | train | 48,897 |
wrwrwr/scikit-gof | skgof/cvmdist.py | cvm_unif_fix1 | def cvm_unif_fix1(statistic):
"""
Approximates the first-term of the small sample count Gotze expansion.
After equation 1.10 (with coefficients pulled out as csa / csb).
"""
args = fix1_args / statistic
kvs = kv((.25, .75, 1.25), args[:, :, newaxis])
gs, hs = exp(-args) * tensordot(((1, 1, 0), (2, 3, -1)), kvs, axes=(1, 2))
a = dot((7, 16, 7), fix1_csa * gs).sum() / statistic ** 1.5
b = dot((1, 0, 24), fix1_csb * hs).sum() / statistic ** 2.5
return cvm_unif_inf(statistic) / 12 - a - b | python | def cvm_unif_fix1(statistic):
"""
Approximates the first-term of the small sample count Gotze expansion.
After equation 1.10 (with coefficients pulled out as csa / csb).
"""
args = fix1_args / statistic
kvs = kv((.25, .75, 1.25), args[:, :, newaxis])
gs, hs = exp(-args) * tensordot(((1, 1, 0), (2, 3, -1)), kvs, axes=(1, 2))
a = dot((7, 16, 7), fix1_csa * gs).sum() / statistic ** 1.5
b = dot((1, 0, 24), fix1_csb * hs).sum() / statistic ** 2.5
return cvm_unif_inf(statistic) / 12 - a - b | [
"def",
"cvm_unif_fix1",
"(",
"statistic",
")",
":",
"args",
"=",
"fix1_args",
"/",
"statistic",
"kvs",
"=",
"kv",
"(",
"(",
".25",
",",
".75",
",",
"1.25",
")",
",",
"args",
"[",
":",
",",
":",
",",
"newaxis",
"]",
")",
"gs",
",",
"hs",
"=",
"e... | Approximates the first-term of the small sample count Gotze expansion.
After equation 1.10 (with coefficients pulled out as csa / csb). | [
"Approximates",
"the",
"first",
"-",
"term",
"of",
"the",
"small",
"sample",
"count",
"Gotze",
"expansion",
"."
] | b950572758b9ebe38b9ea954ccc360d55cdf9c39 | https://github.com/wrwrwr/scikit-gof/blob/b950572758b9ebe38b9ea954ccc360d55cdf9c39/skgof/cvmdist.py#L64-L75 | train | 48,898 |
quantopian/serializable-traitlets | straitlets/utils.py | merge | def merge(*ds):
"""
Merge together a sequence if dictionaries.
Later entries overwrite values from earlier entries.
>>> merge({'a': 'b', 'c': 'd'}, {'a': 'z', 'e': 'f'})
{'a': 'z', 'c': 'd', 'e': 'f'}
"""
if not ds:
raise ValueError("Must provide at least one dict to merge().")
out = {}
for d in ds:
out.update(d)
return out | python | def merge(*ds):
"""
Merge together a sequence if dictionaries.
Later entries overwrite values from earlier entries.
>>> merge({'a': 'b', 'c': 'd'}, {'a': 'z', 'e': 'f'})
{'a': 'z', 'c': 'd', 'e': 'f'}
"""
if not ds:
raise ValueError("Must provide at least one dict to merge().")
out = {}
for d in ds:
out.update(d)
return out | [
"def",
"merge",
"(",
"*",
"ds",
")",
":",
"if",
"not",
"ds",
":",
"raise",
"ValueError",
"(",
"\"Must provide at least one dict to merge().\"",
")",
"out",
"=",
"{",
"}",
"for",
"d",
"in",
"ds",
":",
"out",
".",
"update",
"(",
"d",
")",
"return",
"out"... | Merge together a sequence if dictionaries.
Later entries overwrite values from earlier entries.
>>> merge({'a': 'b', 'c': 'd'}, {'a': 'z', 'e': 'f'})
{'a': 'z', 'c': 'd', 'e': 'f'} | [
"Merge",
"together",
"a",
"sequence",
"if",
"dictionaries",
"."
] | dd334366d1130825aea55d3dfecd6756973594e0 | https://github.com/quantopian/serializable-traitlets/blob/dd334366d1130825aea55d3dfecd6756973594e0/straitlets/utils.py#L2-L16 | train | 48,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.