repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
dmgass/baseline | baseline/_baseline.py | https://github.com/dmgass/baseline/blob/1f7988e8c9fafa83eb3a1ce73b1601d2afdbb2cd/baseline/_baseline.py#L289-L321 | def _atexit_callback(cls):
"""Create Python script copies with updated baselines.
For any baseline that had a miscompare, make a copy of the
source file which contained the baseline and update the
baseline with the new string value.
:returns:
record of every Python file update (key=path,
value=script instance)
:rtype: dict
"""
updated_scripts = {}
for baseline in cls._baselines_to_update:
if baseline.z__path.endswith('<stdin>'):
continue
try:
script = updated_scripts[baseline.z__path]
except KeyError:
script = Script(baseline.z__path)
updated_scripts[baseline.z__path] = script
script.add_update(baseline.z__linenum, baseline.z__update)
for key in sorted(updated_scripts):
script = updated_scripts[key]
script.update()
return updated_scripts | [
"def",
"_atexit_callback",
"(",
"cls",
")",
":",
"updated_scripts",
"=",
"{",
"}",
"for",
"baseline",
"in",
"cls",
".",
"_baselines_to_update",
":",
"if",
"baseline",
".",
"z__path",
".",
"endswith",
"(",
"'<stdin>'",
")",
":",
"continue",
"try",
":",
"scr... | Create Python script copies with updated baselines.
For any baseline that had a miscompare, make a copy of the
source file which contained the baseline and update the
baseline with the new string value.
:returns:
record of every Python file update (key=path,
value=script instance)
:rtype: dict | [
"Create",
"Python",
"script",
"copies",
"with",
"updated",
"baselines",
"."
] | python | train |
joferkington/mpldatacursor | mpldatacursor/datacursor.py | https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L658-L719 | def _select(self, event):
"""This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
this datacursor and fire a pick event if the mouse is over an a managed
artist."""
def event_axes_data(event, ax):
"""Creates a new event will have xdata and ydata based on *ax*."""
# We need to redefine event.xdata and event.ydata for twinned axes
# to work correctly
point = event.x, event.y
x, y = ax.transData.inverted().transform_point(point)
event = copy.copy(event)
event.xdata, event.ydata = x, y
return event
def contains(artist, event):
"""Need to ensure we don't trigger a pick event for axes in a
different figure. Otherwise, picking on one figure will trigger a
datacursor in another figure."""
if event.canvas is artist.figure.canvas:
return artist.contains(event)
else:
return False, {}
# If we're on top of an annotation box, hide it if right-clicked or
# do nothing if we're in draggable mode
for anno in list(self.annotations.values()):
fixed_event = event_axes_data(event, anno.axes)
if contains(anno, fixed_event)[0]:
if event.button == self.hide_button:
self._hide_box(anno)
elif self.draggable:
return
for artist in self.artists:
fixed_event = event_axes_data(event, artist.axes)
inside, info = contains(artist, fixed_event)
if inside:
fig = artist.figure
new_event = PickEvent('pick_event', fig.canvas, fixed_event,
artist, **info)
self(new_event)
# Only fire a single pick event for one mouseevent. Otherwise
# we'll need timers, etc to avoid multiple calls
break
# Not hovering over anything...
if self.hover:
artists = itertools.chain(self.artists, self.annotations.values())
over_something = [contains(artist, event)[0] for artist in artists]
if not any(over_something):
self.hide() | [
"def",
"_select",
"(",
"self",
",",
"event",
")",
":",
"def",
"event_axes_data",
"(",
"event",
",",
"ax",
")",
":",
"\"\"\"Creates a new event will have xdata and ydata based on *ax*.\"\"\"",
"# We need to redefine event.xdata and event.ydata for twinned axes",
"# to work correct... | This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
this datacursor and fire a pick event if the mouse is over an a managed
artist. | [
"This",
"is",
"basically",
"a",
"proxy",
"to",
"trigger",
"a",
"pick",
"event",
".",
"This",
"function",
"is",
"connected",
"to",
"either",
"a",
"mouse",
"motion",
"or",
"mouse",
"button",
"event",
"(",
"see",
"self",
".",
"enable",
")",
"depending",
"on... | python | train |
Kronuz/pyScss | scss/extension/compass/images.py | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/images.py#L45-L195 | def _image_url(path, only_path=False, cache_buster=True, dst_color=None, src_color=None, inline=False, mime_type=None, spacing=None, collapse_x=None, collapse_y=None):
"""
src_color - a list of or a single color to be replaced by each corresponding dst_color colors
spacing - spaces to be added to the image
collapse_x, collapse_y - collapsable (layered) image of the given size (x, y)
"""
if inline or dst_color or spacing:
if not Image:
raise SassMissingDependency('PIL', 'image manipulation')
filepath = String.unquoted(path).value
fileext = os.path.splitext(filepath)[1].lstrip('.').lower()
if mime_type:
mime_type = String.unquoted(mime_type).value
if not mime_type:
mime_type = mimetypes.guess_type(filepath)[0]
if not mime_type:
mime_type = 'image/%s' % fileext
path = None
IMAGES_ROOT = _images_root()
if callable(IMAGES_ROOT):
try:
_file, _storage = list(IMAGES_ROOT(filepath))[0]
except IndexError:
filetime = None
else:
filetime = getmtime(_file, _storage)
if filetime is None:
filetime = 'NA'
elif inline or dst_color or spacing:
path = _storage.open(_file)
else:
_path = os.path.join(IMAGES_ROOT.rstrip(os.sep), filepath.strip('\\/'))
filetime = getmtime(_path)
if filetime is None:
filetime = 'NA'
elif inline or dst_color or spacing:
path = open(_path, 'rb')
BASE_URL = config.IMAGES_URL or config.STATIC_URL
if path:
dst_colors = [list(Color(v).value[:3]) for v in List.from_maybe(dst_color) if v]
src_color = Color.from_name('black') if src_color is None else src_color
src_colors = [tuple(Color(v).value[:3]) for v in List.from_maybe(src_color)]
len_colors = max(len(dst_colors), len(src_colors))
dst_colors = (dst_colors * len_colors)[:len_colors]
src_colors = (src_colors * len_colors)[:len_colors]
spacing = Number(0) if spacing is None else spacing
spacing = [int(Number(v).value) for v in List.from_maybe(spacing)]
spacing = (spacing * 4)[:4]
file_name, file_ext = os.path.splitext(os.path.normpath(filepath).replace(os.sep, '_'))
key = (filetime, src_color, dst_color, spacing)
asset_file = file_name + '-' + make_filename_hash(key) + file_ext
ASSETS_ROOT = _assets_root()
asset_path = os.path.join(ASSETS_ROOT, asset_file)
if os.path.exists(asset_path):
filepath = asset_file
BASE_URL = config.ASSETS_URL
if inline:
path = open(asset_path, 'rb')
url = make_data_url(mime_type, path.read())
else:
url = '%s%s' % (BASE_URL, filepath)
if cache_buster:
filetime = getmtime(asset_path)
url = add_cache_buster(url, filetime)
else:
simply_process = False
image = None
if fileext in ('cur',):
simply_process = True
else:
try:
image = Image.open(path)
except IOError:
if not collapse_x and not collapse_y and not dst_colors:
simply_process = True
if simply_process:
if inline:
url = make_data_url(mime_type, path.read())
else:
url = '%s%s' % (BASE_URL, filepath)
if cache_buster:
filetime = getmtime(asset_path)
url = add_cache_buster(url, filetime)
else:
width, height = collapse_x or image.size[0], collapse_y or image.size[1]
new_image = Image.new(
mode='RGBA',
size=(width + spacing[1] + spacing[3], height + spacing[0] + spacing[2]),
color=(0, 0, 0, 0)
)
for i, dst_color in enumerate(dst_colors):
src_color = src_colors[i]
pixdata = image.load()
for _y in xrange(image.size[1]):
for _x in xrange(image.size[0]):
pixel = pixdata[_x, _y]
if pixel[:3] == src_color:
pixdata[_x, _y] = tuple([int(c) for c in dst_color] + [pixel[3] if len(pixel) == 4 else 255])
iwidth, iheight = image.size
if iwidth != width or iheight != height:
cy = 0
while cy < iheight:
cx = 0
while cx < iwidth:
cropped_image = image.crop((cx, cy, cx + width, cy + height))
new_image.paste(cropped_image, (int(spacing[3]), int(spacing[0])), cropped_image)
cx += width
cy += height
else:
new_image.paste(image, (int(spacing[3]), int(spacing[0])))
if not inline:
try:
new_image.save(asset_path)
filepath = asset_file
BASE_URL = config.ASSETS_URL
if cache_buster:
filetime = getmtime(asset_path)
except IOError:
log.exception("Error while saving image")
inline = True # Retry inline version
url = os.path.join(config.ASSETS_URL.rstrip(os.sep), asset_file.lstrip(os.sep))
if cache_buster:
url = add_cache_buster(url, filetime)
if inline:
output = six.BytesIO()
new_image.save(output, format='PNG')
contents = output.getvalue()
output.close()
url = make_data_url(mime_type, contents)
else:
url = os.path.join(BASE_URL.rstrip('/'), filepath.lstrip('\\/'))
if cache_buster and filetime != 'NA':
url = add_cache_buster(url, filetime)
if not os.sep == '/':
url = url.replace(os.sep, '/')
if only_path:
return String.unquoted(url)
else:
return Url.unquoted(url) | [
"def",
"_image_url",
"(",
"path",
",",
"only_path",
"=",
"False",
",",
"cache_buster",
"=",
"True",
",",
"dst_color",
"=",
"None",
",",
"src_color",
"=",
"None",
",",
"inline",
"=",
"False",
",",
"mime_type",
"=",
"None",
",",
"spacing",
"=",
"None",
"... | src_color - a list of or a single color to be replaced by each corresponding dst_color colors
spacing - spaces to be added to the image
collapse_x, collapse_y - collapsable (layered) image of the given size (x, y) | [
"src_color",
"-",
"a",
"list",
"of",
"or",
"a",
"single",
"color",
"to",
"be",
"replaced",
"by",
"each",
"corresponding",
"dst_color",
"colors",
"spacing",
"-",
"spaces",
"to",
"be",
"added",
"to",
"the",
"image",
"collapse_x",
"collapse_y",
"-",
"collapsabl... | python | train |
robinandeer/puzzle | puzzle/plugins/vcf/mixins/case_mixin.py | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/case_mixin.py#L42-L61 | def case(self, case_id=None):
"""Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
A Case object
"""
if case_id:
for case in self.case_objs:
if case.case_id == case_id:
return case
else:
if self.cases:
return list(self.case_objs)[0]
return Case(case_id='unknown') | [
"def",
"case",
"(",
"self",
",",
"case_id",
"=",
"None",
")",
":",
"if",
"case_id",
":",
"for",
"case",
"in",
"self",
".",
"case_objs",
":",
"if",
"case",
".",
"case_id",
"==",
"case_id",
":",
"return",
"case",
"else",
":",
"if",
"self",
".",
"case... | Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
A Case object | [
"Return",
"a",
"Case",
"object"
] | python | train |
refnode/liquid | src/liquid/strscan.py | https://github.com/refnode/liquid/blob/8b2b5efc635b0dbfe610db9036fdb4ae3e3d5439/src/liquid/strscan.py#L470-L483 | def exists(self, regex):
"""
See what :meth:`skip_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.exists(' ')
5
>>> s.pos
0
Returns the number of characters matched if it does exist, or ``None``
otherwise.
"""
return self.search_full(regex, return_string=False, advance_pointer=False) | [
"def",
"exists",
"(",
"self",
",",
"regex",
")",
":",
"return",
"self",
".",
"search_full",
"(",
"regex",
",",
"return_string",
"=",
"False",
",",
"advance_pointer",
"=",
"False",
")"
] | See what :meth:`skip_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.exists(' ')
5
>>> s.pos
0
Returns the number of characters matched if it does exist, or ``None``
otherwise. | [
"See",
"what",
":",
"meth",
":",
"skip_until",
"would",
"return",
"without",
"advancing",
"the",
"pointer",
"."
] | python | train |
timkpaine/pyEX | pyEX/stocks.py | https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L476-L481 | def _companyToDF(c, token='', version=''):
'''internal'''
df = pd.io.json.json_normalize(c)
_toDatetime(df)
_reindex(df, 'symbol')
return df | [
"def",
"_companyToDF",
"(",
"c",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"df",
"=",
"pd",
".",
"io",
".",
"json",
".",
"json_normalize",
"(",
"c",
")",
"_toDatetime",
"(",
"df",
")",
"_reindex",
"(",
"df",
",",
"'symbol'",
"... | internal | [
"internal"
] | python | valid |
saltstack/salt | salt/engines/slack.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/slack.py#L326-L360 | def can_user_run(self, user, command, groups):
'''
Break out the permissions into the following:
Check whether a user is in any group, including whether a group has the '*' membership
:type user: str
:param user: The username being checked against
:type command: str
:param command: The command that is being invoked (e.g. test.ping)
:type groups: dict
:param groups: the dictionary with groups permissions structure.
:rtype: tuple
:returns: On a successful permitting match, returns 2-element tuple that contains
the name of the group that successfully matched, and a dictionary containing
the configuration of the group so it can be referenced.
On failure it returns an empty tuple
'''
log.info('%s wants to run %s with groups %s', user, command, groups)
for key, val in groups.items():
if user not in val['users']:
if '*' not in val['users']:
continue # this doesn't grant permissions, pass
if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()):
if '*' not in val['commands']:
continue # again, pass
log.info('Slack user %s permitted to run %s', user, command)
return (key, val,) # matched this group, return the group
log.info('Slack user %s denied trying to run %s', user, command)
return () | [
"def",
"can_user_run",
"(",
"self",
",",
"user",
",",
"command",
",",
"groups",
")",
":",
"log",
".",
"info",
"(",
"'%s wants to run %s with groups %s'",
",",
"user",
",",
"command",
",",
"groups",
")",
"for",
"key",
",",
"val",
"in",
"groups",
".",
"ite... | Break out the permissions into the following:
Check whether a user is in any group, including whether a group has the '*' membership
:type user: str
:param user: The username being checked against
:type command: str
:param command: The command that is being invoked (e.g. test.ping)
:type groups: dict
:param groups: the dictionary with groups permissions structure.
:rtype: tuple
:returns: On a successful permitting match, returns 2-element tuple that contains
the name of the group that successfully matched, and a dictionary containing
the configuration of the group so it can be referenced.
On failure it returns an empty tuple | [
"Break",
"out",
"the",
"permissions",
"into",
"the",
"following",
":"
] | python | train |
googlefonts/glyphsLib | Lib/glyphsLib/builder/names.py | https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/names.py#L55-L85 | def build_stylemap_names(
family_name, style_name, is_bold=False, is_italic=False, linked_style=None
):
"""Build UFO `styleMapFamilyName` and `styleMapStyleName` based on the
family and style names, and the entries in the "Style Linking" section
of the "Instances" tab in the "Font Info".
The value of `styleMapStyleName` can be either "regular", "bold", "italic"
or "bold italic", depending on the values of `is_bold` and `is_italic`.
The `styleMapFamilyName` is a combination of the `family_name` and the
`linked_style`.
If `linked_style` is unset or set to 'Regular', the linked style is equal
to the style_name with the last occurrences of the strings 'Regular',
'Bold' and 'Italic' stripped from it.
"""
styleMapStyleName = (
" ".join(
s for s in ("bold" if is_bold else "", "italic" if is_italic else "") if s
)
or "regular"
)
if not linked_style or linked_style == "Regular":
linked_style = _get_linked_style(style_name, is_bold, is_italic)
if linked_style:
styleMapFamilyName = (family_name or "") + " " + linked_style
else:
styleMapFamilyName = family_name
return styleMapFamilyName, styleMapStyleName | [
"def",
"build_stylemap_names",
"(",
"family_name",
",",
"style_name",
",",
"is_bold",
"=",
"False",
",",
"is_italic",
"=",
"False",
",",
"linked_style",
"=",
"None",
")",
":",
"styleMapStyleName",
"=",
"(",
"\" \"",
".",
"join",
"(",
"s",
"for",
"s",
"in",... | Build UFO `styleMapFamilyName` and `styleMapStyleName` based on the
family and style names, and the entries in the "Style Linking" section
of the "Instances" tab in the "Font Info".
The value of `styleMapStyleName` can be either "regular", "bold", "italic"
or "bold italic", depending on the values of `is_bold` and `is_italic`.
The `styleMapFamilyName` is a combination of the `family_name` and the
`linked_style`.
If `linked_style` is unset or set to 'Regular', the linked style is equal
to the style_name with the last occurrences of the strings 'Regular',
'Bold' and 'Italic' stripped from it. | [
"Build",
"UFO",
"styleMapFamilyName",
"and",
"styleMapStyleName",
"based",
"on",
"the",
"family",
"and",
"style",
"names",
"and",
"the",
"entries",
"in",
"the",
"Style",
"Linking",
"section",
"of",
"the",
"Instances",
"tab",
"in",
"the",
"Font",
"Info",
"."
] | python | train |
jilljenn/tryalgo | tryalgo/binary_search.py | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/binary_search.py#L46-L64 | def continuous_binary_search(f, lo, hi, gap=1e-4):
"""Binary search for a function
:param f: boolean monotone function with f(hi) = True
:param int lo:
:param int hi: with hi >= lo
:param float gap:
:returns: first value x in [lo,hi] such that f(x),
x is computed up to some precision
:complexity: `O(log((hi-lo)/gap))`
"""
while hi - lo > gap:
# in other languages you can force floating division by using 2.0
mid = (lo + hi) / 2.
if f(mid):
hi = mid
else:
lo = mid
return lo | [
"def",
"continuous_binary_search",
"(",
"f",
",",
"lo",
",",
"hi",
",",
"gap",
"=",
"1e-4",
")",
":",
"while",
"hi",
"-",
"lo",
">",
"gap",
":",
"# in other languages you can force floating division by using 2.0",
"mid",
"=",
"(",
"lo",
"+",
"hi",
")",
"/",
... | Binary search for a function
:param f: boolean monotone function with f(hi) = True
:param int lo:
:param int hi: with hi >= lo
:param float gap:
:returns: first value x in [lo,hi] such that f(x),
x is computed up to some precision
:complexity: `O(log((hi-lo)/gap))` | [
"Binary",
"search",
"for",
"a",
"function"
] | python | train |
Jammy2211/PyAutoLens | autolens/model/profiles/geometry_profiles.py | https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/profiles/geometry_profiles.py#L285-L299 | def rotate_grid_from_profile(self, grid_elliptical):
""" Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the \
unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).
This routine is used after computing deflection angles in the reference frame of the profile, so that the \
deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.
Parameters
----------
grid_elliptical : TransformedGrid(ndarray)
The (y, x) coordinates in the reference frame of an elliptical profile.
"""
y = np.add(np.multiply(grid_elliptical[:, 1], self.sin_phi), np.multiply(grid_elliptical[:, 0], self.cos_phi))
x = np.add(np.multiply(grid_elliptical[:, 1], self.cos_phi), - np.multiply(grid_elliptical[:, 0], self.sin_phi))
return np.vstack((y, x)).T | [
"def",
"rotate_grid_from_profile",
"(",
"self",
",",
"grid_elliptical",
")",
":",
"y",
"=",
"np",
".",
"add",
"(",
"np",
".",
"multiply",
"(",
"grid_elliptical",
"[",
":",
",",
"1",
"]",
",",
"self",
".",
"sin_phi",
")",
",",
"np",
".",
"multiply",
"... | Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the \
unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).
This routine is used after computing deflection angles in the reference frame of the profile, so that the \
deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.
Parameters
----------
grid_elliptical : TransformedGrid(ndarray)
The (y, x) coordinates in the reference frame of an elliptical profile. | [
"Rotate",
"a",
"grid",
"of",
"elliptical",
"(",
"y",
"x",
")",
"coordinates",
"from",
"the",
"reference",
"frame",
"of",
"the",
"profile",
"back",
"to",
"the",
"\\",
"unrotated",
"coordinate",
"grid",
"reference",
"frame",
"(",
"coordinates",
"are",
"not",
... | python | valid |
inasafe/inasafe | safe/gui/tools/options_dialog.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/options_dialog.py#L599-L615 | def set_user_dir(self):
"""Auto-connect slot activated when user dir checkbox is toggled.
"""
is_checked = self.custom_UseUserDirectory_checkbox.isChecked()
if is_checked:
# Show previous templates dir
path = setting(
key='defaultUserDirectory',
default='',
expected_type=str,
qsettings=self.settings)
else:
# Set the template report dir to ''
path = temp_dir('impacts')
self.leUserDirectoryPath.setText(path)
self.splitter_user_directory.setEnabled(is_checked) | [
"def",
"set_user_dir",
"(",
"self",
")",
":",
"is_checked",
"=",
"self",
".",
"custom_UseUserDirectory_checkbox",
".",
"isChecked",
"(",
")",
"if",
"is_checked",
":",
"# Show previous templates dir",
"path",
"=",
"setting",
"(",
"key",
"=",
"'defaultUserDirectory'",... | Auto-connect slot activated when user dir checkbox is toggled. | [
"Auto",
"-",
"connect",
"slot",
"activated",
"when",
"user",
"dir",
"checkbox",
"is",
"toggled",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/outlineexplorer/widgets.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/outlineexplorer/widgets.py#L376-L528 | def populate_branch(self, editor, root_item, tree_cache=None):
"""
Generates an outline of the editor's content and stores the result
in a cache.
"""
if tree_cache is None:
tree_cache = {}
# Removing cached items for which line is > total line nb
for _l in list(tree_cache.keys()):
if _l >= editor.get_line_count():
# Checking if key is still in tree cache in case one of its
# ancestors was deleted in the meantime (deleting all children):
if _l in tree_cache:
remove_from_tree_cache(tree_cache, line=_l)
ancestors = [(root_item, 0)]
cell_ancestors = [(root_item, 0)]
previous_item = None
previous_level = None
prev_cell_level = None
prev_cell_item = None
oe_data = editor.get_outlineexplorer_data()
for block_nb in range(editor.get_line_count()):
line_nb = block_nb+1
data = oe_data.get(block_nb)
level = None if data is None else data.fold_level
citem, clevel, _d = tree_cache.get(line_nb, (None, None, ""))
# Skip iteration if line is not the first line of a foldable block
if level is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Searching for class/function statements
not_class_nor_function = data.is_not_class_nor_function()
if not not_class_nor_function:
class_name = data.get_class_name()
if class_name is None:
func_name = data.get_function_name()
if func_name is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Skip iteration for if/else/try/for/etc foldable blocks.
if not_class_nor_function and not data.is_comment():
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
cname = to_text_string(citem.text(0))
cparent = citem.parent
# Blocks for Cell Groups.
if (data is not None and data.def_type == data.CELL and
self.group_cells):
preceding = (root_item if previous_item is None
else previous_item)
cell_level = data.cell_level
if prev_cell_level is not None:
if cell_level == prev_cell_level:
pass
elif cell_level > prev_cell_level:
cell_ancestors.append((prev_cell_item,
prev_cell_level))
else:
while (len(cell_ancestors) > 1 and
cell_level <= prev_cell_level):
cell_ancestors.pop(-1)
_item, prev_cell_level = cell_ancestors[-1]
parent, _level = cell_ancestors[-1]
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = CellItem(data.def_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
ancestors = [(item, 0)]
prev_cell_level = cell_level
prev_cell_item = item
previous_item = item
continue
# Blocks for Code Groups.
if previous_level is not None:
if level == previous_level:
pass
elif level > previous_level:
ancestors.append((previous_item, previous_level))
else:
while len(ancestors) > 1 and level <= previous_level:
ancestors.pop(-1)
_item, previous_level = ancestors[-1]
parent, _level = ancestors[-1]
preceding = root_item if previous_item is None else previous_item
if not_class_nor_function and data.is_comment():
if not self.show_comments:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
if data.def_type == data.CELL:
item = CellItem(data.def_name, line_nb, parent, preceding)
else:
item = CommentItem(data.text, line_nb, parent, preceding)
elif class_name is not None:
if citem is not None:
if (class_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = ClassItem(class_name, line_nb, parent, preceding)
else:
if citem is not None:
if (func_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = FunctionItem(func_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
previous_level = level
previous_item = item
return tree_cache | [
"def",
"populate_branch",
"(",
"self",
",",
"editor",
",",
"root_item",
",",
"tree_cache",
"=",
"None",
")",
":",
"if",
"tree_cache",
"is",
"None",
":",
"tree_cache",
"=",
"{",
"}",
"# Removing cached items for which line is > total line nb\r",
"for",
"_l",
"in",
... | Generates an outline of the editor's content and stores the result
in a cache. | [
"Generates",
"an",
"outline",
"of",
"the",
"editor",
"s",
"content",
"and",
"stores",
"the",
"result",
"in",
"a",
"cache",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_args.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_args.py#L179-L193 | def default_args(self):
"""Parse args and return default args."""
if self._default_args is None:
self._default_args, unknown = self.parser.parse_known_args() # pylint: disable=W0612
# reinitialize logger with new log level and api settings
self.tcex._logger()
if self._default_args.tc_aot_enabled:
# block for AOT message and get params
params = self.tcex.playbook.aot_blpop()
self.inject_params(params)
elif self._default_args.tc_secure_params:
# inject secure params from API
params = self._load_secure_params()
self.inject_params(params)
return self._default_args | [
"def",
"default_args",
"(",
"self",
")",
":",
"if",
"self",
".",
"_default_args",
"is",
"None",
":",
"self",
".",
"_default_args",
",",
"unknown",
"=",
"self",
".",
"parser",
".",
"parse_known_args",
"(",
")",
"# pylint: disable=W0612",
"# reinitialize logger wi... | Parse args and return default args. | [
"Parse",
"args",
"and",
"return",
"default",
"args",
"."
] | python | train |
myint/rstcheck | rstcheck.py | https://github.com/myint/rstcheck/blob/2f975906b75f3b88d501ef3b13d213815cf7079a/rstcheck.py#L625-L640 | def parse_gcc_style_error_message(message, filename, has_column=True):
"""Parse GCC-style error message.
Return (line_number, message). Raise ValueError if message cannot be
parsed.
"""
colons = 2 if has_column else 1
prefix = filename + ':'
if not message.startswith(prefix):
raise ValueError()
message = message[len(prefix):]
split_message = message.split(':', colons)
line_number = int(split_message[0])
return (line_number,
split_message[colons].strip()) | [
"def",
"parse_gcc_style_error_message",
"(",
"message",
",",
"filename",
",",
"has_column",
"=",
"True",
")",
":",
"colons",
"=",
"2",
"if",
"has_column",
"else",
"1",
"prefix",
"=",
"filename",
"+",
"':'",
"if",
"not",
"message",
".",
"startswith",
"(",
"... | Parse GCC-style error message.
Return (line_number, message). Raise ValueError if message cannot be
parsed. | [
"Parse",
"GCC",
"-",
"style",
"error",
"message",
"."
] | python | train |
wolfhong/formic | build.py | https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/build.py#L47-L56 | def pylint_jenkins():
"""Run PyLint on the code and produce a report suitable for the
Jenkins plugin 'violations'.
Note that there is a bug in the Violations plugin which means that
absolute paths to source (produced by PyLint) are not read. The sed command
removes the workspace part of the path making everything good again. This
requires the environment variable WORKSPACE from Jenkins"""
cmd = '{0} formic -f parseable'.format(PYLINT_EXE).split(' ')
return dovetail.call(cmd, stdout=BUILD_PYLINT) | [
"def",
"pylint_jenkins",
"(",
")",
":",
"cmd",
"=",
"'{0} formic -f parseable'",
".",
"format",
"(",
"PYLINT_EXE",
")",
".",
"split",
"(",
"' '",
")",
"return",
"dovetail",
".",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"BUILD_PYLINT",
")"
] | Run PyLint on the code and produce a report suitable for the
Jenkins plugin 'violations'.
Note that there is a bug in the Violations plugin which means that
absolute paths to source (produced by PyLint) are not read. The sed command
removes the workspace part of the path making everything good again. This
requires the environment variable WORKSPACE from Jenkins | [
"Run",
"PyLint",
"on",
"the",
"code",
"and",
"produce",
"a",
"report",
"suitable",
"for",
"the",
"Jenkins",
"plugin",
"violations",
"."
] | python | train |
snare/voltron | voltron/core.py | https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/core.py#L251-L261 | def cancel_queue(self):
"""
Cancel all requests in the queue so we can exit.
"""
q = list(self.queue)
self.queue = []
log.debug("Canceling requests: {}".format(q))
for req in q:
req.response = APIServerNotRunningErrorResponse()
for req in q:
req.signal() | [
"def",
"cancel_queue",
"(",
"self",
")",
":",
"q",
"=",
"list",
"(",
"self",
".",
"queue",
")",
"self",
".",
"queue",
"=",
"[",
"]",
"log",
".",
"debug",
"(",
"\"Canceling requests: {}\"",
".",
"format",
"(",
"q",
")",
")",
"for",
"req",
"in",
"q",... | Cancel all requests in the queue so we can exit. | [
"Cancel",
"all",
"requests",
"in",
"the",
"queue",
"so",
"we",
"can",
"exit",
"."
] | python | train |
myint/autoflake | autoflake.py | https://github.com/myint/autoflake/blob/68fea68646922b920d55975f9f2adaeafd84df4f/autoflake.py#L108-L112 | def unused_import_line_numbers(messages):
"""Yield line numbers of unused imports."""
for message in messages:
if isinstance(message, pyflakes.messages.UnusedImport):
yield message.lineno | [
"def",
"unused_import_line_numbers",
"(",
"messages",
")",
":",
"for",
"message",
"in",
"messages",
":",
"if",
"isinstance",
"(",
"message",
",",
"pyflakes",
".",
"messages",
".",
"UnusedImport",
")",
":",
"yield",
"message",
".",
"lineno"
] | Yield line numbers of unused imports. | [
"Yield",
"line",
"numbers",
"of",
"unused",
"imports",
"."
] | python | test |
Sliim/soundcloud-syncer | ssyncer/suser.py | https://github.com/Sliim/soundcloud-syncer/blob/f15142677bf8e5fb54f40b0eb9a36f21ba940ab6/ssyncer/suser.py#L50-L56 | def get_playlists(self, offset=0, limit=50):
""" Get user's playlists. """
response = self.client.get(
self.client.USER_PLAYLISTS % (self.name, offset, limit))
return self._parse_response(response, splaylist)
return playlists | [
"def",
"get_playlists",
"(",
"self",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"50",
")",
":",
"response",
"=",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"client",
".",
"USER_PLAYLISTS",
"%",
"(",
"self",
".",
"name",
",",
"offset",
",... | Get user's playlists. | [
"Get",
"user",
"s",
"playlists",
"."
] | python | train |
sdispater/poetry | poetry/installation/installer.py | https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/installation/installer.py#L111-L119 | def lock(self): # type: () -> Installer
"""
Prepare the installer for locking only.
"""
self.update()
self.execute_operations(False)
self._lock = True
return self | [
"def",
"lock",
"(",
"self",
")",
":",
"# type: () -> Installer",
"self",
".",
"update",
"(",
")",
"self",
".",
"execute_operations",
"(",
"False",
")",
"self",
".",
"_lock",
"=",
"True",
"return",
"self"
] | Prepare the installer for locking only. | [
"Prepare",
"the",
"installer",
"for",
"locking",
"only",
"."
] | python | train |
jalmeroth/pymusiccast | pymusiccast/__init__.py | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L162-L168 | def handle_status(self):
"""Handle status from device"""
status = self.get_status()
if status:
# Update main-zone
self.zones['main'].update_status(status) | [
"def",
"handle_status",
"(",
"self",
")",
":",
"status",
"=",
"self",
".",
"get_status",
"(",
")",
"if",
"status",
":",
"# Update main-zone",
"self",
".",
"zones",
"[",
"'main'",
"]",
".",
"update_status",
"(",
"status",
")"
] | Handle status from device | [
"Handle",
"status",
"from",
"device"
] | python | train |
AleksTk/table-logger | table_logger/table_logger.py | https://github.com/AleksTk/table-logger/blob/d2326e053fb972ed7ae4950d0e8c6e7c9f4399b8/table_logger/table_logger.py#L208-L254 | def setup_formatters(self, *args):
"""Setup formatters by observing the first row.
Args:
*args: row cells
"""
formatters = []
col_offset = 0
# initialize formatters for row-id, timestamp and time-diff columns
if self.rownum:
formatters.append(fmt.RowNumberFormatter.setup(0))
col_offset += 1
if self.timestamp:
formatters.append(fmt.DatetimeFormatter.setup(
datetime.datetime.now(),
fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format,
col_width=26))
col_offset += 1
if self.time_diff:
formatters.append(fmt.TimeDeltaFormatter.setup(0))
col_offset += 1
# initialize formatters for user-defined columns
for coli, value in enumerate(args):
fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)
kwargs = {}
# set column width
if self.default_colwidth is not None:
kwargs['col_width'] = self.default_colwidth
if coli in self.column_widths:
kwargs['col_width'] = self.column_widths[coli]
elif self.columns and self.columns[coli + col_offset] in self.column_widths:
kwargs['col_width'] = self.column_widths[self.columns[coli + col_offset]]
# set formatter function
if fmt_class == fmt.FloatFormatter and self.float_format is not None:
kwargs['fmt'] = self.float_format
if coli in self.column_formatters:
kwargs['fmt'] = self.column_formatters[coli]
elif self.columns and self.columns[coli + col_offset] in self.column_formatters:
kwargs['fmt'] = self.column_formatters[self.columns[coli + col_offset]]
formatter = fmt_class.setup(value, **kwargs)
formatters.append(formatter)
self.formatters = formatters | [
"def",
"setup_formatters",
"(",
"self",
",",
"*",
"args",
")",
":",
"formatters",
"=",
"[",
"]",
"col_offset",
"=",
"0",
"# initialize formatters for row-id, timestamp and time-diff columns",
"if",
"self",
".",
"rownum",
":",
"formatters",
".",
"append",
"(",
"fmt... | Setup formatters by observing the first row.
Args:
*args: row cells | [
"Setup",
"formatters",
"by",
"observing",
"the",
"first",
"row",
".",
"Args",
":",
"*",
"args",
":",
"row",
"cells"
] | python | valid |
log2timeline/plaso | plaso/parsers/syslog.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/syslog.py#L214-L237 | def _UpdateYear(self, mediator, month):
"""Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1.
"""
if not self._year_use:
self._year_use = mediator.GetEstimatedYear()
if not self._maximum_year:
self._maximum_year = mediator.GetLatestYear()
if not self._last_month:
self._last_month = month
return
# Some syslog daemons allow out-of-order sequences, so allow some leeway
# to not cause Apr->May->Apr to cause the year to increment.
# See http://bugzilla.adiscon.com/show_bug.cgi?id=527
if self._last_month > (month + 1):
if self._year_use != self._maximum_year:
self._year_use += 1
self._last_month = month | [
"def",
"_UpdateYear",
"(",
"self",
",",
"mediator",
",",
"month",
")",
":",
"if",
"not",
"self",
".",
"_year_use",
":",
"self",
".",
"_year_use",
"=",
"mediator",
".",
"GetEstimatedYear",
"(",
")",
"if",
"not",
"self",
".",
"_maximum_year",
":",
"self",
... | Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1. | [
"Updates",
"the",
"year",
"to",
"use",
"for",
"events",
"based",
"on",
"last",
"observed",
"month",
"."
] | python | train |
smarie/python-valid8 | valid8/validation_lib/collections.py | https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/validation_lib/collections.py#L353-L404 | def on_each_(*validation_functions_collection):
"""
Generates a validation_function for collection inputs where each element of the input will be validated against the
corresponding validation_function(s) in the validation_functions_collection. Validators inside the tuple can be
provided as a list for convenience, this will be replaced with an 'and_' operator if the list has more than one
element.
Note that if you want to apply the SAME validation_functions to all elements in the input, you should rather use
on_all_.
:param validation_functions_collection: a sequence of (base validation function or list of base validation functions
to use).
A base validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or
a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list).
Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/)
expressions can be used instead of callables, they will be transformed to functions automatically.
:return:
"""
# create a tuple of validation functions.
validation_function_funcs = tuple(_process_validation_function_s(validation_func)
for validation_func in validation_functions_collection)
# generate a validation function based on the tuple of validation_functions lists
def on_each_val(x # type: Tuple
):
if len(validation_function_funcs) != len(x):
raise Failure('on_each_: x does not have the same number of elements than validation_functions_collection.')
else:
# apply each validation_function on the input with the same position in the collection
idx = -1
for elt, validation_function_func in zip(x, validation_function_funcs):
idx += 1
try:
res = validation_function_func(elt)
except Exception as e:
raise InvalidItemInSequence(wrong_value=elt,
wrapped_func=validation_function_func,
validation_outcome=e)
if not result_is_success(res):
# one validation_function was unhappy > raise
# raise Failure('on_each_(' + str(validation_functions_collection) + '): _validation_function [' + str(idx)
# + '] (' + str(validation_functions_collection[idx]) + ') failed validation for '
# 'input ' + str(x[idx]))
raise InvalidItemInSequence(wrong_value=elt,
wrapped_func=validation_function_func,
validation_outcome=res)
return True
on_each_val.__name__ = 'map_<{}>_on_elts' \
''.format('(' + ', '.join([get_callable_name(f) for f in validation_function_funcs]) + ')')
return on_each_val | [
"def",
"on_each_",
"(",
"*",
"validation_functions_collection",
")",
":",
"# create a tuple of validation functions.",
"validation_function_funcs",
"=",
"tuple",
"(",
"_process_validation_function_s",
"(",
"validation_func",
")",
"for",
"validation_func",
"in",
"validation_func... | Generates a validation_function for collection inputs where each element of the input will be validated against the
corresponding validation_function(s) in the validation_functions_collection. Validators inside the tuple can be
provided as a list for convenience, this will be replaced with an 'and_' operator if the list has more than one
element.
Note that if you want to apply the SAME validation_functions to all elements in the input, you should rather use
on_all_.
:param validation_functions_collection: a sequence of (base validation function or list of base validation functions
to use).
A base validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or
a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list).
Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/)
expressions can be used instead of callables, they will be transformed to functions automatically.
:return: | [
"Generates",
"a",
"validation_function",
"for",
"collection",
"inputs",
"where",
"each",
"element",
"of",
"the",
"input",
"will",
"be",
"validated",
"against",
"the",
"corresponding",
"validation_function",
"(",
"s",
")",
"in",
"the",
"validation_functions_collection"... | python | train |
spotify/docker_interface | docker_interface/util.py | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L77-L111 | def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance | [
"def",
"get_value",
"(",
"instance",
",",
"path",
",",
"ref",
"=",
"None",
")",
":",
"for",
"part",
"in",
"split_path",
"(",
"path",
",",
"ref",
")",
":",
"if",
"isinstance",
"(",
"instance",
",",
"list",
")",
":",
"part",
"=",
"int",
"(",
"part",
... | Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary | [
"Get",
"the",
"value",
"from",
"instance",
"at",
"the",
"given",
"path",
"."
] | python | train |
mozilla-releng/scriptworker | scriptworker/cot/verify.py | https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/cot/verify.py#L1690-L1703 | async def verify_docker_image_task(chain, link):
"""Verify the docker image Link.
Args:
chain (ChainOfTrust): the chain we're operating on.
link (LinkOfTrust): the task link we're checking.
"""
errors = []
# workerType
worker_type = get_worker_type(link.task)
if worker_type not in chain.context.config['valid_docker_image_worker_types']:
errors.append("{} is not a valid docker-image workerType!".format(worker_type))
raise_on_errors(errors) | [
"async",
"def",
"verify_docker_image_task",
"(",
"chain",
",",
"link",
")",
":",
"errors",
"=",
"[",
"]",
"# workerType",
"worker_type",
"=",
"get_worker_type",
"(",
"link",
".",
"task",
")",
"if",
"worker_type",
"not",
"in",
"chain",
".",
"context",
".",
... | Verify the docker image Link.
Args:
chain (ChainOfTrust): the chain we're operating on.
link (LinkOfTrust): the task link we're checking. | [
"Verify",
"the",
"docker",
"image",
"Link",
"."
] | python | train |
craffel/mir_eval | mir_eval/pattern.py | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L571-L614 | def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
"""First n target proportion establishment recall metric.
This metric is similar is similar to the establishment FPR score, but it
only takes into account the first n estimated patterns and it only
outputs the Recall value of it.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
... ref_patterns, est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix.
(Default value = 5)
Returns
-------
recall : float
The first n target proportion Recall.
"""
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
return R | [
"def",
"first_n_target_proportion_R",
"(",
"reference_patterns",
",",
"estimated_patterns",
",",
"n",
"=",
"5",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"# If no patterns were provided, metric is zero",
"if",
"_n_onset_midi",
"(",
"... | First n target proportion establishment recall metric.
This metric is similar is similar to the establishment FPR score, but it
only takes into account the first n estimated patterns and it only
outputs the Recall value of it.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
... ref_patterns, est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix.
(Default value = 5)
Returns
-------
recall : float
The first n target proportion Recall. | [
"First",
"n",
"target",
"proportion",
"establishment",
"recall",
"metric",
"."
] | python | train |
saltstack/salt | salt/cli/salt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/salt.py#L350-L362 | def _progress_ret(self, progress, out):
'''
Print progress events
'''
import salt.output
# Get the progress bar
if not hasattr(self, 'progress_bar'):
try:
self.progress_bar = salt.output.get_progress(self.config, out, progress)
except Exception:
raise LoaderError('\nWARNING: Install the `progressbar` python package. '
'Requested job was still run but output cannot be displayed.\n')
salt.output.update_progress(self.config, progress, self.progress_bar, out) | [
"def",
"_progress_ret",
"(",
"self",
",",
"progress",
",",
"out",
")",
":",
"import",
"salt",
".",
"output",
"# Get the progress bar",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'progress_bar'",
")",
":",
"try",
":",
"self",
".",
"progress_bar",
"=",
"salt... | Print progress events | [
"Print",
"progress",
"events"
] | python | train |
wbond/asn1crypto | asn1crypto/core.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L1808-L1820 | def set(self, value):
"""
Sets the value of the object
:param value:
True, False or another value that works with bool()
"""
self._native = bool(value)
self.contents = b'\x00' if not value else b'\xff'
self._header = None
if self._trailer != b'':
self._trailer = b'' | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_native",
"=",
"bool",
"(",
"value",
")",
"self",
".",
"contents",
"=",
"b'\\x00'",
"if",
"not",
"value",
"else",
"b'\\xff'",
"self",
".",
"_header",
"=",
"None",
"if",
"self",
".",
"... | Sets the value of the object
:param value:
True, False or another value that works with bool() | [
"Sets",
"the",
"value",
"of",
"the",
"object"
] | python | train |
appliedsec/pygeoip | pygeoip/__init__.py | https://github.com/appliedsec/pygeoip/blob/2a725df0b727e8b08f217ab84f7b8243c42554f5/pygeoip/__init__.py#L243-L265 | def _get_org(self, ipnum):
"""
Seek and return organization or ISP name for ipnum.
Return org/isp name.
:arg ipnum: Result of ip2long conversion
"""
seek_org = self._seek_country(ipnum)
if seek_org == self._databaseSegments:
return None
read_length = (2 * self._recordLength - 1) * self._databaseSegments
try:
self._lock.acquire()
self._fp.seek(seek_org + read_length, os.SEEK_SET)
buf = self._fp.read(const.MAX_ORG_RECORD_LENGTH)
finally:
self._lock.release()
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
return buf[:buf.index(chr(0))] | [
"def",
"_get_org",
"(",
"self",
",",
"ipnum",
")",
":",
"seek_org",
"=",
"self",
".",
"_seek_country",
"(",
"ipnum",
")",
"if",
"seek_org",
"==",
"self",
".",
"_databaseSegments",
":",
"return",
"None",
"read_length",
"=",
"(",
"2",
"*",
"self",
".",
"... | Seek and return organization or ISP name for ipnum.
Return org/isp name.
:arg ipnum: Result of ip2long conversion | [
"Seek",
"and",
"return",
"organization",
"or",
"ISP",
"name",
"for",
"ipnum",
".",
"Return",
"org",
"/",
"isp",
"name",
"."
] | python | valid |
decryptus/sonicprobe | sonicprobe/libs/threading_tcp_server.py | https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/threading_tcp_server.py#L59-L69 | def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.workerpool.run(self.process_request_thread,
**{'request': request,
'client_address': client_address}) | [
"def",
"handle_request",
"(",
"self",
")",
":",
"try",
":",
"request",
",",
"client_address",
"=",
"self",
".",
"get_request",
"(",
")",
"except",
"socket",
".",
"error",
":",
"return",
"if",
"self",
".",
"verify_request",
"(",
"request",
",",
"client_addr... | simply collect requests and put them on the queue for the workers. | [
"simply",
"collect",
"requests",
"and",
"put",
"them",
"on",
"the",
"queue",
"for",
"the",
"workers",
"."
] | python | train |
UUDigitalHumanitieslab/tei_reader | tei_reader/models/division.py | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/division.py#L6-L14 | def text(self):
"""Get the entire text content as str"""
divisions = list(self.divisions)
if len(divisions) == 0:
return ''
elif len(divisions) == 1:
return divisions[0].text.strip()
else:
return super().text | [
"def",
"text",
"(",
"self",
")",
":",
"divisions",
"=",
"list",
"(",
"self",
".",
"divisions",
")",
"if",
"len",
"(",
"divisions",
")",
"==",
"0",
":",
"return",
"''",
"elif",
"len",
"(",
"divisions",
")",
"==",
"1",
":",
"return",
"divisions",
"["... | Get the entire text content as str | [
"Get",
"the",
"entire",
"text",
"content",
"as",
"str"
] | python | train |
inasafe/inasafe | safe/gui/tools/peta_bencana_dialog.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/peta_bencana_dialog.py#L372-L384 | def copy_style(shapefile_path):
"""Copy style from the OSM resource directory to the output path.
.. versionadded: 3.3
:param shapefile_path: Path to the shapefile that should get the path
added.
:type shapefile_path: basestring
"""
source_qml_path = resources_path('petabencana', 'flood-style.qml')
output_qml_path = shapefile_path.replace('shp', 'qml')
LOGGER.info('Copying qml to: %s' % output_qml_path)
copy(source_qml_path, output_qml_path) | [
"def",
"copy_style",
"(",
"shapefile_path",
")",
":",
"source_qml_path",
"=",
"resources_path",
"(",
"'petabencana'",
",",
"'flood-style.qml'",
")",
"output_qml_path",
"=",
"shapefile_path",
".",
"replace",
"(",
"'shp'",
",",
"'qml'",
")",
"LOGGER",
".",
"info",
... | Copy style from the OSM resource directory to the output path.
.. versionadded: 3.3
:param shapefile_path: Path to the shapefile that should get the path
added.
:type shapefile_path: basestring | [
"Copy",
"style",
"from",
"the",
"OSM",
"resource",
"directory",
"to",
"the",
"output",
"path",
"."
] | python | train |
urinieto/msaf | msaf/algorithms/fmc2d/utils_2dfmc.py | https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L11-L45 | def resample_mx(X, incolpos, outcolpos):
"""
Y = resample_mx(X, incolpos, outcolpos)
X is taken as a set of columns, each starting at 'time'
colpos, and continuing until the start of the next column.
Y is a similar matrix, with time boundaries defined by
outcolpos. Each column of Y is a duration-weighted average of
the overlapping columns of X.
2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg
-> python: TBM, 2011-11-05, TESTED
"""
noutcols = len(outcolpos)
Y = np.zeros((X.shape[0], noutcols))
# assign 'end times' to final columns
if outcolpos.max() > incolpos.max():
incolpos = np.concatenate([incolpos,[outcolpos.max()]])
X = np.concatenate([X, X[:,-1].reshape(X.shape[0],1)], axis=1)
outcolpos = np.concatenate([outcolpos, [outcolpos[-1]]])
# durations (default weights) of input columns)
incoldurs = np.concatenate([np.diff(incolpos), [1]])
for c in range(noutcols):
firstincol = np.where(incolpos <= outcolpos[c])[0][-1]
firstincolnext = np.where(incolpos < outcolpos[c+1])[0][-1]
lastincol = max(firstincol,firstincolnext)
# default weights
wts = copy.deepcopy(incoldurs[firstincol:lastincol+1])
# now fix up by partial overlap at ends
if len(wts) > 1:
wts[0] = wts[0] - (outcolpos[c] - incolpos[firstincol])
wts[-1] = wts[-1] - (incolpos[lastincol+1] - outcolpos[c+1])
wts = wts * 1. / float(sum(wts))
Y[:,c] = np.dot(X[:,firstincol:lastincol+1], wts)
# done
return Y | [
"def",
"resample_mx",
"(",
"X",
",",
"incolpos",
",",
"outcolpos",
")",
":",
"noutcols",
"=",
"len",
"(",
"outcolpos",
")",
"Y",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"noutcols",
")",
")",
"# assign 'end times' to ... | Y = resample_mx(X, incolpos, outcolpos)
X is taken as a set of columns, each starting at 'time'
colpos, and continuing until the start of the next column.
Y is a similar matrix, with time boundaries defined by
outcolpos. Each column of Y is a duration-weighted average of
the overlapping columns of X.
2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg
-> python: TBM, 2011-11-05, TESTED | [
"Y",
"=",
"resample_mx",
"(",
"X",
"incolpos",
"outcolpos",
")",
"X",
"is",
"taken",
"as",
"a",
"set",
"of",
"columns",
"each",
"starting",
"at",
"time",
"colpos",
"and",
"continuing",
"until",
"the",
"start",
"of",
"the",
"next",
"column",
".",
"Y",
"... | python | test |
pybel/pybel-tools | src/pybel_tools/assembler/html/assembler.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/assembler/html/assembler.py#L179-L181 | def get_triplet_tuple(a: BaseEntity, b: BaseEntity, c: BaseEntity) -> Tuple[str, str, str, str, str, str]:
"""Get the triple as a tuple of BEL/hashes."""
return a.as_bel(), a.sha512, b.as_bel(), b.sha512, c.as_bel(), c.sha512 | [
"def",
"get_triplet_tuple",
"(",
"a",
":",
"BaseEntity",
",",
"b",
":",
"BaseEntity",
",",
"c",
":",
"BaseEntity",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
",",
"str",
",",
"str",
",",
"str",
",",
"str",
"]",
":",
"return",
"a",
".",
"as_bel",
... | Get the triple as a tuple of BEL/hashes. | [
"Get",
"the",
"triple",
"as",
"a",
"tuple",
"of",
"BEL",
"/",
"hashes",
"."
] | python | valid |
idlesign/django-admirarchy | admirarchy/utils.py | https://github.com/idlesign/django-admirarchy/blob/723e4fd212fdebcc156492cb16b9d65356f5ca73/admirarchy/utils.py#L45-L54 | def action_checkbox(self, obj):
"""Renders checkboxes.
Disable checkbox for parent item navigation link.
"""
if getattr(obj, Hierarchy.UPPER_LEVEL_MODEL_ATTR, False):
return ''
return super(HierarchicalModelAdmin, self).action_checkbox(obj) | [
"def",
"action_checkbox",
"(",
"self",
",",
"obj",
")",
":",
"if",
"getattr",
"(",
"obj",
",",
"Hierarchy",
".",
"UPPER_LEVEL_MODEL_ATTR",
",",
"False",
")",
":",
"return",
"''",
"return",
"super",
"(",
"HierarchicalModelAdmin",
",",
"self",
")",
".",
"act... | Renders checkboxes.
Disable checkbox for parent item navigation link. | [
"Renders",
"checkboxes",
"."
] | python | train |
stuaxo/vext | setup.py | https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/setup.py#L199-L205 | def depends_on(self, dependency):
"""
List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow'
"""
packages = self.package_info()
return [package for package in packages if dependency in package.get("requires", "")] | [
"def",
"depends_on",
"(",
"self",
",",
"dependency",
")",
":",
"packages",
"=",
"self",
".",
"package_info",
"(",
")",
"return",
"[",
"package",
"for",
"package",
"in",
"packages",
"if",
"dependency",
"in",
"package",
".",
"get",
"(",
"\"requires\"",
",",
... | List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow' | [
"List",
"of",
"packages",
"that",
"depend",
"on",
"dependency",
":",
"param",
"dependency",
":",
"package",
"name",
"e",
".",
"g",
".",
"vext",
"or",
"Pillow"
] | python | train |
bcbio/bcbio-nextgen | bcbio/cwl/tool.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/tool.py#L253-L277 | def _cromwell_debug(metadata):
"""Format Cromwell failures to make debugging easier.
"""
def get_failed_calls(cur, key=None):
if key is None: key = []
out = []
if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
out.append((key, cur))
elif isinstance(cur, dict):
for k, v in cur.items():
out.extend(get_failed_calls(v, key + [k]))
elif isinstance(cur, (list, tuple)):
for i, v in enumerate(cur):
out.extend(get_failed_calls(v, key + [i]))
return out
print("Failed bcbio Cromwell run")
print("-------------------------")
for fail_k, fail_call in get_failed_calls(metadata["calls"]):
root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"]))
print("Failure in step: %s" % ".".join([str(x) for x in fail_k]))
print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log"))
print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log",
"bcbio-nextgen-commands.log"))
print(" Cromwell directory : %s" % root_dir)
print() | [
"def",
"_cromwell_debug",
"(",
"metadata",
")",
":",
"def",
"get_failed_calls",
"(",
"cur",
",",
"key",
"=",
"None",
")",
":",
"if",
"key",
"is",
"None",
":",
"key",
"=",
"[",
"]",
"out",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"cur",
",",
"dict",
... | Format Cromwell failures to make debugging easier. | [
"Format",
"Cromwell",
"failures",
"to",
"make",
"debugging",
"easier",
"."
] | python | train |
fhcrc/nestly | nestly/core.py | https://github.com/fhcrc/nestly/blob/4d7818b5950f405d2067a6b8577d5afb7527c9ff/nestly/core.py#L137-L199 | def add(self, name, nestable, create_dir=True, update=False,
label_func=str, template_subs=False):
"""
Add a level to the nest
:param string name: Name of the level. Forms the key in the output
dictionary.
:param nestable: Either an iterable object containing values, _or_ a
function which takes a single argument (the control dictionary)
and returns an iterable object containing values
:param boolean create_dir: Should a directory level be created for this
nestable?
:param boolean update: Should the control dictionary be updated with
the results of each value returned by the nestable? Only valid for
dictionary results; useful for updating multiple values. At a
minimum, a key-value pair corresponding to ``name`` must be
returned.
:param label_func: Function to be called to convert each value to a
directory label.
:param boolean template_subs: Should the strings in / returned by
nestable be treated as templates? If true, str.format is called
with the current values of the control dictionary.
"""
# Convert everything to functions
if not callable(nestable):
if not _is_iter(nestable):
raise ValueError("Invalid nestable: " + str(nestable))
if is_string(nestable):
warnings.warn(
"Passed a string as an iterable for name {0}".format(name))
old_nestable = nestable
nestable = _repeat_iter(old_nestable)
if template_subs:
nestable = _templated(nestable)
new_controls = []
for outdir, control in self._controls:
for r in nestable(control):
new_outdir, new_control = outdir, control.copy()
if update:
# Make sure expected key exists
if name not in r:
raise KeyError("Missing key for {0}".format(name))
# Check for collisions
u = frozenset(control.keys()) & frozenset(r.keys())
if u:
msg = "Key overlap: {0}".format(u)
if self.fail_on_clash:
raise KeyError(msg)
elif self.warn_on_clash:
warnings.warn(msg)
new_control.update(r)
to_label = r[name]
else:
new_control[name] = to_label = r
if create_dir:
new_outdir = os.path.join(outdir, label_func(to_label))
if self.include_outdir:
new_control['OUTDIR'] = new_outdir
new_controls.append((new_outdir, new_control))
self._controls = new_controls | [
"def",
"add",
"(",
"self",
",",
"name",
",",
"nestable",
",",
"create_dir",
"=",
"True",
",",
"update",
"=",
"False",
",",
"label_func",
"=",
"str",
",",
"template_subs",
"=",
"False",
")",
":",
"# Convert everything to functions",
"if",
"not",
"callable",
... | Add a level to the nest
:param string name: Name of the level. Forms the key in the output
dictionary.
:param nestable: Either an iterable object containing values, _or_ a
function which takes a single argument (the control dictionary)
and returns an iterable object containing values
:param boolean create_dir: Should a directory level be created for this
nestable?
:param boolean update: Should the control dictionary be updated with
the results of each value returned by the nestable? Only valid for
dictionary results; useful for updating multiple values. At a
minimum, a key-value pair corresponding to ``name`` must be
returned.
:param label_func: Function to be called to convert each value to a
directory label.
:param boolean template_subs: Should the strings in / returned by
nestable be treated as templates? If true, str.format is called
with the current values of the control dictionary. | [
"Add",
"a",
"level",
"to",
"the",
"nest"
] | python | train |
pydata/xarray | xarray/coding/cftime_offsets.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/coding/cftime_offsets.py#L523-L528 | def rollforward(self, date):
"""Roll date forward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date + YearEnd(month=self.month) | [
"def",
"rollforward",
"(",
"self",
",",
"date",
")",
":",
"if",
"self",
".",
"onOffset",
"(",
"date",
")",
":",
"return",
"date",
"else",
":",
"return",
"date",
"+",
"YearEnd",
"(",
"month",
"=",
"self",
".",
"month",
")"
] | Roll date forward to nearest end of year | [
"Roll",
"date",
"forward",
"to",
"nearest",
"end",
"of",
"year"
] | python | train |
bioidiap/gridtk | gridtk/models.py | https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L359-L388 | def add_job(session, command_line, name = 'job', dependencies = [], array = None, exec_dir=None, log_dir = None, stop_on_failure = False, **kwargs):
"""Helper function to create a job, add the dependencies and the array jobs."""
job = Job(command_line=command_line, name=name, exec_dir=exec_dir, log_dir=log_dir, array_string=array, stop_on_failure=stop_on_failure, kwargs=kwargs)
session.add(job)
session.flush()
session.refresh(job)
# by default id and unique id are identical, but the id might be overwritten later on
job.id = job.unique
for d in dependencies:
if d == job.unique:
logger.warn("Adding self-dependency of job %d is not allowed" % d)
continue
depending = list(session.query(Job).filter(Job.unique == d))
if len(depending):
session.add(JobDependence(job.unique, depending[0].unique))
else:
logger.warn("Could not find dependent job with id %d in database" % d)
if array:
(start, stop, step) = array
# add array jobs
for i in range(start, stop+1, step):
session.add(ArrayJob(i, job.unique))
session.commit()
return job | [
"def",
"add_job",
"(",
"session",
",",
"command_line",
",",
"name",
"=",
"'job'",
",",
"dependencies",
"=",
"[",
"]",
",",
"array",
"=",
"None",
",",
"exec_dir",
"=",
"None",
",",
"log_dir",
"=",
"None",
",",
"stop_on_failure",
"=",
"False",
",",
"*",
... | Helper function to create a job, add the dependencies and the array jobs. | [
"Helper",
"function",
"to",
"create",
"a",
"job",
"add",
"the",
"dependencies",
"and",
"the",
"array",
"jobs",
"."
] | python | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L507-L548 | async def _send_to_messenger_profile(self, page, content):
"""
The messenger profile API handles all meta-information about the bot,
like the menu. This allows to submit data to this API endpoint.
:param page: page dict from the configuration
:param content: content to be sent to Facebook (as dict)
"""
log_name = ', '.join(repr(x) for x in content.keys())
page_id = page['page_id']
current = await self._get_messenger_profile(page, content.keys())
if dict_is_subset(content, current):
logger.info('Page %s: %s is already up to date', page_id, log_name)
return
params = {
'access_token': page['page_token'],
}
headers = {
'content-type': 'application/json',
}
post = self.session.post(
PROFILE_ENDPOINT,
params=params,
headers=headers,
data=ujson.dumps(content)
)
# noinspection PyBroadException
try:
async with post as r:
await self._handle_fb_response(r)
except Exception:
logger.exception('Page %s: %s could not be set', page_id, log_name)
reporter.report()
else:
logger.info('Page %s: %s was updated', page_id, log_name) | [
"async",
"def",
"_send_to_messenger_profile",
"(",
"self",
",",
"page",
",",
"content",
")",
":",
"log_name",
"=",
"', '",
".",
"join",
"(",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"content",
".",
"keys",
"(",
")",
")",
"page_id",
"=",
"page",
"[",
... | The messenger profile API handles all meta-information about the bot,
like the menu. This allows to submit data to this API endpoint.
:param page: page dict from the configuration
:param content: content to be sent to Facebook (as dict) | [
"The",
"messenger",
"profile",
"API",
"handles",
"all",
"meta",
"-",
"information",
"about",
"the",
"bot",
"like",
"the",
"menu",
".",
"This",
"allows",
"to",
"submit",
"data",
"to",
"this",
"API",
"endpoint",
"."
] | python | train |
markovmodel/msmtools | msmtools/util/annotators.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/util/annotators.py#L187-L228 | def deprecated(*optional_message):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Parameters
----------
*optional_message : str
an optional user level hint which should indicate which feature to use otherwise.
"""
def _deprecated(func, *args, **kw):
caller_stack = stack()[1:]
while len(caller_stack) > 0:
frame = caller_stack.pop(0)
filename = frame[1]
# skip callee frames if they are other decorators or this file(func)
if 'decorator' in filename or __file__ in filename:
continue
else: break
lineno = frame[2]
# avoid cyclic references!
del caller_stack, frame
user_msg = 'Call to deprecated function "%s". Called from %s line %i. %s' \
% (func.__name__, filename, lineno, msg)
warnings.warn_explicit(
user_msg,
category=DeprecationWarning,
filename=filename,
lineno=lineno
)
return func(*args, **kw)
if len(optional_message) == 1 and callable(optional_message[0]):
# this is the function itself, decorate!
msg = ""
return decorate(optional_message[0], _deprecated)
else:
# actually got a message (or empty parenthesis)
msg = optional_message[0] if len(optional_message) > 0 else ""
return decorator(_deprecated) | [
"def",
"deprecated",
"(",
"*",
"optional_message",
")",
":",
"def",
"_deprecated",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"caller_stack",
"=",
"stack",
"(",
")",
"[",
"1",
":",
"]",
"while",
"len",
"(",
"caller_stack",
")",
"... | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Parameters
----------
*optional_message : str
an optional user level hint which should indicate which feature to use otherwise. | [
"This",
"is",
"a",
"decorator",
"which",
"can",
"be",
"used",
"to",
"mark",
"functions",
"as",
"deprecated",
".",
"It",
"will",
"result",
"in",
"a",
"warning",
"being",
"emitted",
"when",
"the",
"function",
"is",
"used",
"."
] | python | train |
hvac/hvac | hvac/api/system_backend/policy.py | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/policy.py#L8-L21 | def list_policies(self):
"""List all configured policies.
Supported methods:
GET: /sys/policy. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/policy'
response = self._adapter.get(
url=api_path,
)
return response.json() | [
"def",
"list_policies",
"(",
"self",
")",
":",
"api_path",
"=",
"'/v1/sys/policy'",
"response",
"=",
"self",
".",
"_adapter",
".",
"get",
"(",
"url",
"=",
"api_path",
",",
")",
"return",
"response",
".",
"json",
"(",
")"
] | List all configured policies.
Supported methods:
GET: /sys/policy. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict | [
"List",
"all",
"configured",
"policies",
"."
] | python | train |
Yubico/yubikey-manager | ykman/cli/fido.py | https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/fido.py#L83-L102 | def info(ctx):
"""
Display status of FIDO2 application.
"""
controller = ctx.obj['controller']
if controller.is_fips:
click.echo('FIPS Approved Mode: {}'.format(
'Yes' if controller.is_in_fips_mode else 'No'))
else:
if controller.has_pin:
try:
click.echo(
'PIN is set, with {} tries left.'.format(
controller.get_pin_retries()))
except CtapError as e:
if e.code == CtapError.ERR.PIN_BLOCKED:
click.echo('PIN is blocked.')
else:
click.echo('PIN is not set.') | [
"def",
"info",
"(",
"ctx",
")",
":",
"controller",
"=",
"ctx",
".",
"obj",
"[",
"'controller'",
"]",
"if",
"controller",
".",
"is_fips",
":",
"click",
".",
"echo",
"(",
"'FIPS Approved Mode: {}'",
".",
"format",
"(",
"'Yes'",
"if",
"controller",
".",
"is... | Display status of FIDO2 application. | [
"Display",
"status",
"of",
"FIDO2",
"application",
"."
] | python | train |
librosa/librosa | librosa/core/time_frequency.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L591-L643 | def hz_to_mel(frequencies, htk=False):
"""Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
0.9
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : number or np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : number or np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz
"""
frequencies = np.asanyarray(frequencies)
if htk:
return 2595.0 * np.log10(1.0 + frequencies / 700.0)
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if frequencies.ndim:
# If we have array data, vectorize
log_t = (frequencies >= min_log_hz)
mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep
elif frequencies >= min_log_hz:
# If we have scalar data, heck directly
mels = min_log_mel + np.log(frequencies / min_log_hz) / logstep
return mels | [
"def",
"hz_to_mel",
"(",
"frequencies",
",",
"htk",
"=",
"False",
")",
":",
"frequencies",
"=",
"np",
".",
"asanyarray",
"(",
"frequencies",
")",
"if",
"htk",
":",
"return",
"2595.0",
"*",
"np",
".",
"log10",
"(",
"1.0",
"+",
"frequencies",
"/",
"700.0... | Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
0.9
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : number or np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : number or np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz | [
"Convert",
"Hz",
"to",
"Mels"
] | python | test |
petrjasek/eve-elastic | eve_elastic/elastic.py | https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L25-L36 | def parse_date(date_str):
"""Parse elastic datetime string."""
if not date_str:
return None
try:
date = ciso8601.parse_datetime(date_str)
if not date:
date = arrow.get(date_str).datetime
except TypeError:
date = arrow.get(date_str[0]).datetime
return date | [
"def",
"parse_date",
"(",
"date_str",
")",
":",
"if",
"not",
"date_str",
":",
"return",
"None",
"try",
":",
"date",
"=",
"ciso8601",
".",
"parse_datetime",
"(",
"date_str",
")",
"if",
"not",
"date",
":",
"date",
"=",
"arrow",
".",
"get",
"(",
"date_str... | Parse elastic datetime string. | [
"Parse",
"elastic",
"datetime",
"string",
"."
] | python | train |
openai/baselines | baselines/ppo2/runner.py | https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/ppo2/runner.py#L69-L74 | def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:]) | [
"def",
"sf01",
"(",
"arr",
")",
":",
"s",
"=",
"arr",
".",
"shape",
"return",
"arr",
".",
"swapaxes",
"(",
"0",
",",
"1",
")",
".",
"reshape",
"(",
"s",
"[",
"0",
"]",
"*",
"s",
"[",
"1",
"]",
",",
"*",
"s",
"[",
"2",
":",
"]",
")"
] | swap and then flatten axes 0 and 1 | [
"swap",
"and",
"then",
"flatten",
"axes",
"0",
"and",
"1"
] | python | valid |
nwilming/ocupy | ocupy/simulator.py | https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/simulator.py#L176-L212 | def _draw(self, prev_angle = None, prev_length = None):
"""
Draws a new length- and angle-difference pair and calculates
length and angle absolutes matching the last saccade drawn.
Parameters:
prev_angle : float, optional
The last angle that was drawn in the current trajectory
prev_length : float, optional
The last length that was drawn in the current trajectory
Note: Either both prev_angle and prev_length have to be given
or none; if only one parameter is given, it will be neglected.
"""
if (prev_angle is None) or (prev_length is None):
(length, angle)= np.unravel_index(self.drawFrom('self.firstLenAng_cumsum', self.getrand('self.firstLenAng_cumsum')),
self.firstLenAng_shape)
angle = angle-((self.firstLenAng_shape[1]-1)/2)
angle += 0.5
length += 0.5
length *= self.fm.pixels_per_degree
else:
ind = int(floor(prev_length/self.fm.pixels_per_degree))
while ind >= len(self.probability_cumsum):
ind -= 1
while not(self.probability_cumsum[ind]).any():
ind -= 1
J, I = np.unravel_index(self.drawFrom('self.probability_cumsum '+repr(ind),self.getrand('self.probability_cumsum '+repr(ind))),
self.full_H1[ind].shape)
angle = reshift((I-self.full_H1[ind].shape[1]/2) + prev_angle)
angle += 0.5
length = J+0.5
length *= self.fm.pixels_per_degree
return angle, length | [
"def",
"_draw",
"(",
"self",
",",
"prev_angle",
"=",
"None",
",",
"prev_length",
"=",
"None",
")",
":",
"if",
"(",
"prev_angle",
"is",
"None",
")",
"or",
"(",
"prev_length",
"is",
"None",
")",
":",
"(",
"length",
",",
"angle",
")",
"=",
"np",
".",
... | Draws a new length- and angle-difference pair and calculates
length and angle absolutes matching the last saccade drawn.
Parameters:
prev_angle : float, optional
The last angle that was drawn in the current trajectory
prev_length : float, optional
The last length that was drawn in the current trajectory
Note: Either both prev_angle and prev_length have to be given
or none; if only one parameter is given, it will be neglected. | [
"Draws",
"a",
"new",
"length",
"-",
"and",
"angle",
"-",
"difference",
"pair",
"and",
"calculates",
"length",
"and",
"angle",
"absolutes",
"matching",
"the",
"last",
"saccade",
"drawn",
"."
] | python | train |
titusjan/argos | argos/utils/cls.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/utils/cls.py#L330-L333 | def is_an_array(var, allow_none=False):
""" Returns True if var is a numpy array.
"""
return isinstance(var, np.ndarray) or (var is None and allow_none) | [
"def",
"is_an_array",
"(",
"var",
",",
"allow_none",
"=",
"False",
")",
":",
"return",
"isinstance",
"(",
"var",
",",
"np",
".",
"ndarray",
")",
"or",
"(",
"var",
"is",
"None",
"and",
"allow_none",
")"
] | Returns True if var is a numpy array. | [
"Returns",
"True",
"if",
"var",
"is",
"a",
"numpy",
"array",
"."
] | python | train |
secnot/django-isbn-field | isbn_field/fields.py | https://github.com/secnot/django-isbn-field/blob/203b7c94a644ab9901370b615d995c2f388219f5/isbn_field/fields.py#L32-L39 | def pre_save(self, model_instance, add):
"""Remove dashes, spaces, and convert isbn to uppercase before saving
when clean_isbn is enabled"""
value = getattr(model_instance, self.attname)
if self.clean_isbn and value not in EMPTY_VALUES:
cleaned_isbn = value.replace(' ', '').replace('-', '').upper()
setattr(model_instance, self.attname, cleaned_isbn)
return super(ISBNField, self).pre_save(model_instance, add) | [
"def",
"pre_save",
"(",
"self",
",",
"model_instance",
",",
"add",
")",
":",
"value",
"=",
"getattr",
"(",
"model_instance",
",",
"self",
".",
"attname",
")",
"if",
"self",
".",
"clean_isbn",
"and",
"value",
"not",
"in",
"EMPTY_VALUES",
":",
"cleaned_isbn"... | Remove dashes, spaces, and convert isbn to uppercase before saving
when clean_isbn is enabled | [
"Remove",
"dashes",
"spaces",
"and",
"convert",
"isbn",
"to",
"uppercase",
"before",
"saving",
"when",
"clean_isbn",
"is",
"enabled"
] | python | train |
jor-/util | util/io/filelock/unix_exclusive.py | https://github.com/jor-/util/blob/0eb0be84430f88885f4d48335596ca8881f85587/util/io/filelock/unix_exclusive.py#L108-L123 | def acquire(self):
"""
Try to aquire the lock.
"""
if self.timeout is not None:
sleep_intervals = int(self.timeout / self.sleep_time)
else:
sleep_intervals = float('inf')
while not self.acquire_try_once() and sleep_intervals > 0:
time.sleep(self.sleep_time)
sleep_intervals -= 1
if not self.is_locked_by_me():
raise util.io.filelock.general.FileLockTimeoutError(self.lock_filename, self.timeout) | [
"def",
"acquire",
"(",
"self",
")",
":",
"if",
"self",
".",
"timeout",
"is",
"not",
"None",
":",
"sleep_intervals",
"=",
"int",
"(",
"self",
".",
"timeout",
"/",
"self",
".",
"sleep_time",
")",
"else",
":",
"sleep_intervals",
"=",
"float",
"(",
"'inf'"... | Try to aquire the lock. | [
"Try",
"to",
"aquire",
"the",
"lock",
"."
] | python | train |
theonion/djes | djes/models.py | https://github.com/theonion/djes/blob/8f7347382c74172e82e959e3dfbc12b18fbb523f/djes/models.py#L194-L199 | def delete_index(self, refresh=False, ignore=None):
"""Removes the object from the index if `indexed=False`"""
es = connections.get_connection("default")
index = self.__class__.search_objects.mapping.index
doc_type = self.__class__.search_objects.mapping.doc_type
es.delete(index, doc_type, id=self.pk, refresh=refresh, ignore=ignore) | [
"def",
"delete_index",
"(",
"self",
",",
"refresh",
"=",
"False",
",",
"ignore",
"=",
"None",
")",
":",
"es",
"=",
"connections",
".",
"get_connection",
"(",
"\"default\"",
")",
"index",
"=",
"self",
".",
"__class__",
".",
"search_objects",
".",
"mapping",... | Removes the object from the index if `indexed=False` | [
"Removes",
"the",
"object",
"from",
"the",
"index",
"if",
"indexed",
"=",
"False"
] | python | train |
hubo1016/namedstruct | namedstruct/namedstruct.py | https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L1303-L1315 | def parse(self, buffer, inlineparent = None):
'''
Compatible to Parser.parse()
'''
size = 0
v = []
for i in range(0, self.size): # @UnusedVariable
r = self.innerparser.parse(buffer[size:], None)
if r is None:
return None
v.append(r[0])
size += r[1]
return (v, size) | [
"def",
"parse",
"(",
"self",
",",
"buffer",
",",
"inlineparent",
"=",
"None",
")",
":",
"size",
"=",
"0",
"v",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"size",
")",
":",
"# @UnusedVariable",
"r",
"=",
"self",
".",
"... | Compatible to Parser.parse() | [
"Compatible",
"to",
"Parser",
".",
"parse",
"()"
] | python | train |
woolfson-group/isambard | isambard/tools/amino_acids.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/tools/amino_acids.py#L397-L442 | def add_amino_acid_to_json(code, description, letter='X', modified=None, force_add=False):
""" Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
Parameters
----------
code : str
New code to be added to amino acid table.
description : str
Description of the amino acid, e.g. 'amidated terminal carboxy group'.
letter : str, optional
One letter code for the amino acid.
Defaults to 'X'
modified : str or None, optional
Code of modified amino acid, e.g. 'ALA', or None.
Defaults to None
force_add : bool, optional
If True, will over-write existing dictionary value for code if already in amino_acids.json.
If False, then an IOError is raised if code is already in amino_acids.json.
Raises
------
IOError
If code is already in amino_acids.json and force_add is False.
Returns
-------
None
"""
# If code is already in the dictionary, raise an error
if (not force_add) and code in amino_acids_dict.keys():
raise IOError("{0} is already in the amino_acids dictionary, with values: {1}".format(
code, amino_acids_dict[code]))
# Prepare data to be added.
add_code = code
add_code_dict = {'description': description, 'letter': letter, 'modified': modified}
# Check that data does not already exist, and if not, add it to the dictionary.
amino_acids_dict[add_code] = add_code_dict
# Write over json file with updated dictionary.
with open(_amino_acids_json_path, 'w') as foo:
foo.write(json.dumps(amino_acids_dict))
return | [
"def",
"add_amino_acid_to_json",
"(",
"code",
",",
"description",
",",
"letter",
"=",
"'X'",
",",
"modified",
"=",
"None",
",",
"force_add",
"=",
"False",
")",
":",
"# If code is already in the dictionary, raise an error",
"if",
"(",
"not",
"force_add",
")",
"and"... | Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
Parameters
----------
code : str
New code to be added to amino acid table.
description : str
Description of the amino acid, e.g. 'amidated terminal carboxy group'.
letter : str, optional
One letter code for the amino acid.
Defaults to 'X'
modified : str or None, optional
Code of modified amino acid, e.g. 'ALA', or None.
Defaults to None
force_add : bool, optional
If True, will over-write existing dictionary value for code if already in amino_acids.json.
If False, then an IOError is raised if code is already in amino_acids.json.
Raises
------
IOError
If code is already in amino_acids.json and force_add is False.
Returns
-------
None | [
"Add",
"an",
"amino",
"acid",
"to",
"the",
"amino_acids",
".",
"json",
"file",
"used",
"to",
"populate",
"the",
"amino_acid",
"table",
"."
] | python | train |
chemlab/chemlab | chemlab/utils/distances.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/utils/distances.py#L38-L96 | def distance_matrix(coords_a, coords_b, cutoff,
periodic=False, method="simple"):
"""Calculate distances matrix the array of coordinates *coord_a*
and *coord_b* within a certain cutoff.
This function is a wrapper around different routines and data structures
for distance searches. It return a np.ndarray containing the distances.
Returns a matrix with all the computed distances. When using the
"cell-lists" method it returns a scipy.sparse.dok_matrix.
**Parameters**
coords_a: np.ndarray((N, 3), dtype=float)
First coordinate array
coords_b: np.ndarray((N, 3), dtype=float)
Second coordinate array
cutoff: float
Maximum distance to search for
periodic: False or np.ndarray((3,), dtype=float)
If False, don't consider periodic images. Otherwise
periodic is an array containing the periodicity in the
3 dimensions.
method: "simple" | "cell-lists"
The method to use. *simple* is a brute-force
distance search, and *cell-lists* uses the cell
linked list method.
"""
coords_a = np.array(coords_a)
coords_b = np.array(coords_b)
if method=="simple":
if periodic is not False:
return distance_array(coords_a, coords_b, cutoff=cutoff,
period=periodic.astype(np.double))
else:
dist = cdist(coords_a, coords_b)
dist[dist > cutoff] = 0
return dist
elif method=="cell-lists":
if periodic is not False:
if np.any(cutoff > periodic/2):
raise Exception("Not working with such a big cutoff.")
# We need all positive elements
mina = coords_a[:, 0].min(), coords_a[:, 1].min(), coords_a[:, 2].min()
minb = coords_b[:, 0].min(), coords_b[:, 1].min(), coords_b[:, 2].min()
# Find the lowest
origin = np.minimum(mina, minb)
a = CellLinkedList(coords_a - origin, cutoff, periodic)
b = CellLinkedList(coords_b - origin, cutoff, periodic)
dist = a.query_distances_other(b, cutoff)
return dist
else:
raise Exception("Method {} not available.".format(method)) | [
"def",
"distance_matrix",
"(",
"coords_a",
",",
"coords_b",
",",
"cutoff",
",",
"periodic",
"=",
"False",
",",
"method",
"=",
"\"simple\"",
")",
":",
"coords_a",
"=",
"np",
".",
"array",
"(",
"coords_a",
")",
"coords_b",
"=",
"np",
".",
"array",
"(",
"... | Calculate distances matrix the array of coordinates *coord_a*
and *coord_b* within a certain cutoff.
This function is a wrapper around different routines and data structures
for distance searches. It return a np.ndarray containing the distances.
Returns a matrix with all the computed distances. When using the
"cell-lists" method it returns a scipy.sparse.dok_matrix.
**Parameters**
coords_a: np.ndarray((N, 3), dtype=float)
First coordinate array
coords_b: np.ndarray((N, 3), dtype=float)
Second coordinate array
cutoff: float
Maximum distance to search for
periodic: False or np.ndarray((3,), dtype=float)
If False, don't consider periodic images. Otherwise
periodic is an array containing the periodicity in the
3 dimensions.
method: "simple" | "cell-lists"
The method to use. *simple* is a brute-force
distance search, and *cell-lists* uses the cell
linked list method. | [
"Calculate",
"distances",
"matrix",
"the",
"array",
"of",
"coordinates",
"*",
"coord_a",
"*",
"and",
"*",
"coord_b",
"*",
"within",
"a",
"certain",
"cutoff",
".",
"This",
"function",
"is",
"a",
"wrapper",
"around",
"different",
"routines",
"and",
"data",
"st... | python | train |
acrazing/dbapi | dbapi/Group.py | https://github.com/acrazing/dbapi/blob/8c1f85cb1a051daf7be1fc97a62c4499983e9898/dbapi/Group.py#L446-L461 | def add_comment(self, topic_id, content, reply_id=None):
"""
添加评论
:param topic_id: 话题ID
:param content: 内容
:param reply_id: 回复ID
:return: None
"""
return self.api.req(API_GROUP_ADD_COMMENT % topic_id, 'post', data={
'ck': self.api.ck(),
'ref_cid': reply_id,
'rv_comment': content,
'start': 0,
'submit_btn': '加上去',
}) | [
"def",
"add_comment",
"(",
"self",
",",
"topic_id",
",",
"content",
",",
"reply_id",
"=",
"None",
")",
":",
"return",
"self",
".",
"api",
".",
"req",
"(",
"API_GROUP_ADD_COMMENT",
"%",
"topic_id",
",",
"'post'",
",",
"data",
"=",
"{",
"'ck'",
":",
"sel... | 添加评论
:param topic_id: 话题ID
:param content: 内容
:param reply_id: 回复ID
:return: None | [
"添加评论",
":",
"param",
"topic_id",
":",
"话题ID",
":",
"param",
"content",
":",
"内容",
":",
"param",
"reply_id",
":",
"回复ID",
":",
"return",
":",
"None"
] | python | train |
fedora-python/pyp2rpm | pyp2rpm/convertor.py | https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/convertor.py#L281-L307 | def client(self):
"""XMLRPC client for PyPI. Always returns the same instance.
If the package is provided as a path to compressed source file,
PyPI will not be used and the client will not be instantiated.
Returns:
XMLRPC client for PyPI or None.
"""
if self.proxy:
proxyhandler = urllib.ProxyHandler({"http": self.proxy})
opener = urllib.build_opener(proxyhandler)
urllib.install_opener(opener)
transport = ProxyTransport()
if not hasattr(self, '_client'):
transport = None
if self.pypi:
if self.proxy:
logger.info('Using provided proxy: {0}.'.format(
self.proxy))
self._client = xmlrpclib.ServerProxy(settings.PYPI_URL,
transport=transport)
self._client_set = True
else:
self._client = None
return self._client | [
"def",
"client",
"(",
"self",
")",
":",
"if",
"self",
".",
"proxy",
":",
"proxyhandler",
"=",
"urllib",
".",
"ProxyHandler",
"(",
"{",
"\"http\"",
":",
"self",
".",
"proxy",
"}",
")",
"opener",
"=",
"urllib",
".",
"build_opener",
"(",
"proxyhandler",
"... | XMLRPC client for PyPI. Always returns the same instance.
If the package is provided as a path to compressed source file,
PyPI will not be used and the client will not be instantiated.
Returns:
XMLRPC client for PyPI or None. | [
"XMLRPC",
"client",
"for",
"PyPI",
".",
"Always",
"returns",
"the",
"same",
"instance",
"."
] | python | train |
chaoss/grimoirelab-sortinghat | sortinghat/api.py | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/api.py#L279-L298 | def add_to_matching_blacklist(db, entity):
"""Add entity to the matching blacklist.
This function adds an 'entity' o term to the matching blacklist.
The term to add cannot have a None or empty value, in this case
a InvalidValueError will be raised. If the given 'entity' exists in the
registry, the function will raise an AlreadyExistsError exception.
:param db: database manager
:param entity: term, word or value to blacklist
:raises InvalidValueError: raised when entity is None or an empty string
:raises AlreadyExistsError: raised when the entity already exists
in the registry.
"""
with db.connect() as session:
try:
add_to_matching_blacklist_db(session, entity)
except ValueError as e:
raise InvalidValueError(e) | [
"def",
"add_to_matching_blacklist",
"(",
"db",
",",
"entity",
")",
":",
"with",
"db",
".",
"connect",
"(",
")",
"as",
"session",
":",
"try",
":",
"add_to_matching_blacklist_db",
"(",
"session",
",",
"entity",
")",
"except",
"ValueError",
"as",
"e",
":",
"r... | Add entity to the matching blacklist.
This function adds an 'entity' o term to the matching blacklist.
The term to add cannot have a None or empty value, in this case
a InvalidValueError will be raised. If the given 'entity' exists in the
registry, the function will raise an AlreadyExistsError exception.
:param db: database manager
:param entity: term, word or value to blacklist
:raises InvalidValueError: raised when entity is None or an empty string
:raises AlreadyExistsError: raised when the entity already exists
in the registry. | [
"Add",
"entity",
"to",
"the",
"matching",
"blacklist",
"."
] | python | train |
jaegertracing/jaeger-client-python | jaeger_client/ioloop_util.py | https://github.com/jaegertracing/jaeger-client-python/blob/06face094757c645a6d81f0e073c001931a22a05/jaeger_client/ioloop_util.py#L22-L60 | def submit(fn, io_loop, *args, **kwargs):
"""Submit Tornado Coroutine to IOLoop.current().
:param fn: Tornado Coroutine to execute
:param io_loop: Tornado IOLoop where to schedule the coroutine
:param args: Args to pass to coroutine
:param kwargs: Kwargs to pass to coroutine
:returns tornado.concurrent.Future: future result of coroutine
"""
future = Future()
def execute():
"""Execute fn on the IOLoop."""
try:
result = gen.maybe_future(fn(*args, **kwargs))
except Exception:
# The function we ran didn't return a future and instead raised
# an exception. Let's pretend that it returned this dummy
# future with our stack trace.
f = gen.Future()
f.set_exc_info(sys.exc_info())
on_done(f)
else:
result.add_done_callback(on_done)
def on_done(tornado_future):
"""
Set tornado.Future results to the concurrent.Future.
:param tornado_future:
"""
exception = tornado_future.exception()
if not exception:
future.set_result(tornado_future.result())
else:
future.set_exception(exception)
io_loop.add_callback(execute)
return future | [
"def",
"submit",
"(",
"fn",
",",
"io_loop",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"future",
"=",
"Future",
"(",
")",
"def",
"execute",
"(",
")",
":",
"\"\"\"Execute fn on the IOLoop.\"\"\"",
"try",
":",
"result",
"=",
"gen",
".",
"maybe_... | Submit Tornado Coroutine to IOLoop.current().
:param fn: Tornado Coroutine to execute
:param io_loop: Tornado IOLoop where to schedule the coroutine
:param args: Args to pass to coroutine
:param kwargs: Kwargs to pass to coroutine
:returns tornado.concurrent.Future: future result of coroutine | [
"Submit",
"Tornado",
"Coroutine",
"to",
"IOLoop",
".",
"current",
"()",
"."
] | python | train |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/util/bottle2.py#L354-L369 | def mount(self, app, script_path):
''' Mount a Bottle application to a specific URL prefix '''
if not isinstance(app, Bottle):
raise TypeError('Only Bottle instances are supported for now.')
script_path = '/'.join(filter(None, script_path.split('/')))
path_depth = script_path.count('/') + 1
if not script_path:
raise TypeError('Empty script_path. Perhaps you want a merge()?')
for other in self.mounts:
if other.startswith(script_path):
raise TypeError('Conflict with existing mount: %s' % other)
@self.route('/%s/:#.*#' % script_path, method="ANY")
def mountpoint():
request.path_shift(path_depth)
return app.handle(request.path, request.method)
self.mounts[script_path] = app | [
"def",
"mount",
"(",
"self",
",",
"app",
",",
"script_path",
")",
":",
"if",
"not",
"isinstance",
"(",
"app",
",",
"Bottle",
")",
":",
"raise",
"TypeError",
"(",
"'Only Bottle instances are supported for now.'",
")",
"script_path",
"=",
"'/'",
".",
"join",
"... | Mount a Bottle application to a specific URL prefix | [
"Mount",
"a",
"Bottle",
"application",
"to",
"a",
"specific",
"URL",
"prefix"
] | python | train |
Karaage-Cluster/karaage | karaage/people/models.py | https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/people/models.py#L461-L472 | def _add_person_to_group(person, group):
""" Call datastores after adding a person to a group. """
from karaage.datastores import add_accounts_to_group
from karaage.datastores import add_accounts_to_project
from karaage.datastores import add_accounts_to_institute
a_list = person.account_set
add_accounts_to_group(a_list, group)
for project in group.project_set.all():
add_accounts_to_project(a_list, project)
for institute in group.institute_set.all():
add_accounts_to_institute(a_list, institute) | [
"def",
"_add_person_to_group",
"(",
"person",
",",
"group",
")",
":",
"from",
"karaage",
".",
"datastores",
"import",
"add_accounts_to_group",
"from",
"karaage",
".",
"datastores",
"import",
"add_accounts_to_project",
"from",
"karaage",
".",
"datastores",
"import",
... | Call datastores after adding a person to a group. | [
"Call",
"datastores",
"after",
"adding",
"a",
"person",
"to",
"a",
"group",
"."
] | python | train |
pybel/pybel | src/pybel/parser/parse_bel.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_bel.py#L821-L828 | def ensure_node(self, tokens: ParseResults) -> BaseEntity:
"""Turn parsed tokens into canonical node name and makes sure its in the graph."""
if MODIFIER in tokens:
return self.ensure_node(tokens[TARGET])
node = parse_result_to_dsl(tokens)
self.graph.add_node_from_data(node)
return node | [
"def",
"ensure_node",
"(",
"self",
",",
"tokens",
":",
"ParseResults",
")",
"->",
"BaseEntity",
":",
"if",
"MODIFIER",
"in",
"tokens",
":",
"return",
"self",
".",
"ensure_node",
"(",
"tokens",
"[",
"TARGET",
"]",
")",
"node",
"=",
"parse_result_to_dsl",
"(... | Turn parsed tokens into canonical node name and makes sure its in the graph. | [
"Turn",
"parsed",
"tokens",
"into",
"canonical",
"node",
"name",
"and",
"makes",
"sure",
"its",
"in",
"the",
"graph",
"."
] | python | train |
oceanprotocol/squid-py | squid_py/ocean/ocean_agreements.py | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ocean/ocean_agreements.py#L126-L252 | def create(self, did, service_definition_id, agreement_id,
service_agreement_signature, consumer_address, account):
"""
Execute the service agreement on-chain using keeper's ServiceAgreement contract.
The on-chain executeAgreement method requires the following arguments:
templateId, signature, consumer, hashes, timeouts, serviceAgreementId, did.
`agreement_message_hash` is necessary to verify the signature.
The consumer `signature` includes the conditions timeouts and parameters values which
is usedon-chain to verify that the values actually match the signed hashes.
:param did: str representation fo the asset DID. Use this to retrieve the asset DDO.
:param service_definition_id: str identifies the specific service in
the ddo to use in this agreement.
:param agreement_id: 32 bytes identifier created by the consumer and will be used
on-chain for the executed agreement.
:param service_agreement_signature: str the signed agreement message hash which includes
conditions and their parameters values and other details of the agreement.
:param consumer_address: ethereum account address of consumer, hex str
:param account: Account instance creating the agreement. Can be either the
consumer, publisher or provider
:return: dict the `executeAgreement` transaction receipt
"""
assert consumer_address and Web3Provider.get_web3().isChecksumAddress(
consumer_address), f'Invalid consumer address {consumer_address}'
assert account.address in self._keeper.accounts, \
f'Unrecognized account address {account.address}'
agreement_template_approved = self._keeper.template_manager.is_template_approved(
self._keeper.escrow_access_secretstore_template.address)
if not agreement_template_approved:
msg = (f'The EscrowAccessSecretStoreTemplate contract at address '
f'{self._keeper.escrow_access_secretstore_template.address} is not '
f'approved and cannot be used for creating service agreements.')
logger.warning(msg)
raise OceanInvalidAgreementTemplate(msg)
asset = self._asset_resolver.resolve(did)
asset_id = asset.asset_id
service_agreement = ServiceAgreement.from_ddo(service_definition_id, asset)
agreement_template = self._keeper.escrow_access_secretstore_template
if agreement_template.get_agreement_consumer(agreement_id) is not None:
raise OceanServiceAgreementExists(
f'Service agreement {agreement_id} already exists, cannot reuse '
f'the same agreement id.')
if consumer_address != account.address:
if not self._verify_service_agreement_signature(
did, agreement_id, service_definition_id,
consumer_address, service_agreement_signature,
ddo=asset
):
raise OceanInvalidServiceAgreementSignature(
f'Verifying consumer signature failed: '
f'signature {service_agreement_signature}, '
f'consumerAddress {consumer_address}'
)
publisher_address = Web3Provider.get_web3().toChecksumAddress(asset.publisher)
condition_ids = service_agreement.generate_agreement_condition_ids(
agreement_id, asset_id, consumer_address, publisher_address, self._keeper)
time_locks = service_agreement.conditions_timelocks
time_outs = service_agreement.conditions_timeouts
success = agreement_template.create_agreement(
agreement_id,
asset_id,
condition_ids,
time_locks,
time_outs,
consumer_address,
account
)
if not success:
# success is based on tx receipt which is not reliable.
# So we check on-chain directly to see if agreement_id is there
consumer = self._keeper.escrow_access_secretstore_template.get_agreement_consumer(agreement_id)
if consumer:
success = True
else:
event_log = self._keeper.escrow_access_secretstore_template.subscribe_agreement_created(
agreement_id, 30, None, (), wait=True
)
success = event_log is not None
if success:
logger.info(f'Service agreement {agreement_id} created successfully.')
else:
logger.info(f'Create agreement "{agreement_id}" failed.')
self._log_agreement_info(
asset, service_agreement, agreement_id, service_agreement_signature,
consumer_address, account, condition_ids
)
if success:
# subscribe to events related to this agreement_id
if consumer_address == account.address:
register_service_agreement_consumer(
self._config.storage_path,
publisher_address,
agreement_id,
did,
service_agreement,
service_definition_id,
service_agreement.get_price(),
asset.encrypted_files,
account,
condition_ids,
None
)
else:
register_service_agreement_publisher(
self._config.storage_path,
consumer_address,
agreement_id,
did,
service_agreement,
service_definition_id,
service_agreement.get_price(),
account,
condition_ids
)
return success | [
"def",
"create",
"(",
"self",
",",
"did",
",",
"service_definition_id",
",",
"agreement_id",
",",
"service_agreement_signature",
",",
"consumer_address",
",",
"account",
")",
":",
"assert",
"consumer_address",
"and",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",... | Execute the service agreement on-chain using keeper's ServiceAgreement contract.
The on-chain executeAgreement method requires the following arguments:
templateId, signature, consumer, hashes, timeouts, serviceAgreementId, did.
`agreement_message_hash` is necessary to verify the signature.
The consumer `signature` includes the conditions timeouts and parameters values which
is usedon-chain to verify that the values actually match the signed hashes.
:param did: str representation fo the asset DID. Use this to retrieve the asset DDO.
:param service_definition_id: str identifies the specific service in
the ddo to use in this agreement.
:param agreement_id: 32 bytes identifier created by the consumer and will be used
on-chain for the executed agreement.
:param service_agreement_signature: str the signed agreement message hash which includes
conditions and their parameters values and other details of the agreement.
:param consumer_address: ethereum account address of consumer, hex str
:param account: Account instance creating the agreement. Can be either the
consumer, publisher or provider
:return: dict the `executeAgreement` transaction receipt | [
"Execute",
"the",
"service",
"agreement",
"on",
"-",
"chain",
"using",
"keeper",
"s",
"ServiceAgreement",
"contract",
"."
] | python | train |
beelit94/python-terraform | python_terraform/__init__.py | https://github.com/beelit94/python-terraform/blob/99950cb03c37abadb0d7e136452e43f4f17dd4e1/python_terraform/__init__.py#L181-L244 | def generate_cmd_string(self, cmd, *args, **kwargs):
"""
for any generate_cmd_string doesn't written as public method of terraform
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, seperated with space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwags in method 'cmd'
:return: string of valid terraform command
"""
cmds = cmd.split()
cmds = [self.terraform_bin_path] + cmds
for option, value in kwargs.items():
if '_' in option:
option = option.replace('_', '-')
if type(value) is list:
for sub_v in value:
cmds += ['-{k}={v}'.format(k=option, v=sub_v)]
continue
if type(value) is dict:
if 'backend-config' in option:
for bk, bv in value.items():
cmds += ['-backend-config={k}={v}'.format(k=bk, v=bv)]
continue
# since map type sent in string won't work, create temp var file for
# variables, and clean it up later
else:
filename = self.temp_var_files.create(value)
cmds += ['-var-file={0}'.format(filename)]
continue
# simple flag,
if value is IsFlagged:
cmds += ['-{k}'.format(k=option)]
continue
if value is None or value is IsNotFlagged:
continue
if type(value) is bool:
value = 'true' if value else 'false'
cmds += ['-{k}={v}'.format(k=option, v=value)]
cmds += args
return cmds | [
"def",
"generate_cmd_string",
"(",
"self",
",",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cmds",
"=",
"cmd",
".",
"split",
"(",
")",
"cmds",
"=",
"[",
"self",
".",
"terraform_bin_path",
"]",
"+",
"cmds",
"for",
"option",
",",
"va... | for any generate_cmd_string doesn't written as public method of terraform
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, seperated with space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwags in method 'cmd'
:return: string of valid terraform command | [
"for",
"any",
"generate_cmd_string",
"doesn",
"t",
"written",
"as",
"public",
"method",
"of",
"terraform"
] | python | train |
numba/llvmlite | docs/source/user-guide/examples/ll_fpadd.py | https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/docs/source/user-guide/examples/ll_fpadd.py#L26-L38 | def create_execution_engine():
"""
Create an ExecutionEngine suitable for JIT code generation on
the host CPU. The engine is reusable for an arbitrary number of
modules.
"""
# Create a target machine representing the host
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
# And an execution engine with an empty backing module
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
return engine | [
"def",
"create_execution_engine",
"(",
")",
":",
"# Create a target machine representing the host",
"target",
"=",
"llvm",
".",
"Target",
".",
"from_default_triple",
"(",
")",
"target_machine",
"=",
"target",
".",
"create_target_machine",
"(",
")",
"# And an execution eng... | Create an ExecutionEngine suitable for JIT code generation on
the host CPU. The engine is reusable for an arbitrary number of
modules. | [
"Create",
"an",
"ExecutionEngine",
"suitable",
"for",
"JIT",
"code",
"generation",
"on",
"the",
"host",
"CPU",
".",
"The",
"engine",
"is",
"reusable",
"for",
"an",
"arbitrary",
"number",
"of",
"modules",
"."
] | python | train |
resonai/ybt | yabt/buildcontext.py | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/buildcontext.py#L143-L146 | def generate_all_deps(self, target: Target):
"""Generate all dependencies of `target` (the target nodes)."""
yield from (self.targets[dep_name]
for dep_name in self.generate_dep_names(target)) | [
"def",
"generate_all_deps",
"(",
"self",
",",
"target",
":",
"Target",
")",
":",
"yield",
"from",
"(",
"self",
".",
"targets",
"[",
"dep_name",
"]",
"for",
"dep_name",
"in",
"self",
".",
"generate_dep_names",
"(",
"target",
")",
")"
] | Generate all dependencies of `target` (the target nodes). | [
"Generate",
"all",
"dependencies",
"of",
"target",
"(",
"the",
"target",
"nodes",
")",
"."
] | python | train |
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L372-L382 | def get_build_scanner_path(self, scanner):
"""Fetch the scanner path for this executor's targets and sources.
"""
env = self.get_build_env()
try:
cwd = self.batches[0].targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd,
self.get_all_targets(),
self.get_all_sources()) | [
"def",
"get_build_scanner_path",
"(",
"self",
",",
"scanner",
")",
":",
"env",
"=",
"self",
".",
"get_build_env",
"(",
")",
"try",
":",
"cwd",
"=",
"self",
".",
"batches",
"[",
"0",
"]",
".",
"targets",
"[",
"0",
"]",
".",
"cwd",
"except",
"(",
"In... | Fetch the scanner path for this executor's targets and sources. | [
"Fetch",
"the",
"scanner",
"path",
"for",
"this",
"executor",
"s",
"targets",
"and",
"sources",
"."
] | python | train |
nugget/python-insteonplm | insteonplm/states/onOff.py | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L581-L585 | def set_trigger_group_bit_mask(self, trigger_group_bit_mask):
"""Set the trigger_group_bit_mask for the current group/button."""
set_cmd = self._create_set_property_msg("_trigger_group_bit_mask",
0x0c, trigger_group_bit_mask)
self._send_method(set_cmd, self._property_set) | [
"def",
"set_trigger_group_bit_mask",
"(",
"self",
",",
"trigger_group_bit_mask",
")",
":",
"set_cmd",
"=",
"self",
".",
"_create_set_property_msg",
"(",
"\"_trigger_group_bit_mask\"",
",",
"0x0c",
",",
"trigger_group_bit_mask",
")",
"self",
".",
"_send_method",
"(",
"... | Set the trigger_group_bit_mask for the current group/button. | [
"Set",
"the",
"trigger_group_bit_mask",
"for",
"the",
"current",
"group",
"/",
"button",
"."
] | python | train |
ianclegg/winrmlib | winrmlib/api/session.py | https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L62-L68 | def put(self, resource, obj,
operation_timeout=None, max_envelope_size=None, locale=None):
"""
resource can be a URL or a ResourceLocator
"""
headers = None
return self.service.invoke(headers, obj) | [
"def",
"put",
"(",
"self",
",",
"resource",
",",
"obj",
",",
"operation_timeout",
"=",
"None",
",",
"max_envelope_size",
"=",
"None",
",",
"locale",
"=",
"None",
")",
":",
"headers",
"=",
"None",
"return",
"self",
".",
"service",
".",
"invoke",
"(",
"h... | resource can be a URL or a ResourceLocator | [
"resource",
"can",
"be",
"a",
"URL",
"or",
"a",
"ResourceLocator"
] | python | train |
SatelliteQE/nailgun | nailgun/client.py | https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/client.py#L70-L85 | def _set_content_type(kwargs):
"""If the 'content-type' header is unset, set it to 'applcation/json'.
The 'content-type' will not be set if doing a file upload as requests will
automatically set it.
:param kwargs: A ``dict``. The keyword args supplied to :func:`request` or
one of the convenience functions like it.
:return: Nothing. ``kwargs`` is modified in-place.
"""
if 'files' in kwargs:
return # requests will automatically set the content-type
headers = kwargs.pop('headers', {})
headers.setdefault('content-type', 'application/json')
kwargs['headers'] = headers | [
"def",
"_set_content_type",
"(",
"kwargs",
")",
":",
"if",
"'files'",
"in",
"kwargs",
":",
"return",
"# requests will automatically set the content-type",
"headers",
"=",
"kwargs",
".",
"pop",
"(",
"'headers'",
",",
"{",
"}",
")",
"headers",
".",
"setdefault",
"... | If the 'content-type' header is unset, set it to 'applcation/json'.
The 'content-type' will not be set if doing a file upload as requests will
automatically set it.
:param kwargs: A ``dict``. The keyword args supplied to :func:`request` or
one of the convenience functions like it.
:return: Nothing. ``kwargs`` is modified in-place. | [
"If",
"the",
"content",
"-",
"type",
"header",
"is",
"unset",
"set",
"it",
"to",
"applcation",
"/",
"json",
"."
] | python | train |
pymc-devs/pymc | pymc/utils.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L435-L450 | def _optimize_binning(x, range, method='Freedman'):
"""Find the optimal number of bins.
Available methods : Freedman, Scott
"""
N = x.shape[0]
if method.lower() == 'freedman':
s = sort(x)
IQR = s[int(N * .75)] - s[int(
N * .25)] # Interquantile range (75% -25%)
width = 2 * IQR * N ** (-1. / 3)
elif method.lower() == 'scott':
width = 3.49 * x.std() * N ** (-1. / 3)
else:
raise ValueError('Method must be Scott or Freedman', method)
return int(diff(range) / width) | [
"def",
"_optimize_binning",
"(",
"x",
",",
"range",
",",
"method",
"=",
"'Freedman'",
")",
":",
"N",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'freedman'",
":",
"s",
"=",
"sort",
"(",
"x",
")",
"IQR",
... | Find the optimal number of bins.
Available methods : Freedman, Scott | [
"Find",
"the",
"optimal",
"number",
"of",
"bins",
".",
"Available",
"methods",
":",
"Freedman",
"Scott"
] | python | train |
rsheftel/raccoon | raccoon/dataframe.py | https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1070-L1082 | def add(self, left_column, right_column, indexes=None):
"""
Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math
on that sub-set of the columns.
:param left_column: first column name
:param right_column: second column name
:param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same\
length as the DataFrame
:return: list
"""
left_list, right_list = self._get_lists(left_column, right_column, indexes)
return [l + r for l, r in zip(left_list, right_list)] | [
"def",
"add",
"(",
"self",
",",
"left_column",
",",
"right_column",
",",
"indexes",
"=",
"None",
")",
":",
"left_list",
",",
"right_list",
"=",
"self",
".",
"_get_lists",
"(",
"left_column",
",",
"right_column",
",",
"indexes",
")",
"return",
"[",
"l",
"... | Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math
on that sub-set of the columns.
:param left_column: first column name
:param right_column: second column name
:param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same\
length as the DataFrame
:return: list | [
"Math",
"helper",
"method",
"that",
"adds",
"element",
"-",
"wise",
"two",
"columns",
".",
"If",
"indexes",
"are",
"not",
"None",
"then",
"will",
"only",
"perform",
"the",
"math",
"on",
"that",
"sub",
"-",
"set",
"of",
"the",
"columns",
"."
] | python | train |
EmbodiedCognition/pagoda | pagoda/parser.py | https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/parser.py#L14-L36 | def parse(source, world, jointgroup=None, density=1000, color=None):
'''Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
'''
visitor = Visitor(world, jointgroup, density, color)
visitor.parse(re.sub(r'#.*', ' ', source.read()))
return visitor | [
"def",
"parse",
"(",
"source",
",",
"world",
",",
"jointgroup",
"=",
"None",
",",
"density",
"=",
"1000",
",",
"color",
"=",
"None",
")",
":",
"visitor",
"=",
"Visitor",
"(",
"world",
",",
"jointgroup",
",",
"density",
",",
"color",
")",
"visitor",
"... | Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies. | [
"Load",
"and",
"parse",
"a",
"source",
"file",
"."
] | python | valid |
addok/addok | addok/fuzzy.py | https://github.com/addok/addok/blob/46a270d76ec778d2b445c2be753e5c6ba070a9b2/addok/fuzzy.py#L107-L118 | def do_fuzzyindex(self, word):
"""Compute fuzzy extensions of word that exist in index.
FUZZYINDEX lilas"""
word = list(preprocess_query(word))[0]
token = Token(word)
neighbors = make_fuzzy(token)
neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors]
neighbors.sort(key=lambda n: n[1], reverse=True)
for token, freq in neighbors:
if freq == 0:
break
print(white(token), blue(freq)) | [
"def",
"do_fuzzyindex",
"(",
"self",
",",
"word",
")",
":",
"word",
"=",
"list",
"(",
"preprocess_query",
"(",
"word",
")",
")",
"[",
"0",
"]",
"token",
"=",
"Token",
"(",
"word",
")",
"neighbors",
"=",
"make_fuzzy",
"(",
"token",
")",
"neighbors",
"... | Compute fuzzy extensions of word that exist in index.
FUZZYINDEX lilas | [
"Compute",
"fuzzy",
"extensions",
"of",
"word",
"that",
"exist",
"in",
"index",
".",
"FUZZYINDEX",
"lilas"
] | python | test |
sloria/konch | konch.py | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L1139-L1184 | def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn:
"""Main entry point for the konch CLI."""
args = parse_args(argv)
if args["--debug"]:
logging.basicConfig(
format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG
)
logger.debug(args)
config_file: typing.Union[Path, None]
if args["init"]:
config_file = Path(args["<config_file>"] or CONFIG_FILE)
init_config(config_file)
else:
config_file = Path(args["<config_file>"]) if args["<config_file>"] else None
if args["edit"]:
edit_config(config_file)
elif args["allow"]:
allow_config(config_file)
elif args["deny"]:
deny_config(config_file)
mod = use_file(Path(args["--file"]) if args["--file"] else None)
if hasattr(mod, "setup"):
mod.setup() # type: ignore
if args["--name"]:
if args["--name"] not in _config_registry:
print_error(f'Invalid --name: "{args["--name"]}"')
sys.exit(1)
config_dict = _config_registry[args["--name"]]
logger.debug(f'Using named config: "{args["--name"]}"')
logger.debug(config_dict)
else:
config_dict = _cfg
# Allow default shell to be overriden by command-line argument
shell_name = args["--shell"]
if shell_name:
config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell)
logger.debug(f"Starting with config {config_dict}")
start(**config_dict)
if hasattr(mod, "teardown"):
mod.teardown() # type: ignore
sys.exit(0) | [
"def",
"main",
"(",
"argv",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Sequence",
"]",
"=",
"None",
")",
"->",
"typing",
".",
"NoReturn",
":",
"args",
"=",
"parse_args",
"(",
"argv",
")",
"if",
"args",
"[",
"\"--debug\"",
"]",
":",
"logging... | Main entry point for the konch CLI. | [
"Main",
"entry",
"point",
"for",
"the",
"konch",
"CLI",
"."
] | python | train |
Schwanksta/python-arcgis-rest-query | arcgis/arcgis.py | https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L106-L117 | def get_descriptor_for_layer(self, layer):
"""
Returns the standard JSON descriptor for the layer. There is a lot of
usefule information in there.
"""
if not layer in self._layer_descriptor_cache:
params = {'f': 'pjson'}
if self.token:
params['token'] = self.token
response = requests.get(self._build_request(layer), params=params)
self._layer_descriptor_cache[layer] = response.json()
return self._layer_descriptor_cache[layer] | [
"def",
"get_descriptor_for_layer",
"(",
"self",
",",
"layer",
")",
":",
"if",
"not",
"layer",
"in",
"self",
".",
"_layer_descriptor_cache",
":",
"params",
"=",
"{",
"'f'",
":",
"'pjson'",
"}",
"if",
"self",
".",
"token",
":",
"params",
"[",
"'token'",
"]... | Returns the standard JSON descriptor for the layer. There is a lot of
usefule information in there. | [
"Returns",
"the",
"standard",
"JSON",
"descriptor",
"for",
"the",
"layer",
".",
"There",
"is",
"a",
"lot",
"of",
"usefule",
"information",
"in",
"there",
"."
] | python | train |
klahnakoski/mo-logs | mo_logs/strings.py | https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/strings.py#L458-L470 | def quote(value):
"""
return JSON-quoted value
:param value:
:return:
"""
if value == None:
output = ""
elif is_text(value):
output = encode_basestring(value)
else:
output = _json.dumps(value)
return output | [
"def",
"quote",
"(",
"value",
")",
":",
"if",
"value",
"==",
"None",
":",
"output",
"=",
"\"\"",
"elif",
"is_text",
"(",
"value",
")",
":",
"output",
"=",
"encode_basestring",
"(",
"value",
")",
"else",
":",
"output",
"=",
"_json",
".",
"dumps",
"(",... | return JSON-quoted value
:param value:
:return: | [
"return",
"JSON",
"-",
"quoted",
"value",
":",
"param",
"value",
":",
":",
"return",
":"
] | python | train |
waqasbhatti/astrobase | astrobase/cpserver/checkplotserver_handlers.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/cpserver/checkplotserver_handlers.py#L1168-L1233 | def post(self):
'''This handles POST requests.
Saves the changes made by the user on the frontend back to the current
checkplot-list.json file.
'''
# if self.readonly is set, then don't accept any changes
# return immediately with a 400
if self.readonly:
msg = "checkplotserver is in readonly mode. no updates allowed."
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
objectid = self.get_argument('objectid', None)
changes = self.get_argument('changes',None)
# if either of the above is invalid, return nothing
if not objectid or not changes:
msg = ("could not parse changes to the checkplot filelist "
"from the frontend")
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# otherwise, update the checkplot list JSON
objectid = xhtml_escape(objectid)
changes = json.loads(changes)
# update the dictionary
if 'reviewed' not in self.currentproject:
self.currentproject['reviewed'] = {}
self.currentproject['reviewed'][objectid] = changes
# update the JSON file
with open(self.cplistfile,'w') as outfd:
json.dump(self.currentproject, outfd)
# return status
msg = ("wrote all changes to the checkplot filelist "
"from the frontend for object: %s" % objectid)
LOGGER.info(msg)
resultdict = {'status':'success',
'message':msg,
'readonly':self.readonly,
'result':{'objectid':objectid,
'changes':changes}}
self.write(resultdict)
self.finish() | [
"def",
"post",
"(",
"self",
")",
":",
"# if self.readonly is set, then don't accept any changes",
"# return immediately with a 400",
"if",
"self",
".",
"readonly",
":",
"msg",
"=",
"\"checkplotserver is in readonly mode. no updates allowed.\"",
"resultdict",
"=",
"{",
"'status'... | This handles POST requests.
Saves the changes made by the user on the frontend back to the current
checkplot-list.json file. | [
"This",
"handles",
"POST",
"requests",
"."
] | python | valid |
hyperledger/indy-sdk | vcx/wrappers/python3/vcx/api/disclosed_proof.py | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/disclosed_proof.py#L204-L225 | async def get_creds(self) -> dict:
"""
Gets the credentials from a disclosed proof
Example:
msg_id = '1'
phone_number = '8019119191'
connection = await Connection.create(source_id)
await connection.connect(phone_number)
disclosed_proof = await DisclosedProof.create_with_msgid(source_id, connection, msg_id)
creds = await disclosed_proof.get_creds()
:return: credentials
"""
if not hasattr(DisclosedProof.get_creds, "cb"):
self.logger.debug("vcx_disclosed_proof_retrieve_credentials: Creating callback")
DisclosedProof.get_creds.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_disclosed_proof_handle = c_uint32(self.handle)
data = await do_call('vcx_disclosed_proof_retrieve_credentials',
c_disclosed_proof_handle,
DisclosedProof.get_creds.cb)
return json.loads(data.decode()) | [
"async",
"def",
"get_creds",
"(",
"self",
")",
"->",
"dict",
":",
"if",
"not",
"hasattr",
"(",
"DisclosedProof",
".",
"get_creds",
",",
"\"cb\"",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"vcx_disclosed_proof_retrieve_credentials: Creating callback\"",... | Gets the credentials from a disclosed proof
Example:
msg_id = '1'
phone_number = '8019119191'
connection = await Connection.create(source_id)
await connection.connect(phone_number)
disclosed_proof = await DisclosedProof.create_with_msgid(source_id, connection, msg_id)
creds = await disclosed_proof.get_creds()
:return: credentials | [
"Gets",
"the",
"credentials",
"from",
"a",
"disclosed",
"proof",
"Example",
":",
"msg_id",
"=",
"1",
"phone_number",
"=",
"8019119191",
"connection",
"=",
"await",
"Connection",
".",
"create",
"(",
"source_id",
")",
"await",
"connection",
".",
"connect",
"(",
... | python | train |
def user(self, **params):
    """Open the authenticated user's stream.

    Accepted params found at:
    https://dev.twitter.com/docs/api/1.1/get/user
    """
    endpoint = 'https://userstream.twitter.com/%s/user.json' \
        % self.streamer.api_version
    self.streamer._request(endpoint, params=params)
"def",
"user",
"(",
"self",
",",
"*",
"*",
"params",
")",
":",
"url",
"=",
"'https://userstream.twitter.com/%s/user.json'",
"%",
"self",
".",
"streamer",
".",
"api_version",
"self",
".",
"streamer",
".",
"_request",
"(",
"url",
",",
"params",
"=",
"params",
... | Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user | [
"Stream",
"user"
] | python | train |
def wordfreq(text, is_filename=False):
    """Return a dict mapping each lower-cased word to its occurrence count.

    :param text: the text to analyse, or a filename when is_filename is True.
    :param is_filename: when True, read the text from the named file first.
    """
    if is_filename:
        with open(text) as handle:
            text = handle.read()
    counts = {}
    for token in text.split():
        key = token.lower()
        if key in counts:
            counts[key] += 1
        else:
            counts[key] = 1
    return counts
"def",
"wordfreq",
"(",
"text",
",",
"is_filename",
"=",
"False",
")",
":",
"if",
"is_filename",
":",
"with",
"open",
"(",
"text",
")",
"as",
"f",
":",
"text",
"=",
"f",
".",
"read",
"(",
")",
"freqs",
"=",
"{",
"}",
"for",
"word",
"in",
"text",
... | Return a dictionary of words and word counts in a string. | [
"Return",
"a",
"dictionary",
"of",
"words",
"and",
"word",
"counts",
"in",
"a",
"string",
"."
] | python | test |
def awake(self, procid):
    """Move *procid* off every wait list and back onto the running list."""
    logger.debug(f"Remove procid:{procid} from waitlists and reestablish it in the running list")
    # Drop the process from both the read-wait and timer-wait queues.
    for wait_group in (self.rwait, self.twait):
        for waiting in wait_group:
            if procid in waiting:
                waiting.remove(procid)
    # Clear any pending timer and mark the process runnable again.
    self.timers[procid] = None
    self.running.append(procid)
    # If nothing is scheduled, this process becomes the current one.
    if self._current is None:
        self._current = procid
"def",
"awake",
"(",
"self",
",",
"procid",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Remove procid:{procid} from waitlists and reestablish it in the running list\"",
")",
"for",
"wait_list",
"in",
"self",
".",
"rwait",
":",
"if",
"procid",
"in",
"wait_list",
":",
... | Remove procid from waitlists and reestablish it in the running list | [
"Remove",
"procid",
"from",
"waitlists",
"and",
"reestablish",
"it",
"in",
"the",
"running",
"list"
] | python | valid |
def service(ctx):
    """Install systemd service configuration"""
    options = ctx.obj
    install_service(options['instance'], options['dbhost'],
                    options['dbname'], options['port'])
"def",
"service",
"(",
"ctx",
")",
":",
"install_service",
"(",
"ctx",
".",
"obj",
"[",
"'instance'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'dbhost'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'dbname'",
"]",
",",
"ctx",
".",
"obj",
"[",
"'port'",
"]",
")"... | Install systemd service configuration | [
"Install",
"systemd",
"service",
"configuration"
] | python | train |
StackStorm/pybind | pybind/nos/v7_2_0/rbridge_id/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/__init__.py#L1598-L1619 | def _set_telnet(self, v, load=False):
    """
    Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_telnet is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_telnet() directly.
    """
    # NOTE(review): generated pybind setter — presumably a value carrying
    # `_utype` is a typed/union wrapper that must be unwrapped first; confirm.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as the generated 'telnet' container class; type
      # validation happens here (TypeError/ValueError on incompatible input).
      t = YANGDynClass(v,base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
          'error-string': """telnet must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=telnet.telnet, is_container='container', presence=False, yang_name="telnet", rest_name="telnet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Telnet Server', u'cli-incomplete-no': None, u'sort-priority': u'1'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)""",
        })
    # Store on the name-mangled attribute backing the 'telnet' property.
    self.__telnet = t
    # Notify the parent object of the change when it supports notification.
    if hasattr(self, '_set'):
      self._set() | [
"def",
"_set_telnet",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
... | Setter method for telnet, mapped from YANG variable /rbridge_id/telnet (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_telnet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_telnet() directly. | [
"Setter",
"method",
"for",
"telnet",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"telnet",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"fil... | python | train |
happyleavesaoc/python-limitlessled | limitlessled/bridge.py | https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/bridge.py#L227-L239 | def _send_raw(self, command):
"""
Sends an raw command directly to the physical bridge.
:param command: A bytearray.
"""
try:
self._socket.send(bytearray(command))
self._sn = (self._sn + 1) % 256
return True
except (socket.error, socket.timeout):
# We can get a socket.error or timeout exception if the bridge is disconnected,
# but we are still sending data. In that case, return False to indicate that data is not sent.
return False | [
"def",
"_send_raw",
"(",
"self",
",",
"command",
")",
":",
"try",
":",
"self",
".",
"_socket",
".",
"send",
"(",
"bytearray",
"(",
"command",
")",
")",
"self",
".",
"_sn",
"=",
"(",
"self",
".",
"_sn",
"+",
"1",
")",
"%",
"256",
"return",
"True",... | Sends an raw command directly to the physical bridge.
:param command: A bytearray. | [
"Sends",
"an",
"raw",
"command",
"directly",
"to",
"the",
"physical",
"bridge",
".",
":",
"param",
"command",
":",
"A",
"bytearray",
"."
] | python | train |
def cmd(send, msg, args):
    """Returns a fortune.

    Syntax: {command} <list|[-a|-o] [module]>
    """
    if msg == 'list':
        # Combine the regular and offensive fortune module listings.
        available = list_fortunes() + list_fortunes(True)
        send(" ".join(available), ignore_length=True)
        return
    fortune_text = get_fortune(msg, args['name'])
    for fortune_line in fortune_text.splitlines():
        send(fortune_line)
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"if",
"msg",
"==",
"'list'",
":",
"fortunes",
"=",
"list_fortunes",
"(",
")",
"+",
"list_fortunes",
"(",
"True",
")",
"send",
"(",
"\" \"",
".",
"join",
"(",
"fortunes",
")",
",",
"ignor... | Returns a fortune.
Syntax: {command} <list|[-a|-o] [module]> | [
"Returns",
"a",
"fortune",
"."
] | python | train |
def _to_timezone(self, dt):
    """Interpret the naive datetime *dt* as UTC and return it converted
    to the field's configured local timezone."""
    local_tz = self._get_tz()
    return pytz.utc.localize(dt).astimezone(local_tz)
"def",
"_to_timezone",
"(",
"self",
",",
"dt",
")",
":",
"tz",
"=",
"self",
".",
"_get_tz",
"(",
")",
"utc_dt",
"=",
"pytz",
".",
"utc",
".",
"localize",
"(",
"dt",
")",
"return",
"utc_dt",
".",
"astimezone",
"(",
"tz",
")"
] | Takes a naive timezone with an utc value and return it formatted as a
local timezone. | [
"Takes",
"a",
"naive",
"timezone",
"with",
"an",
"utc",
"value",
"and",
"return",
"it",
"formatted",
"as",
"a",
"local",
"timezone",
"."
] | python | train |
def add(self, value):
    """
    Add a value to the reservoir.

    Non-float values are coerced with float(), so a TypeError or a
    ValueError may propagate from the conversion.
    """
    coerced = value if isinstance(value, float) else float(value)
    return self._do_add(coerced)
"def",
"add",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"value",
"=",
"float",
"(",
"value",
")",
"return",
"self",
".",
"_do_add",
"(",
"value",
")"
] | Add a value to the reservoir
The value will be casted to a floating-point, so a TypeError or a
ValueError may be raised. | [
"Add",
"a",
"value",
"to",
"the",
"reservoir",
"The",
"value",
"will",
"be",
"casted",
"to",
"a",
"floating",
"-",
"point",
"so",
"a",
"TypeError",
"or",
"a",
"ValueError",
"may",
"be",
"raised",
"."
] | python | train |
MatterMiners/cobald | cobald/daemon/runners/asyncio_runner.py | https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/runners/asyncio_runner.py#L60-L68 | async def _cancel_payloads(self):
        """Cancel all remaining payload tasks and wait for each to finish."""
        # First pass: request cancellation of every payload task.
        for task in self._tasks:
            task.cancel()
        # Yield once so the event loop can start delivering CancelledError.
        await asyncio.sleep(0)
        # Second pass: poll until each task has actually completed.
        for task in self._tasks:
            while not task.done():
                await asyncio.sleep(0.1)
            # NOTE(review): cancelling an already-done task is a no-op; this
            # re-issue looks defensive rather than required — confirm intent.
            task.cancel() | [
"async",
"def",
"_cancel_payloads",
"(",
"self",
")",
":",
"for",
"task",
"in",
"self",
".",
"_tasks",
":",
"task",
".",
"cancel",
"(",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"0",
")",
"for",
"task",
"in",
"self",
".",
"_tasks",
":",
"while",
... | Cancel all remaining payloads | [
"Cancel",
"all",
"remaining",
"payloads"
] | python | train |
ozak/georasters | georasters/georasters.py | https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L917-L941 | def resize_old(self, block_size, order=0, mode='constant', cval=False):
        '''
        geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)
        Returns resized georaster
        '''
        # NOTE(review): a caller-supplied cval of 0/0.0 is falsy and is
        # silently replaced by NaN here — confirm that is intended.
        if not cval:
            cval = np.nan
        # Normalize wide-range float rasters to [0, 1] before resampling so
        # interpolation is well scaled; small-range/non-float data is left as-is.
        if (self.raster.dtype.name.find('float') != -1 and
                np.max(np.abs([self.max(), self.min()])) > 1):
            raster2 = (self.raster-self.min())/(self.max()-self.min())
        else:
            raster2 = self.raster.copy()
        raster2 = raster2.astype(float)
        # Masked cells become NaN so the resampler treats them as missing.
        raster2[self.raster.mask] = np.nan
        # `resize` — presumably skimage.transform.resize imported at module
        # level — performs the actual interpolation. TODO confirm.
        raster2 = resize(raster2, block_size, order=order, mode=mode, cval=cval)
        raster2 = np.ma.masked_array(raster2, mask=np.isnan(raster2),
                                     fill_value=self.raster.fill_value)
        # Undo the earlier normalization to restore original value range.
        raster2 = raster2*(self.max()-self.min())+self.min()
        raster2[raster2.mask] = self.nodata_value
        raster2.mask = np.logical_or(np.isnan(raster2.data), raster2.data == self.nodata_value)
        # Rescale pixel sizes in the geotransform to match the new shape.
        geot = list(self.geot)
        [geot[-1], geot[1]] = np.array([geot[-1], geot[1]])*self.shape/block_size
        return GeoRaster(raster2, tuple(geot), nodata_value=self.nodata_value,\
                        projection=self.projection, datatype=self.datatype) | [
"def",
"resize_old",
"(",
"self",
",",
"block_size",
",",
"order",
"=",
"0",
",",
"mode",
"=",
"'constant'",
",",
"cval",
"=",
"False",
")",
":",
"if",
"not",
"cval",
":",
"cval",
"=",
"np",
".",
"nan",
"if",
"(",
"self",
".",
"raster",
".",
"dty... | geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)
Returns resized georaster | [
"geo",
".",
"resize",
"(",
"new_shape",
"order",
"=",
"0",
"mode",
"=",
"constant",
"cval",
"=",
"np",
".",
"nan",
"preserve_range",
"=",
"True",
")"
] | python | train |
soldag/python-pwmled | pwmled/led/__init__.py | https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/__init__.py#L78-L85 | def _update_pwm(self):
"""Update the pwm values of the driver regarding the current state."""
if self._is_on:
values = self._get_pwm_values()
else:
values = [0] * len(self._driver.pins)
self._driver.set_pwm(values) | [
"def",
"_update_pwm",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_on",
":",
"values",
"=",
"self",
".",
"_get_pwm_values",
"(",
")",
"else",
":",
"values",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"self",
".",
"_driver",
".",
"pins",
")",
"self",
... | Update the pwm values of the driver regarding the current state. | [
"Update",
"the",
"pwm",
"values",
"of",
"the",
"driver",
"regarding",
"the",
"current",
"state",
"."
] | python | train |
def _get_model_reference(self, model_id):
    """Constructs a ModelReference.

    Args:
        model_id (str): the ID of the model.

    Returns:
        google.cloud.bigquery.model.ModelReference:
            A ModelReference for a model in this dataset.
    """
    resource = {
        "projectId": self.project,
        "datasetId": self.dataset_id,
        "modelId": model_id,
    }
    return ModelReference.from_api_repr(resource)
"def",
"_get_model_reference",
"(",
"self",
",",
"model_id",
")",
":",
"return",
"ModelReference",
".",
"from_api_repr",
"(",
"{",
"\"projectId\"",
":",
"self",
".",
"project",
",",
"\"datasetId\"",
":",
"self",
".",
"dataset_id",
",",
"\"modelId\"",
":",
"mod... | Constructs a ModelReference.
Args:
model_id (str): the ID of the model.
Returns:
google.cloud.bigquery.model.ModelReference:
A ModelReference for a model in this dataset. | [
"Constructs",
"a",
"ModelReference",
"."
] | python | train |
def update(self, pbar, width):
    """Update the bouncing-marker widget and its subcomponents."""
    left = format_updatable(self.left, pbar)
    marker = format_updatable(self.marker, pbar)
    right = format_updatable(self.right, pbar)
    inner = width - (len(left) + len(right))
    if pbar.finished:
        return '%s%s%s' % (left, inner * marker, right)
    # The marker bounces back and forth across 2*inner-1 positions.
    position = int(pbar.currval % (inner * 2 - 1))
    if position > inner:
        position = inner * 2 - position
    lpad = self.fill * (position - 1)
    rpad = self.fill * (inner - len(marker) - len(lpad))
    # Swap if we want to bounce the other way
    if not self.fill_left:
        rpad, lpad = lpad, rpad
    return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)
"def",
"update",
"(",
"self",
",",
"pbar",
",",
"width",
")",
":",
"left",
",",
"marker",
",",
"right",
"=",
"(",
"format_updatable",
"(",
"i",
",",
"pbar",
")",
"for",
"i",
"in",
"(",
"self",
".",
"left",
",",
"self",
".",
"marker",
",",
"self",... | Updates the progress bar and its subcomponents | [
"Updates",
"the",
"progress",
"bar",
"and",
"its",
"subcomponents"
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/isis_state/router_isis_config/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/router_isis_config/__init__.py#L1310-L1331 | def _set_l1_spf6_timer(self, v, load=False):
    """
    Setter method for l1_spf6_timer, mapped from YANG variable /isis_state/router_isis_config/l1_spf6_timer (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_l1_spf6_timer is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_l1_spf6_timer() directly.
    """
    # NOTE(review): generated pybind setter — presumably a value carrying
    # `_utype` is a typed/union wrapper that must be unwrapped first; confirm.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as the generated container class; type validation
      # happens here (TypeError/ValueError on incompatible input).
      t = YANGDynClass(v,base=l1_spf6_timer.l1_spf6_timer, is_container='container', presence=False, yang_name="l1-spf6-timer", rest_name="l1-spf6-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-spf-timer-l1-spf6-timer-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
          'error-string': """l1_spf6_timer must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=l1_spf6_timer.l1_spf6_timer, is_container='container', presence=False, yang_name="l1-spf6-timer", rest_name="l1-spf6-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-spf-timer-l1-spf6-timer-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })
    # Store on the name-mangled attribute backing the 'l1_spf6_timer' property.
    self.__l1_spf6_timer = t
    # Notify the parent object of the change when it supports notification.
    if hasattr(self, '_set'):
      self._set() | [
"def",
"_set_l1_spf6_timer",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"... | Setter method for l1_spf6_timer, mapped from YANG variable /isis_state/router_isis_config/l1_spf6_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_l1_spf6_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l1_spf6_timer() directly. | [
"Setter",
"method",
"for",
"l1_spf6_timer",
"mapped",
"from",
"YANG",
"variable",
"/",
"isis_state",
"/",
"router_isis_config",
"/",
"l1_spf6_timer",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
... | python | train |
def _CreateZMQSocket(self):
  """Creates a ZeroMQ socket as well as a regular queue and a thread."""
  super(ZeroMQBufferedQueue, self)._CreateZMQSocket()
  if self._zmq_thread:
    return
  # Start the responder thread exactly once, named after this queue.
  responder_name = '{0:s}_zmq_responder'.format(self.name)
  self._zmq_thread = threading.Thread(
      target=self._ZeroMQResponder, args=[self._queue], name=responder_name)
  self._zmq_thread.start()
"def",
"_CreateZMQSocket",
"(",
"self",
")",
":",
"super",
"(",
"ZeroMQBufferedQueue",
",",
"self",
")",
".",
"_CreateZMQSocket",
"(",
")",
"if",
"not",
"self",
".",
"_zmq_thread",
":",
"thread_name",
"=",
"'{0:s}_zmq_responder'",
".",
"format",
"(",
"self",
... | Creates a ZeroMQ socket as well as a regular queue and a thread. | [
"Creates",
"a",
"ZeroMQ",
"socket",
"as",
"well",
"as",
"a",
"regular",
"queue",
"and",
"a",
"thread",
"."
] | python | train |
def delta(self, fromdate, todate):
    '''
    Return the number of dates in the list between <fromdate> and
    <todate>.
    '''
    # CONSIDER: raise an explicit exception if a date is not in self
    end = self.index(todate)
    start = self.index(fromdate)
    return end - start
"def",
"delta",
"(",
"self",
",",
"fromdate",
",",
"todate",
")",
":",
"#CONSIDER: raise an exception if a date is not in self",
"return",
"self",
".",
"index",
"(",
"todate",
")",
"-",
"self",
".",
"index",
"(",
"fromdate",
")"
] | Return the number of dates in the list between <fromdate> and
<todate>. | [
"Return",
"the",
"number",
"of",
"dates",
"in",
"the",
"list",
"between",
"<fromdate",
">",
"and",
"<todate",
">",
"."
] | python | train |
def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    """
    base_dir = os.path.dirname(path)
    # Only the first non-empty line is ever consumed; mirror the lazy
    # next(..., ()) behaviour by returning on the first reference found.
    for ref in non_empty_lines(path):
        return find_distributions(os.path.join(base_dir, ref))
    return ()
"def",
"resolve_egg_link",
"(",
"path",
")",
":",
"referenced_paths",
"=",
"non_empty_lines",
"(",
"path",
")",
"resolved_paths",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"ref",
")",
"for"... | Given a path to an .egg-link, resolve distributions
present in the referenced path. | [
"Given",
"a",
"path",
"to",
"an",
".",
"egg",
"-",
"link",
"resolve",
"distributions",
"present",
"in",
"the",
"referenced",
"path",
"."
] | python | train |
ofek/depq | run_performance_check.py | https://github.com/ofek/depq/blob/370e3ad503d3e9cedc3c49dc64add393ba945764/run_performance_check.py#L33-L61 | def linear_insert(self, item, priority):
        """Linear search. Performance is O(n^2)."""
        with self.lock:
            self_data = self.data
            # Bind hot attributes to locals so the loop below runs faster.
            rotate = self_data.rotate
            maxlen = self._maxlen
            length = len(self_data)
            # `count` ends up as the number of elements to the right of the
            # insertion point (entries are (item, priority) pairs, highest
            # priority first).
            count = length
            # in practice, this is better than doing a rotate(-1) every
            # loop and getting self.data[0] each time only because deque
            # implements a very efficient iterator in C
            for i in self_data:
                if priority > i[1]:
                    break
                count -= 1
            # Rotate the insertion point to the left end, insert, rotate back.
            rotate(-count)
            self_data.appendleft((item, priority))
            rotate(length-count)
            try:
                self.items[item] += 1
            except TypeError:
                # Unhashable items are tracked by their repr() instead.
                self.items[repr(item)] += 1
            # Enforce the bounded length by evicting the lowest-priority entry.
            if maxlen is not None and maxlen < len(self_data):
                self._poplast() | [
"def",
"linear_insert",
"(",
"self",
",",
"item",
",",
"priority",
")",
":",
"with",
"self",
".",
"lock",
":",
"self_data",
"=",
"self",
".",
"data",
"rotate",
"=",
"self_data",
".",
"rotate",
"maxlen",
"=",
"self",
".",
"_maxlen",
"length",
"=",
"len"... | Linear search. Performance is O(n^2). | [
"Linear",
"search",
".",
"Performance",
"is",
"O",
"(",
"n^2",
")",
"."
] | python | train |
def get_instance_aws_context(ec2_client):
    """
    Build a dictionary describing the AWS context of the current instance.

    Returns: a dictionary with these entries:
        region, instance_id, account, role, env, env_short, service
    Raises: IOError if couldn't read metadata or lookup attempt failed
    """
    context = {}
    try:
        # Region is the availability zone minus its trailing zone letter.
        availability_zone = http_get_metadata("placement/availability-zone/")
        context["region"] = availability_zone[:-1]
        context["instance_id"] = http_get_metadata('instance-id')
    except IOError as error:
        raise IOError("Error looking up metadata:availability-zone or instance-id: " + repr(error))
    try:
        description = ec2_client.describe_instances(InstanceIds=[context["instance_id"]])
    except Exception as error:
        raise IOError("Error calling describe_instances: " + repr(error))
    reservation = description["Reservations"][0]
    context["account"] = reservation["OwnerId"]
    # Role name is the second path segment of the instance profile ARN.
    profile_arn = reservation["Instances"][0]["IamInstanceProfile"]["Arn"]
    context["role"] = profile_arn.split(":")[5].split("/")[1]
    env_match = re.search("^(" + EFConfig.VALID_ENV_REGEX + ")-", context["role"])
    if not env_match:
        raise IOError("Did not find environment in role name: " + context["role"])
    context["env"] = env_match.group(1)
    context["env_short"] = context["env"].strip(".0123456789")
    context["service"] = "-".join(context["role"].split("-")[1:])
    return context
"def",
"get_instance_aws_context",
"(",
"ec2_client",
")",
":",
"result",
"=",
"{",
"}",
"try",
":",
"result",
"[",
"\"region\"",
"]",
"=",
"http_get_metadata",
"(",
"\"placement/availability-zone/\"",
")",
"result",
"[",
"\"region\"",
"]",
"=",
"result",
"[",
... | Returns: a dictionary of aws context
dictionary will contain these entries:
region, instance_id, account, role, env, env_short, service
Raises: IOError if couldn't read metadata or lookup attempt failed | [
"Returns",
":",
"a",
"dictionary",
"of",
"aws",
"context",
"dictionary",
"will",
"contain",
"these",
"entries",
":",
"region",
"instance_id",
"account",
"role",
"env",
"env_short",
"service",
"Raises",
":",
"IOError",
"if",
"couldn",
"t",
"read",
"metadata",
"... | python | train |
def init_sqlite_db(path, initTime=False):
    """
    Initialize SQLite Database

    Args:
        path(str): Path to database (Ex. '/home/username/my_sqlite.db').
        initTime(Optional[bool]): If True, print how long initialization took.

    Example::

        from gsshapy.lib.db_tools import init_sqlite_db, create_session

        sqlite_db_path = '/home/username/my_sqlite.db'

        sqlalchemy_url = init_sqlite_db(path=sqlite_db_path)

        db_work_sessionmaker = get_sessionmaker(sqlalchemy_url)

        db_work_session = db_work_sessionmaker()

        ##DO WORK

        db_work_session.close()
    """
    sqlalchemy_url = 'sqlite:///' + path
    elapsed = init_db(sqlalchemy_url)
    if initTime:
        print('TIME: {0} seconds'.format(elapsed))
    return sqlalchemy_url
"def",
"init_sqlite_db",
"(",
"path",
",",
"initTime",
"=",
"False",
")",
":",
"sqlite_base_url",
"=",
"'sqlite:///'",
"sqlalchemy_url",
"=",
"sqlite_base_url",
"+",
"path",
"init_time",
"=",
"init_db",
"(",
"sqlalchemy_url",
")",
"if",
"initTime",
":",
"print",... | Initialize SQLite Database
Args:
path(str): Path to database (Ex. '/home/username/my_sqlite.db').
initTime(Optional[bool]): If True, it will print the amount of time to generate database.
Example::
from gsshapy.lib.db_tools import init_sqlite_db, create_session
sqlite_db_path = '/home/username/my_sqlite.db'
init_postgresql_db(path=sqlite_db_path)
sqlalchemy_url = init_sqlite_db(path=sqlite_db_path)
db_work_sessionmaker = get_sessionmaker(sqlalchemy_url)
db_work_session = db_work_sessionmaker()
##DO WORK
db_work_session.close() | [
"Initialize",
"SQLite",
"Database",
"Args",
":",
"path",
"(",
"str",
")",
":",
"Path",
"to",
"database",
"(",
"Ex",
".",
"/",
"home",
"/",
"username",
"/",
"my_sqlite",
".",
"db",
")",
".",
"initTime",
"(",
"Optional",
"[",
"bool",
"]",
")",
":",
"... | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.