text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def success(self):
"""Return boolean indicating whether a solution was found."""
self._check_valid()
if self._ret_val != 0:
return False
return swiglpk.glp_get_status(self._problem._p) == swiglpk.GLP_OPT | [
"def",
"success",
"(",
"self",
")",
":",
"self",
".",
"_check_valid",
"(",
")",
"if",
"self",
".",
"_ret_val",
"!=",
"0",
":",
"return",
"False",
"return",
"swiglpk",
".",
"glp_get_status",
"(",
"self",
".",
"_problem",
".",
"_p",
")",
"==",
"swiglpk",... | 40.333333 | 15.833333 |
def rollsingle(self, func, window=20, name=None, fallback=False,
align='right', **kwargs):
'''Efficient rolling window calculation for min, max type functions
'''
rname = 'roll_{0}'.format(func)
if fallback:
rfunc = getattr(lib.fallback, rname)
else:
rfunc = getattr(lib, rname, None)
if not rfunc:
rfunc = getattr(lib.fallback, rname)
data = np.array([list(rfunc(serie, window)) for serie in self.series()])
name = name or self.makename(func, window=window)
dates = asarray(self.dates())
desc = settings.desc
if (align == 'right' and not desc) or desc:
dates = dates[window-1:]
else:
dates = dates[:-window+1]
return self.clone(dates, data.transpose(), name=name) | [
"def",
"rollsingle",
"(",
"self",
",",
"func",
",",
"window",
"=",
"20",
",",
"name",
"=",
"None",
",",
"fallback",
"=",
"False",
",",
"align",
"=",
"'right'",
",",
"*",
"*",
"kwargs",
")",
":",
"rname",
"=",
"'roll_{0}'",
".",
"format",
"(",
"func... | 38 | 16.1 |
def diffusion(diffusion_constant=0.2, exposure_time=0.05, samples=200):
"""
See `diffusion_correlated` for information related to units, etc
"""
radius = 5
psfsize = np.array([2.0, 1.0, 3.0])
# create a base image of one particle
s0 = init.create_single_particle_state(imsize=4*radius,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
# add up a bunch of trajectories
finalimage = 0*s0.get_model_image()[s0.inner]
position = 0*s0.obj.pos[0]
for i in xrange(samples):
offset = np.sqrt(6*diffusion_constant*exposure_time)*np.random.randn(3)
s0.obj.pos[0] = np.array(s0.image.shape)/2 + offset
s0.reset()
finalimage += s0.get_model_image()[s0.inner]
position += s0.obj.pos[0]
finalimage /= float(samples)
position /= float(samples)
# place that into a new image at the expected parameters
s = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
s.reset()
# measure the true inferred parameters
return s, finalimage, position | [
"def",
"diffusion",
"(",
"diffusion_constant",
"=",
"0.2",
",",
"exposure_time",
"=",
"0.05",
",",
"samples",
"=",
"200",
")",
":",
"radius",
"=",
"5",
"psfsize",
"=",
"np",
".",
"array",
"(",
"[",
"2.0",
",",
"1.0",
",",
"3.0",
"]",
")",
"# create a... | 33.727273 | 20.69697 |
def BuildFilterFindSpecs(
self, artifact_definitions_path, custom_artifacts_path,
knowledge_base_object, artifact_filter_names=None, filter_file_path=None):
"""Builds find specifications from artifacts or filter file if available.
Args:
artifact_definitions_path (str): path to artifact definitions file.
custom_artifacts_path (str): path to custom artifact definitions file.
knowledge_base_object (KnowledgeBase): knowledge base.
artifact_filter_names (Optional[list[str]]): names of artifact
definitions that are used for filtering file system and Windows
Registry key paths.
filter_file_path (Optional[str]): path of filter file.
Returns:
list[dfvfs.FindSpec]: find specifications for the file source type.
Raises:
InvalidFilter: if no valid FindSpecs are built.
"""
environment_variables = knowledge_base_object.GetEnvironmentVariables()
find_specs = None
if artifact_filter_names:
logger.debug(
'building find specification based on artifacts: {0:s}'.format(
', '.join(artifact_filter_names)))
artifacts_registry_object = BaseEngine.BuildArtifactsRegistry(
artifact_definitions_path, custom_artifacts_path)
self._artifacts_filter_helper = (
artifact_filters.ArtifactDefinitionsFilterHelper(
artifacts_registry_object, knowledge_base_object))
self._artifacts_filter_helper.BuildFindSpecs(
artifact_filter_names, environment_variables=environment_variables)
# If the user selected Windows Registry artifacts we have to ensure
# the Windows Registry files are parsed.
if self._artifacts_filter_helper.registry_find_specs:
self._artifacts_filter_helper.BuildFindSpecs(
self._WINDOWS_REGISTRY_FILES_ARTIFACT_NAMES,
environment_variables=environment_variables)
find_specs = self._artifacts_filter_helper.file_system_find_specs
if not find_specs:
raise errors.InvalidFilter(
'No valid file system find specifications were built from '
'artifacts.')
elif filter_file_path:
logger.debug(
'building find specification based on filter file: {0:s}'.format(
filter_file_path))
filter_file_object = filter_file.FilterFile(filter_file_path)
find_specs = filter_file_object.BuildFindSpecs(
environment_variables=environment_variables)
if not find_specs:
raise errors.InvalidFilter(
'No valid file system find specifications were built from filter '
'file.')
return find_specs | [
"def",
"BuildFilterFindSpecs",
"(",
"self",
",",
"artifact_definitions_path",
",",
"custom_artifacts_path",
",",
"knowledge_base_object",
",",
"artifact_filter_names",
"=",
"None",
",",
"filter_file_path",
"=",
"None",
")",
":",
"environment_variables",
"=",
"knowledge_ba... | 40.65625 | 23.875 |
def features_tags_parse_str_to_dict(obj):
"""
Parse tag strings of all features in the collection into a Python
dictionary, if possible.
"""
features = obj['features']
for i in tqdm(range(len(features))):
tags = features[i]['properties'].get('tags')
if tags is not None:
try:
tags = json.loads("{" + tags.replace("=>", ":") + "}")
except:
try:
tags = eval("{" + tags.replace("=>", ":") + "}")
except:
tags = None
if type(tags) == dict:
features[i]['properties']['tags'] = {k:tags[k] for k in tags}
elif tags is None and 'tags' in features[i]['properties']:
del features[i]['properties']['tags']
return obj | [
"def",
"features_tags_parse_str_to_dict",
"(",
"obj",
")",
":",
"features",
"=",
"obj",
"[",
"'features'",
"]",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"len",
"(",
"features",
")",
")",
")",
":",
"tags",
"=",
"features",
"[",
"i",
"]",
"[",
"'pr... | 37.190476 | 15.666667 |
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces) | [
"def",
"_VarintBytes",
"(",
"value",
")",
":",
"pieces",
"=",
"[",
"]",
"_EncodeVarint",
"(",
"pieces",
".",
"append",
",",
"value",
")",
"return",
"b\"\"",
".",
"join",
"(",
"pieces",
")"
] | 33.428571 | 14.428571 |
def GetReportDownloadHeaders(self, **kwargs):
"""Returns a dictionary of headers for a report download request.
Note that the given keyword arguments will override any settings configured
from the googleads.yaml file.
Args:
**kwargs: Optional keyword arguments.
Keyword Arguments:
client_customer_id: A string containing a client_customer_id intended to
override the default value set by the AdWordsClient.
include_zero_impressions: A boolean indicating whether the report should
show rows with zero impressions.
skip_report_header: A boolean indicating whether to include a header row
containing the report name and date range. If false or not specified,
report output will include the header row.
skip_column_header: A boolean indicating whether to include column names
in reports. If false or not specified, report output will include the
column names.
skip_report_summary: A boolean indicating whether to include a summary row
containing the report totals. If false or not specified, report output
will include the summary row.
use_raw_enum_values: A boolean indicating whether to return enum field
values as enums instead of display values.
Returns:
A dictionary containing the headers configured for downloading a report.
Raises:
GoogleAdsValueError: If one or more of the report header keyword arguments
is invalid.
"""
headers = self._adwords_client.oauth2_client.CreateHttpHeader()
headers.update({
'Content-type': self._CONTENT_TYPE,
'developerToken': str(self._adwords_client.developer_token),
'clientCustomerId': str(kwargs.get(
'client_customer_id', self._adwords_client.client_customer_id)),
'User-Agent': ''.join([
self._adwords_client.user_agent,
googleads.common.GenerateLibSig(self._PRODUCT_SIG),
',gzip'])
})
headers.update(self.custom_http_headers)
updated_kwargs = dict(self._adwords_client.report_download_headers)
updated_kwargs.update(kwargs)
for kw in updated_kwargs:
try:
headers[_REPORT_HEADER_KWARGS[kw]] = str(updated_kwargs[kw])
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'The provided keyword "%s" is invalid. Accepted keywords are: %s'
% (kw, _REPORT_HEADER_KWARGS.keys()))
return headers | [
"def",
"GetReportDownloadHeaders",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"headers",
"=",
"self",
".",
"_adwords_client",
".",
"oauth2_client",
".",
"CreateHttpHeader",
"(",
")",
"headers",
".",
"update",
"(",
"{",
"'Content-type'",
":",
"self",
"."... | 41.948276 | 24.155172 |
def levels(self):
"""Returns a histogram for each RGBA channel.
Returns a 4-tuple of lists, r, g, b, and a.
Each list has 255 items, a count for each pixel value.
"""
h = self.img.histogram()
r = h[0:255]
g = h[256:511]
b = h[512:767]
a = h[768:1024]
return r, g, b, a | [
"def",
"levels",
"(",
"self",
")",
":",
"h",
"=",
"self",
".",
"img",
".",
"histogram",
"(",
")",
"r",
"=",
"h",
"[",
"0",
":",
"255",
"]",
"g",
"=",
"h",
"[",
"256",
":",
"511",
"]",
"b",
"=",
"h",
"[",
"512",
":",
"767",
"]",
"a",
"="... | 24.125 | 19 |
def input_loop():
'''wait for user input'''
while mpstate.status.exit != True:
try:
if mpstate.status.exit != True:
line = input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
mpstate.input_queue.put(line) | [
"def",
"input_loop",
"(",
")",
":",
"while",
"mpstate",
".",
"status",
".",
"exit",
"!=",
"True",
":",
"try",
":",
"if",
"mpstate",
".",
"status",
".",
"exit",
"!=",
"True",
":",
"line",
"=",
"input",
"(",
"mpstate",
".",
"rl",
".",
"prompt",
")",
... | 30.8 | 11.2 |
def is_path(path):
"""Checks if the passed in path is a valid Path within the portal
:param path: The path to check
:type uid: string
:return: True if the path is a valid path within the portal
:rtype: bool
"""
if not isinstance(path, basestring):
return False
portal_path = get_path(get_portal())
if not path.startswith(portal_path):
return False
obj = get_object_by_path(path)
if obj is None:
return False
return True | [
"def",
"is_path",
"(",
"path",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"basestring",
")",
":",
"return",
"False",
"portal_path",
"=",
"get_path",
"(",
"get_portal",
"(",
")",
")",
"if",
"not",
"path",
".",
"startswith",
"(",
"portal_path",... | 28 | 14.470588 |
def nsamples_to_hourmin(x, pos):
'''Convert axes labels to experiment duration in hours/minutes
Notes
-----
Matplotlib FuncFormatter function
https://matplotlib.org/examples/pylab_examples/custom_ticker1.html
'''
h, m, s = hourminsec(x/16.0)
return '{:.0f}h {:2.0f}′'.format(h, m+round(s)) | [
"def",
"nsamples_to_hourmin",
"(",
"x",
",",
"pos",
")",
":",
"h",
",",
"m",
",",
"s",
"=",
"hourminsec",
"(",
"x",
"/",
"16.0",
")",
"return",
"'{:.0f}h {:2.0f}′'.f",
"o",
"rmat(h",
",",
" ",
"m",
"r",
"o",
"und(s",
")",
")",
"",
""
] | 28.454545 | 23.727273 |
def reference_doi(self, index):
"""Return the reference DOI."""
return self.reference_data(index).get("DOI", self.reference_extra_field("DOI", index)) | [
"def",
"reference_doi",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"reference_data",
"(",
"index",
")",
".",
"get",
"(",
"\"DOI\"",
",",
"self",
".",
"reference_extra_field",
"(",
"\"DOI\"",
",",
"index",
")",
")"
] | 54.666667 | 21 |
def _calc_all_possible_moves(self, input_color):
"""
Returns list of all possible moves
:type: input_color: Color
:rtype: list
"""
for piece in self:
# Tests if square on the board is not empty
if piece is not None and piece.color == input_color:
for move in piece.possible_moves(self):
test = cp(self)
test_move = Move(end_loc=move.end_loc,
piece=test.piece_at_square(move.start_loc),
status=move.status,
start_loc=move.start_loc,
promoted_to_piece=move.promoted_to_piece)
test.update(test_move)
if self.king_loc_dict is None:
yield move
continue
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if my_king is None or \
not isinstance(my_king, King) or \
my_king.color != input_color:
self.king_loc_dict[input_color] = test.find_king(input_color)
my_king = test.piece_at_square(self.king_loc_dict[input_color])
if not my_king.in_check(test):
yield move | [
"def",
"_calc_all_possible_moves",
"(",
"self",
",",
"input_color",
")",
":",
"for",
"piece",
"in",
"self",
":",
"# Tests if square on the board is not empty",
"if",
"piece",
"is",
"not",
"None",
"and",
"piece",
".",
"color",
"==",
"input_color",
":",
"for",
"mo... | 38.694444 | 21.75 |
def getPlayer(name):
"""obtain a specific PlayerRecord settings file"""
if isinstance(name, PlayerRecord): return name
try: return getKnownPlayers()[name.lower()]
except KeyError:
raise ValueError("given player name '%s' is not a known player definition"%(name)) | [
"def",
"getPlayer",
"(",
"name",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"PlayerRecord",
")",
":",
"return",
"name",
"try",
":",
"return",
"getKnownPlayers",
"(",
")",
"[",
"name",
".",
"lower",
"(",
")",
"]",
"except",
"KeyError",
":",
"raise"... | 47.333333 | 18.333333 |
def prox_zero(X, step):
"""Proximal operator to project onto zero
"""
return np.zeros(X.shape, dtype=X.dtype) | [
"def",
"prox_zero",
"(",
"X",
",",
"step",
")",
":",
"return",
"np",
".",
"zeros",
"(",
"X",
".",
"shape",
",",
"dtype",
"=",
"X",
".",
"dtype",
")"
] | 29.5 | 5 |
def lcs(self, stringIdxs=-1):
"""Returns the Largest Common Substring of Strings provided in stringIdxs.
If stringIdxs is not provided, the LCS of all strings is returned.
::param stringIdxs: Optional: List of indexes of strings.
"""
if stringIdxs == -1 or not isinstance(stringIdxs, list):
stringIdxs = set(range(len(self.word_starts)))
else:
stringIdxs = set(stringIdxs)
deepestNode = self._find_lcs(self.root, stringIdxs)
start = deepestNode.idx
end = deepestNode.idx + deepestNode.depth
return self.word[start:end] | [
"def",
"lcs",
"(",
"self",
",",
"stringIdxs",
"=",
"-",
"1",
")",
":",
"if",
"stringIdxs",
"==",
"-",
"1",
"or",
"not",
"isinstance",
"(",
"stringIdxs",
",",
"list",
")",
":",
"stringIdxs",
"=",
"set",
"(",
"range",
"(",
"len",
"(",
"self",
".",
... | 40.666667 | 17.4 |
def on_add_cols(self, event):
"""
Show simple dialog that allows user to add a new column name
"""
col_labels = self.grid.col_labels
dia = pw.ChooseOne(self, yes="Add single columns", no="Add groups")
result1 = dia.ShowModal()
if result1 == wx.ID_CANCEL:
return
elif result1 == wx.ID_YES:
items = sorted([col_name for col_name in self.dm.index if col_name not in col_labels])
dia = pw.HeaderDialog(self, 'columns to add',
items1=list(items), groups=[])
dia.Centre()
result2 = dia.ShowModal()
else:
groups = self.dm['group'].unique()
dia = pw.HeaderDialog(self, 'groups to add',
items1=list(groups), groups=True)
dia.Centre()
result2 = dia.ShowModal()
new_headers = []
if result2 == 5100:
new_headers = dia.text_list
# if there is nothing to add, quit
if not new_headers:
return
if result1 == wx.ID_YES:
# add individual headers
errors = self.add_new_grid_headers(new_headers)
else:
# add header groups
errors = self.add_new_header_groups(new_headers)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy() | [
"def",
"on_add_cols",
"(",
"self",
",",
"event",
")",
":",
"col_labels",
"=",
"self",
".",
"grid",
".",
"col_labels",
"dia",
"=",
"pw",
".",
"ChooseOne",
"(",
"self",
",",
"yes",
"=",
"\"Add single columns\"",
",",
"no",
"=",
"\"Add groups\"",
")",
"resu... | 40.204082 | 16.653061 |
def _render_line(self, line, settings):
"""
Render single box line.
"""
s = self._es(settings, self.SETTING_WIDTH, self.SETTING_FLAG_BORDER, self.SETTING_MARGIN, self.SETTING_MARGIN_LEFT, self.SETTING_MARGIN_RIGHT)
width_content = self.calculate_width_widget_int(**s)
s = self._es_content(settings)
s[self.SETTING_WIDTH] = width_content
line = self.fmt_content(line, **s)
s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING])
line = self.fmt_text(line, **s)
s = self._es(settings, self.SETTING_BORDER_STYLE)
bchar = self.bchar('v', 'm', **s)
s = self._es_text(settings, settings[self.SETTING_BORDER_FORMATING])
bchar = self.fmt_text(bchar, **s)
line = '{}{}{}'.format(bchar, line, bchar)
s = self._es_margin(settings)
line = self.fmt_margin(line, **s)
return line | [
"def",
"_render_line",
"(",
"self",
",",
"line",
",",
"settings",
")",
":",
"s",
"=",
"self",
".",
"_es",
"(",
"settings",
",",
"self",
".",
"SETTING_WIDTH",
",",
"self",
".",
"SETTING_FLAG_BORDER",
",",
"self",
".",
"SETTING_MARGIN",
",",
"self",
".",
... | 37.583333 | 19.75 |
def ws_db004(self, value=None):
""" Corresponds to IDD Field `ws_db004`
Mean wind speed coincident with 0.4% dry-bulb temperature
Args:
value (float): value for IDD Field `ws_db004`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ws_db004`'.format(value))
self._ws_db004 = value | [
"def",
"ws_db004",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'value {} need to be of type float... | 35 | 20.285714 |
def sky2pix_vec(self, pos, r, pa):
"""
Convert a vector from sky to pixel coords.
The vector has a magnitude, angle, and an origin on the sky.
Parameters
----------
pos : (float, float)
The (ra, dec) of the origin of the vector (degrees).
r : float
The magnitude or length of the vector (degrees).
pa : float
The position angle of the vector (degrees).
Returns
-------
x, y : float
The pixel coordinates of the origin.
r, theta : float
The magnitude (pixels) and angle (degrees) of the vector.
"""
ra, dec = pos
x, y = self.sky2pix(pos)
a = translate(ra, dec, r, pa)
locations = self.sky2pix(a)
x_off, y_off = locations
a = np.sqrt((x - x_off) ** 2 + (y - y_off) ** 2)
theta = np.degrees(np.arctan2((y_off - y), (x_off - x)))
return x, y, a, theta | [
"def",
"sky2pix_vec",
"(",
"self",
",",
"pos",
",",
"r",
",",
"pa",
")",
":",
"ra",
",",
"dec",
"=",
"pos",
"x",
",",
"y",
"=",
"self",
".",
"sky2pix",
"(",
"pos",
")",
"a",
"=",
"translate",
"(",
"ra",
",",
"dec",
",",
"r",
",",
"pa",
")",... | 29.65625 | 19.40625 |
def get_font_path(self):
"""Return the current font path as a list of strings."""
r = request.GetFontPath(display = self.display)
return r.paths | [
"def",
"get_font_path",
"(",
"self",
")",
":",
"r",
"=",
"request",
".",
"GetFontPath",
"(",
"display",
"=",
"self",
".",
"display",
")",
"return",
"r",
".",
"paths"
] | 41.25 | 12.25 |
def compile_create(self, blueprint, command, _):
"""
Compile a create table command.
"""
columns = ', '.join(self._get_columns(blueprint))
sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)
sql += self._add_foreign_keys(blueprint)
sql += self._add_primary_keys(blueprint)
return sql + ')' | [
"def",
"compile_create",
"(",
"self",
",",
"blueprint",
",",
"command",
",",
"_",
")",
":",
"columns",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"_get_columns",
"(",
"blueprint",
")",
")",
"sql",
"=",
"'CREATE TABLE %s (%s'",
"%",
"(",
"self",
".",
"... | 27.769231 | 19.461538 |
def Import(context, request):
""" Beckman Coulter Access 2 analysis results
"""
infile = request.form['rochecobas_taqman_model48_file']
fileformat = request.form['rochecobas_taqman_model48_format']
artoapply = request.form['rochecobas_taqman_model48_artoapply']
override = request.form['rochecobas_taqman_model48_override']
instrument = request.form.get('instrument', None)
errors = []
logs = []
warns = []
# Load the most suitable parser according to file extension/options/etc...
parser = None
if not hasattr(infile, 'filename'):
errors.append(_("No file selected"))
if fileformat == 'rsf':
parser = RocheCobasTaqmanParser(infile)
if fileformat == 'csv':
parser = RocheCobasTaqmanParser(infile, "csv")
else:
errors.append(t(_("Unrecognized file format ${fileformat}",
mapping={"fileformat": fileformat})))
if parser:
# Load the importer
status = ['sample_received', 'attachment_due', 'to_be_verified']
if artoapply == 'received':
status = ['sample_received']
elif artoapply == 'received_tobeverified':
status = ['sample_received', 'attachment_due', 'to_be_verified']
over = [False, False]
if override == 'nooverride':
over = [False, False]
elif override == 'override':
over = [True, False]
elif override == 'overrideempty':
over = [True, True]
importer = RocheCobasTaqman48Importer(parser=parser,
context=context,
allowed_ar_states=status,
allowed_analysis_states=None,
override=over,
instrument_uid=instrument)
tbex = ''
try:
importer.process()
except:
tbex = traceback.format_exc()
errors = importer.errors
logs = importer.logs
warns = importer.warns
if tbex:
errors.append(tbex)
results = {'errors': errors, 'log': logs, 'warns': warns}
return json.dumps(results) | [
"def",
"Import",
"(",
"context",
",",
"request",
")",
":",
"infile",
"=",
"request",
".",
"form",
"[",
"'rochecobas_taqman_model48_file'",
"]",
"fileformat",
"=",
"request",
".",
"form",
"[",
"'rochecobas_taqman_model48_format'",
"]",
"artoapply",
"=",
"request",
... | 36.716667 | 18.983333 |
def register_editor(self, editor, parent, ensure_uniqueness=False):
"""
Registers given :class:`umbra.components.factory.script_editor.editor.Editor` class editor in the Model.
:param editor: Editor to register.
:type editor: Editor
:param parent: EditorNode parent.
:type parent: GraphModelNode
:param ensure_uniqueness: Ensure registrar uniqueness.
:type ensure_uniqueness: bool
:return: EditorNode.
:rtype: EditorNode
"""
if ensure_uniqueness:
if self.get_editor_nodes(editor):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' editor is already registered!".format(
self.__class__.__name__, editor))
LOGGER.debug("> Registering '{0}' editor.".format(editor))
row = parent.children_count()
self.beginInsertRows(self.get_node_index(parent), row, row)
editor_node = EditorNode(editor=editor,
parent=parent)
self.endInsertRows()
self.editor_registered.emit(editor_node)
return editor_node | [
"def",
"register_editor",
"(",
"self",
",",
"editor",
",",
"parent",
",",
"ensure_uniqueness",
"=",
"False",
")",
":",
"if",
"ensure_uniqueness",
":",
"if",
"self",
".",
"get_editor_nodes",
"(",
"editor",
")",
":",
"raise",
"foundations",
".",
"exceptions",
... | 36.9 | 20.5 |
def fluence(
power_mW,
color,
beam_radius,
reprate_Hz,
pulse_width,
color_units="wn",
beam_radius_units="mm",
pulse_width_units="fs_t",
area_type="even",
) -> tuple:
"""Calculate the fluence of a beam.
Parameters
----------
power_mW : number
Time integrated power of beam.
color : number
Color of beam in units.
beam_radius : number
Radius of beam in units.
reprate_Hz : number
Laser repetition rate in inverse seconds (Hz).
pulse_width : number
Pulsewidth of laser in units
color_units : string (optional)
Valid wt.units color unit identifier. Default is wn.
beam_radius_units : string (optional)
Valid wt.units distance unit identifier. Default is mm.
pulse_width_units : number
Valid wt.units time unit identifier. Default is fs.
area_type : string (optional)
Type of calculation to accomplish for Gaussian area.
even specfies a flat-top calculation
average specifies a Gaussian average within the FWHM
Default is even.
Returns
-------
tuple
Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2
"""
# calculate beam area
if area_type == "even":
radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
area_cm2 = np.pi * radius_cm ** 2 # cm^2
elif area_type == "average":
radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
area_cm2 = np.pi * radius_cm ** 2 # cm^2
area_cm2 /= 0.7213 # weight by average intensity felt by oscillator inside of FWHM
else:
raise NotImplementedError
# calculate fluence in uj/cm^2
ujcm2 = power_mW / reprate_Hz # mJ
ujcm2 *= 1e3 # uJ
ujcm2 /= area_cm2 # uJ/cm^2
# calculate fluence in photons/cm^2
energy = wt_units.converter(color, color_units, "eV") # eV
photonscm2 = ujcm2 * 1e-6 # J/cm2
photonscm2 /= 1.60218e-19 # eV/cm2
photonscm2 /= energy # photons/cm2
# calculate peak intensity in GW/cm^2
pulse_width_s = wt_units.converter(pulse_width, pulse_width_units, "s_t") # seconds
GWcm2 = ujcm2 / 1e6 # J/cm2
GWcm2 /= pulse_width_s # W/cm2
GWcm2 /= 1e9
# finish
return ujcm2, photonscm2, GWcm2 | [
"def",
"fluence",
"(",
"power_mW",
",",
"color",
",",
"beam_radius",
",",
"reprate_Hz",
",",
"pulse_width",
",",
"color_units",
"=",
"\"wn\"",
",",
"beam_radius_units",
"=",
"\"mm\"",
",",
"pulse_width_units",
"=",
"\"fs_t\"",
",",
"area_type",
"=",
"\"even\"",
... | 32.536232 | 17.463768 |
def _load_int(self):
"""Load internal data from file and return it."""
values = numpy.fromfile(self.filepath_int)
if self.NDIM > 0:
values = values.reshape(self.seriesshape)
return values | [
"def",
"_load_int",
"(",
"self",
")",
":",
"values",
"=",
"numpy",
".",
"fromfile",
"(",
"self",
".",
"filepath_int",
")",
"if",
"self",
".",
"NDIM",
">",
"0",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"self",
".",
"seriesshape",
")",
"retur... | 37.666667 | 12.833333 |
def fine_tune_model_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to string paths.
"""
fine_tune_model_from_file_paths(model_archive_path=args.model_archive,
config_file=args.config_file,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
extend_vocab=args.extend_vocab,
file_friendly_logging=args.file_friendly_logging,
batch_weight_key=args.batch_weight_key,
embedding_sources_mapping=args.embedding_sources_mapping) | [
"def",
"fine_tune_model_from_args",
"(",
"args",
":",
"argparse",
".",
"Namespace",
")",
":",
"fine_tune_model_from_file_paths",
"(",
"model_archive_path",
"=",
"args",
".",
"model_archive",
",",
"config_file",
"=",
"args",
".",
"config_file",
",",
"serialization_dir"... | 61.583333 | 27.083333 |
def t_NAMESPACE(self, t):
r"([0-9a-zA-Z_])+(?=::)"
t.endlexpos = t.lexpos + len(t.value)
return t | [
"def",
"t_NAMESPACE",
"(",
"self",
",",
"t",
")",
":",
"t",
".",
"endlexpos",
"=",
"t",
".",
"lexpos",
"+",
"len",
"(",
"t",
".",
"value",
")",
"return",
"t"
] | 29.5 | 13 |
def calc_prob_mom(returns, other_returns):
"""
`Probabilistic momentum <http://cssanalytics.wordpress.com/2014/01/28/are-simple-momentum-strategies-too-dumb-introducing-probabilistic-momentum/>`_ (see `momentum investing <https://www.investopedia.com/terms/m/momentum_investing.asp>`_)
Basically the "probability or confidence that one asset
is going to outperform the other".
Source:
http://cssanalytics.wordpress.com/2014/01/28/are-simple-momentum-strategies-too-dumb-introducing-probabilistic-momentum/ # NOQA
"""
return t.cdf(returns.calc_information_ratio(other_returns),
len(returns) - 1) | [
"def",
"calc_prob_mom",
"(",
"returns",
",",
"other_returns",
")",
":",
"return",
"t",
".",
"cdf",
"(",
"returns",
".",
"calc_information_ratio",
"(",
"other_returns",
")",
",",
"len",
"(",
"returns",
")",
"-",
"1",
")"
] | 53.166667 | 38.166667 |
def fetch(self):
"""
Download a package
@returns: 0 = success or 1 if failed download
"""
#Default type to download
source = True
directory = "."
if self.options.file_type == "svn":
version = "dev"
svn_uri = get_download_uri(self.project_name, \
"dev", True)
if svn_uri:
directory = self.project_name + "_svn"
return self.fetch_svn(svn_uri, directory)
else:
self.logger.error(\
"ERROR: No subversion repository found for %s" % \
self.project_name)
return 1
elif self.options.file_type == "source":
source = True
elif self.options.file_type == "egg":
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
self.logger.error("No %s URI found for package: %s " % \
(self.options.file_type, self.project_name))
return 1 | [
"def",
"fetch",
"(",
"self",
")",
":",
"#Default type to download",
"source",
"=",
"True",
"directory",
"=",
"\".\"",
"if",
"self",
".",
"options",
".",
"file_type",
"==",
"\"svn\"",
":",
"version",
"=",
"\"dev\"",
"svn_uri",
"=",
"get_download_uri",
"(",
"s... | 31.914286 | 17.914286 |
def read_unsigned_var_int(file_obj):
"""Read a value using the unsigned, variable int encoding."""
result = 0
shift = 0
while True:
byte = struct.unpack(b"<B", file_obj.read(1))[0]
result |= ((byte & 0x7F) << shift)
if (byte & 0x80) == 0:
break
shift += 7
return result | [
"def",
"read_unsigned_var_int",
"(",
"file_obj",
")",
":",
"result",
"=",
"0",
"shift",
"=",
"0",
"while",
"True",
":",
"byte",
"=",
"struct",
".",
"unpack",
"(",
"b\"<B\"",
",",
"file_obj",
".",
"read",
"(",
"1",
")",
")",
"[",
"0",
"]",
"result",
... | 29.363636 | 16.181818 |
def visit_Boolean(self, node):
"""Visitor for `Boolean` AST node."""
if node.value == 'true':
return Bool(True)
elif node.value == 'false':
return Bool(False) | [
"def",
"visit_Boolean",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"value",
"==",
"'true'",
":",
"return",
"Bool",
"(",
"True",
")",
"elif",
"node",
".",
"value",
"==",
"'false'",
":",
"return",
"Bool",
"(",
"False",
")"
] | 33.5 | 7.333333 |
def spectral_entropy(X, Band, Fs, Power_Ratio=None):
"""Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins
"""
if Power_Ratio is None:
Power, Power_Ratio = bin_power(X, Band, Fs)
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * numpy.log(Power_Ratio[i])
Spectral_Entropy /= numpy.log(
len(Power_Ratio)
) # to save time, minus one is omitted
return -1 * Spectral_Entropy | [
"def",
"spectral_entropy",
"(",
"X",
",",
"Band",
",",
"Fs",
",",
"Power_Ratio",
"=",
"None",
")",
":",
"if",
"Power_Ratio",
"is",
"None",
":",
"Power",
",",
"Power_Ratio",
"=",
"bin_power",
"(",
"X",
",",
"Band",
",",
"Fs",
")",
"Spectral_Entropy",
"=... | 28.389831 | 26.898305 |
def entry_point(items=tuple()):
"""
External entry point which calls main() and
if Stop is raised, calls sys.exit()
"""
try:
if not items:
from .example import ExampleCommand
from .version import Version
items = [(ExampleCommand.NAME, ExampleCommand),
(Version.NAME, Version)]
main("yaclifw", items=items)
except Stop as stop:
print(stop)
sys.exit(stop.rc)
except SystemExit:
raise
except KeyboardInterrupt:
print("Cancelled")
sys.exit(1)
except Exception:
traceback.print_exc()
sys.exit(1) | [
"def",
"entry_point",
"(",
"items",
"=",
"tuple",
"(",
")",
")",
":",
"try",
":",
"if",
"not",
"items",
":",
"from",
".",
"example",
"import",
"ExampleCommand",
"from",
".",
"version",
"import",
"Version",
"items",
"=",
"[",
"(",
"ExampleCommand",
".",
... | 27.521739 | 12.913043 |
def sample(self, nsims=1000):
""" Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
lv_draws = self.draw_latent_variables(nsims=nsims)
sigmas = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
data_draws = np.array([ss.t.rvs(loc=self.latent_variables.z_list[-1].prior.transform(lv_draws[-1,i]),
df=self.latent_variables.z_list[-2].prior.transform(lv_draws[-2,i]), scale=np.exp(sigmas[i]/2.0)) for i in range(nsims)])
return data_draws | [
"def",
"sample",
"(",
"self",
",",
"nsims",
"=",
"1000",
")",
":",
"if",
"self",
".",
"latent_variables",
".",
"estimation_method",
"not",
"in",
"[",
"'BBVI'",
",",
"'M-H'",
"]",
":",
"raise",
"Exception",
"(",
"\"No latent variables estimated!\"",
")",
"els... | 44.35 | 26.85 |
def remove_unnecessary_whitespace(css):
"""Remove unnecessary whitespace characters."""
def pseudoclasscolon(css):
"""
Prevents 'p :link' from becoming 'p:link'.
Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
translated back again later.
"""
regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
match = regex.search(css)
while match:
css = ''.join([
css[:match.start()],
match.group().replace(":", "___PSEUDOCLASSCOLON___"),
css[match.end():]])
match = regex.search(css)
return css
css = pseudoclasscolon(css)
# Remove spaces from before things.
css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)
# If there is a `@charset`, then only allow one, and move to the beginning.
css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)
# Put the space back in for a few cases, such as `@media screen` and
# `(-webkit-min-device-pixel-ratio:0)`.
css = re.sub(r"\band\(", "and (", css)
# Put the colons back.
css = css.replace('___PSEUDOCLASSCOLON___', ':')
# Remove spaces from after things.
css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)
return css | [
"def",
"remove_unnecessary_whitespace",
"(",
"css",
")",
":",
"def",
"pseudoclasscolon",
"(",
"css",
")",
":",
"\"\"\"\n Prevents 'p :link' from becoming 'p:link'.\n\n Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is\n translated back again later.\n ... | 31.707317 | 19.243902 |
def insert_penalty_model(cur, penalty_model):
"""Insert a penalty model into the database.
Args:
cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function
is meant to be run within a :obj:`with` statement.
penalty_model (:class:`penaltymodel.PenaltyModel`): A penalty
model to be stored in the database.
Examples:
>>> import networkx as nx
>>> import penaltymodel.core as pm
>>> import dimod
>>> graph = nx.path_graph(3)
>>> decision_variables = (0, 2)
>>> feasible_configurations = {(-1, -1): 0., (+1, +1): 0.}
>>> spec = pm.Specification(graph, decision_variables, feasible_configurations, dimod.SPIN)
>>> linear = {v: 0 for v in graph}
>>> quadratic = {edge: -1 for edge in graph.edges}
>>> model = dimod.BinaryQuadraticModel(linear, quadratic, 0.0, vartype=dimod.SPIN)
>>> widget = pm.PenaltyModel.from_specification(spec, model, 2., -2)
>>> with pmc.cache_connect(':memory:') as cur:
... pmc.insert_penalty_model(cur, widget)
"""
encoded_data = {}
linear, quadratic, offset = penalty_model.model.to_ising()
nodelist = sorted(linear)
edgelist = sorted(sorted(edge) for edge in penalty_model.graph.edges)
insert_graph(cur, nodelist, edgelist, encoded_data)
insert_feasible_configurations(cur, penalty_model.feasible_configurations, encoded_data)
insert_ising_model(cur, nodelist, edgelist, linear, quadratic, offset, encoded_data)
encoded_data['decision_variables'] = json.dumps(penalty_model.decision_variables, separators=(',', ':'))
encoded_data['classical_gap'] = penalty_model.classical_gap
encoded_data['ground_energy'] = penalty_model.ground_energy
insert = \
"""
INSERT OR IGNORE INTO penalty_model(
decision_variables,
classical_gap,
ground_energy,
feasible_configurations_id,
ising_model_id)
SELECT
:decision_variables,
:classical_gap,
:ground_energy,
feasible_configurations.id,
ising_model.id
FROM feasible_configurations, ising_model, graph
WHERE
graph.edges = :edges AND
graph.num_nodes = :num_nodes AND
ising_model.graph_id = graph.id AND
ising_model.linear_biases = :linear_biases AND
ising_model.quadratic_biases = :quadratic_biases AND
ising_model.offset = :offset AND
feasible_configurations.num_variables = :num_variables AND
feasible_configurations.num_feasible_configurations = :num_feasible_configurations AND
feasible_configurations.feasible_configurations = :feasible_configurations AND
feasible_configurations.energies = :energies;
"""
cur.execute(insert, encoded_data) | [
"def",
"insert_penalty_model",
"(",
"cur",
",",
"penalty_model",
")",
":",
"encoded_data",
"=",
"{",
"}",
"linear",
",",
"quadratic",
",",
"offset",
"=",
"penalty_model",
".",
"model",
".",
"to_ising",
"(",
")",
"nodelist",
"=",
"sorted",
"(",
"linear",
")... | 41.838235 | 21.352941 |
def _get_user(self, username, attrs=ALL_ATTRS):
"""Get a user from the ldap"""
username = ldap.filter.escape_filter_chars(username)
user_filter = self.user_filter_tmpl % {
'username': self._uni(username)
}
r = self._search(self._byte_p2(user_filter), attrs, self.userdn)
if len(r) == 0:
return None
# if NO_ATTR, only return the DN
if attrs == NO_ATTR:
dn_entry = r[0][0]
# in other cases, return everything (dn + attributes)
else:
dn_entry = r[0]
return dn_entry | [
"def",
"_get_user",
"(",
"self",
",",
"username",
",",
"attrs",
"=",
"ALL_ATTRS",
")",
":",
"username",
"=",
"ldap",
".",
"filter",
".",
"escape_filter_chars",
"(",
"username",
")",
"user_filter",
"=",
"self",
".",
"user_filter_tmpl",
"%",
"{",
"'username'",... | 30.736842 | 18.631579 |
def scroll_to_bottom(self):
"""
Scoll to the very bottom of the page
TODO: add increment & delay options to scoll slowly down the whole page to let each section load in
"""
if self.driver.selenium is not None:
try:
self.driver.selenium.execute_script("window.scrollTo(0, document.body.scrollHeight);")
except WebDriverException:
self.driver.selenium.execute_script("window.scrollTo(0, 50000);")
except Exception:
logger.exception("Unknown error scrolling page") | [
"def",
"scroll_to_bottom",
"(",
"self",
")",
":",
"if",
"self",
".",
"driver",
".",
"selenium",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"driver",
".",
"selenium",
".",
"execute_script",
"(",
"\"window.scrollTo(0, document.body.scrollHeight);\"",
")",
... | 47.833333 | 21 |
def Version():
"""Gets the version of gdb as a 3-tuple.
The gdb devs seem to think it's a good idea to make --version
output multiple lines of welcome text instead of just the actual version,
so we ignore everything it outputs after the first line.
Returns:
The installed version of gdb in the form
(<major>, <minor or None>, <micro or None>)
gdb 7.7 would hence show up as version (7,7)
"""
output = subprocess.check_output(['gdb', '--version']).split('\n')[0]
# Example output (Arch linux):
# GNU gdb (GDB) 7.7
# Example output (Debian sid):
# GNU gdb (GDB) 7.6.2 (Debian 7.6.2-1)
# Example output (Debian wheezy):
# GNU gdb (GDB) 7.4.1-debian
# Example output (centos 2.6.32):
# GNU gdb (GDB) Red Hat Enterprise Linux (7.2-56.el6)
# As we've seen in the examples above, versions may be named very liberally
# So we assume every part of that string may be the "real" version string
# and try to parse them all. This too isn't perfect (later strings will
# overwrite information gathered from previous ones), but it should be
# flexible enough for everything out there.
major = None
minor = None
micro = None
for potential_versionstring in output.split():
version = re.split('[^0-9]', potential_versionstring)
try:
major = int(version[0])
except (IndexError, ValueError):
pass
try:
minor = int(version[1])
except (IndexError, ValueError):
pass
try:
micro = int(version[2])
except (IndexError, ValueError):
pass
return (major, minor, micro) | [
"def",
"Version",
"(",
")",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'gdb'",
",",
"'--version'",
"]",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"# Example output (Arch linux):",
"# GNU gdb (GDB) 7.7",
"# Example output (Debian... | 36.522727 | 18.272727 |
def find_project_config_file(project_root: str) -> str:
"""Return absolute path to project-specific config file, if it exists.
:param project_root: Absolute path to project root directory.
A project config file is a file named `YCONFIG_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
"""
if project_root:
project_config_file = os.path.join(project_root, YCONFIG_FILE)
if os.path.isfile(project_config_file):
return project_config_file | [
"def",
"find_project_config_file",
"(",
"project_root",
":",
"str",
")",
"->",
"str",
":",
"if",
"project_root",
":",
"project_config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_root",
",",
"YCONFIG_FILE",
")",
"if",
"os",
".",
"path",
".",
"... | 38 | 18.733333 |
def update_virtual_meta(self):
"""Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame."""
import astropy.units
try:
path = os.path.join(self.get_private_dir(create=False), "virtual_meta.yaml")
if os.path.exists(path):
meta_info = vaex.utils.read_json_or_yaml(path)
if 'virtual_columns' not in meta_info:
return
self.virtual_columns.update(meta_info["virtual_columns"])
self.variables.update(meta_info["variables"])
self.ucds.update(meta_info["ucds"])
self.descriptions.update(meta_info["descriptions"])
units = {key: astropy.units.Unit(value) for key, value in meta_info["units"].items()}
self.units.update(units)
except:
logger.exception("non fatal error") | [
"def",
"update_virtual_meta",
"(",
"self",
")",
":",
"import",
"astropy",
".",
"units",
"try",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"get_private_dir",
"(",
"create",
"=",
"False",
")",
",",
"\"virtual_meta.yaml\"",
")",
"if... | 55.058824 | 19.823529 |
def _getScaledValue(self, inpt):
"""
Convert the input, which is in normal space, into log space
"""
if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:
return None
else:
val = inpt
if val < self.minval:
val = self.minval
elif val > self.maxval:
val = self.maxval
scaledVal = math.log10(val)
return scaledVal | [
"def",
"_getScaledValue",
"(",
"self",
",",
"inpt",
")",
":",
"if",
"inpt",
"==",
"SENTINEL_VALUE_FOR_MISSING_DATA",
":",
"return",
"None",
"else",
":",
"val",
"=",
"inpt",
"if",
"val",
"<",
"self",
".",
"minval",
":",
"val",
"=",
"self",
".",
"minval",
... | 23.933333 | 15.666667 |
def image1(d, u, v, w, dmind, dtind, beamnum, irange):
""" Parallelizable function for imaging a chunk of data for a single dm.
Assumes data is dedispersed and resampled, so this just images each integration.
Simple one-stage imaging that returns dict of params.
returns dictionary with keys of cand location and values as tuple of features
"""
i0, i1 = irange
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
# logger.info('i0 {0}, i1 {1}, dm {2}, dt {3}, len {4}'.format(i0, i1, dmind, dtind, len(data_resamp)))
ims,snr,candints = rtlib.imgallfullfilterxyflux(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1'])
# logger.info('finished imaging candints {0}'.format(candints))
feat = {}
for i in xrange(len(candints)):
if snr[i] > 0:
l1, m1 = calc_lm(d, ims[i], minmax='max')
else:
l1, m1 = calc_lm(d, ims[i], minmax='min')
logger.info('Got one! Int=%d, DM=%d, dt=%d: SNR_im=%.1f @ (%.2e,%.2e).' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], l1, m1))
candid = (d['segment'], (i0+candints[i])*d['dtarr'][dtind], dmind, dtind, beamnum)
# logger.info(candid)
# assemble feature in requested order
ff = []
for feature in d['features']:
if feature == 'snr1':
ff.append(snr[i])
elif feature == 'immax1':
if snr[i] > 0:
ff.append(ims[i].max())
else:
ff.append(ims[i].min())
elif feature == 'l1':
ff.append(l1)
elif feature == 'm1':
ff.append(m1)
elif feature == 'im40': # 40 pixel image peak cutout
peakx, peaky = n.where(ims[i] == ims[i].max())
sizex, sizey = ims[i].shape
# set image window with min=0 and max=size
xmin = max(0, peakx - 20); xmax = min(peakx + 20, sizex)
ymin = max(0, peaky - 20); ymax = min(peaky + 20, sizey)
ff.append(ims[i][xmin:xmax,ymin:ymax])
elif feature == 'spec20': # 20 int spectrum cutout
# set int window with min 0 and max len()
imin = max(0, (i0+candints[i])*d['dtarr'][dtind] - 10)
imax = min( (i0+candints[i])*d['dtarr'][dtind] + 10, len(data_resamp))
data_cut = data_resamp[imin:imax].copy()
rtlib.phaseshift_threaded(data_cut, d, l1, m1, u, v)
ff.append(data_cut.mean(axis=1))
elif feature in ['specstd', 'specskew', 'speckurtosis']: # this is standard set and must all appear together
if feature == 'specstd': # first this one, then others will use same data
seli = (i0+candints[i])*d['dtarr'][dtind]
datasel = data_resamp[seli:seli+1].copy()
rtlib.phaseshift_threaded(datasel, d, l1, m1, u, v)
data = n.ma.masked_equal(datasel, 0j)
spec = data.mean(axis=3).mean(axis=1).mean(axis=0).real
std = spec.std(axis=0)
ff.append(std)
elif feature == 'specskew':
skew = float(mstats.skew(spec))
ff.append(skew)
elif feature == 'speckurtosis':
kurtosis = float(mstats.kurtosis(spec))
ff.append(kurtosis)
elif feature == 'imskew':
skew = float(mstats.skew(ims[i].flatten()))
ff.append(skew)
elif feature == 'imkurtosis':
kurtosis = float(mstats.kurtosis(ims[i].flatten()))
ff.append(kurtosis)
feat[candid] = list(ff)
return feat | [
"def",
"image1",
"(",
"d",
",",
"u",
",",
"v",
",",
"w",
",",
"dmind",
",",
"dtind",
",",
"beamnum",
",",
"irange",
")",
":",
"i0",
",",
"i1",
"=",
"irange",
"data_resamp",
"=",
"numpyview",
"(",
"data_resamp_mem",
",",
"'complex64'",
",",
"datashape... | 49.474359 | 23.666667 |
def _get_next_parent_node(self, parent):
""" Used by _get_next_child_node, this method is called to find next possible parent.
For example if timeperiod 2011010200 has all children processed, but is not yet processed itself
then it makes sense to look in 2011010300 for hourly nodes """
grandparent = parent.parent
if grandparent is None:
# here, we work at yearly/linear level
return None
parent_siblings = list(grandparent.children)
sorted_keys = sorted(parent_siblings)
index = sorted_keys.index(parent.timeperiod)
if index + 1 >= len(sorted_keys):
return None
else:
return grandparent.children[sorted_keys[index + 1]] | [
"def",
"_get_next_parent_node",
"(",
"self",
",",
"parent",
")",
":",
"grandparent",
"=",
"parent",
".",
"parent",
"if",
"grandparent",
"is",
"None",
":",
"# here, we work at yearly/linear level",
"return",
"None",
"parent_siblings",
"=",
"list",
"(",
"grandparent",... | 46.4375 | 15.375 |
def hsv_to_rgb(hsv):
"""
Vectorized HSV to RGB conversion, adapted from:
http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion
"""
h, s, v = (hsv[..., i] for i in range(3))
shape = h.shape
i = np.int_(h*6.)
f = h*6.-i
q = f
t = 1.-f
i = np.ravel(i)
f = np.ravel(f)
i%=6
t = np.ravel(t)
q = np.ravel(q)
s = np.ravel(s)
v = np.ravel(v)
clist = (1-s*np.vstack([np.zeros_like(f),np.ones_like(f),q,t]))*v
#0:v 1:p 2:q 3:t
order = np.array([[0,3,1],[2,0,1],[1,0,3],[1,2,0],[3,1,0],[0,1,2]])
rgb = clist[order[i], np.arange(np.prod(shape))[:,None]]
return rgb.reshape(shape+(3,)) | [
"def",
"hsv_to_rgb",
"(",
"hsv",
")",
":",
"h",
",",
"s",
",",
"v",
"=",
"(",
"hsv",
"[",
"...",
",",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
")",
"shape",
"=",
"h",
".",
"shape",
"i",
"=",
"np",
".",
"int_",
"(",
"h",
"*",
... | 23.428571 | 23.571429 |
def _normalize(self, key, value):
"""
Use normalize_<key> methods to normalize user input. Any user
input will be normalized at the moment it is used as filter,
or entered as a value of Task attribute.
"""
# None value should not be converted by normalizer
if value is None:
return None
normalize_func = getattr(self, 'normalize_{0}'.format(key),
lambda x: x)
return normalize_func(value) | [
"def",
"_normalize",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"# None value should not be converted by normalizer",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"normalize_func",
"=",
"getattr",
"(",
"self",
",",
"'normalize_{0}'",
".",
"format",
... | 32.933333 | 18.533333 |
def json_encode_default(obj):
'''
Convert datetime.datetime to timestamp
:param obj: value to (possibly) convert
'''
if isinstance(obj, (datetime, date)):
result = dt2ts(obj)
else:
result = json_encoder.default(obj)
return to_encoding(result) | [
"def",
"json_encode_default",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"datetime",
",",
"date",
")",
")",
":",
"result",
"=",
"dt2ts",
"(",
"obj",
")",
"else",
":",
"result",
"=",
"json_encoder",
".",
"default",
"(",
"obj",
")"... | 25.181818 | 16.272727 |
def PrivateKeyFromNEP2(nep2_key, passphrase):
"""
Gets the private key from a NEP-2 encrypted private key
Args:
nep2_key (str): The nep-2 encrypted private key
passphrase (str): The password to encrypt the private key with, as unicode string
Returns:
bytes: The private key
"""
if not nep2_key or len(nep2_key) != 58:
raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))
ADDRESS_HASH_SIZE = 4
ADDRESS_HASH_OFFSET = len(NEP_FLAG) + len(NEP_HEADER)
try:
decoded_key = base58.b58decode_check(nep2_key)
except Exception as e:
raise ValueError("Invalid nep2_key")
address_hash = decoded_key[ADDRESS_HASH_OFFSET:ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE]
encrypted = decoded_key[-32:]
pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
derived = scrypt.hash(pwd_normalized, address_hash,
N=SCRYPT_ITERATIONS,
r=SCRYPT_BLOCKSIZE,
p=SCRYPT_PARALLEL_FACTOR,
buflen=SCRYPT_KEY_LEN_BYTES)
derived1 = derived[:32]
derived2 = derived[32:]
cipher = AES.new(derived2, AES.MODE_ECB)
decrypted = cipher.decrypt(encrypted)
private_key = xor_bytes(decrypted, derived1)
# Now check that the address hashes match. If they don't, the password was wrong.
kp_new = KeyPair(priv_key=private_key)
kp_new_address = kp_new.GetAddress()
kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode("utf-8")).digest()
kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()
kp_new_address_hash = kp_new_address_hash_tmp2[:4]
if (kp_new_address_hash != address_hash):
raise ValueError("Wrong passphrase")
return private_key | [
"def",
"PrivateKeyFromNEP2",
"(",
"nep2_key",
",",
"passphrase",
")",
":",
"if",
"not",
"nep2_key",
"or",
"len",
"(",
"nep2_key",
")",
"!=",
"58",
":",
"raise",
"ValueError",
"(",
"'Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'",
".",
"format",
"... | 40.22449 | 23.489796 |
def remove_line_interval(input_file: str, delete_line_from: int,
delete_line_to: int, output_file: str):
r"""Remove a line interval.
:parameter input_file: the file that needs to be read.
:parameter delete_line_from: the line number from which start deleting.
:parameter delete_line_to: the line number to which stop deleting.
:parameter output_file: the file that needs to be written without the
selected lines.
:type input_file: str
:type delete_line_from: int
:type delete_line_to: int
:type output_file: str
:returns: None
:raises: LineOutOfFileBoundsError or a built-in exception.
.. note::
Line numbers start from ``1``.
.. note::
It is possible to remove a single line only. This happens when
the parameters delete_line_from and delete_line_to are equal.
"""
assert delete_line_from >= 1
assert delete_line_to >= 1
with open(input_file, 'r') as f:
lines = f.readlines()
# Invalid line ranges.
# Base case delete_line_to - delete_line_from == 0: single line.
if delete_line_to - delete_line_from < 0:
raise NegativeLineRangeError
if delete_line_from > len(lines) or delete_line_to > len(lines):
raise LineOutOfFileBoundsError
line_counter = 1
# Rewrite the file without the string.
with atomic_write(output_file, overwrite=True) as f:
for line in lines:
# Ignore the line interval where the content to be deleted lies.
if line_counter >= delete_line_from and line_counter <= delete_line_to:
pass
# Write the rest of the file.
else:
f.write(line)
line_counter += 1 | [
"def",
"remove_line_interval",
"(",
"input_file",
":",
"str",
",",
"delete_line_from",
":",
"int",
",",
"delete_line_to",
":",
"int",
",",
"output_file",
":",
"str",
")",
":",
"assert",
"delete_line_from",
">=",
"1",
"assert",
"delete_line_to",
">=",
"1",
"wit... | 36.382979 | 20 |
def clipPolygons(self, polygons):
"""
Recursively remove all polygons in `polygons` that are inside this BSP
tree.
"""
if not self.plane:
return polygons[:]
front = []
back = []
for poly in polygons:
self.plane.splitPolygon(poly, front, back, front, back)
if self.front:
front = self.front.clipPolygons(front)
if self.back:
back = self.back.clipPolygons(back)
else:
back = []
front.extend(back)
return front | [
"def",
"clipPolygons",
"(",
"self",
",",
"polygons",
")",
":",
"if",
"not",
"self",
".",
"plane",
":",
"return",
"polygons",
"[",
":",
"]",
"front",
"=",
"[",
"]",
"back",
"=",
"[",
"]",
"for",
"poly",
"in",
"polygons",
":",
"self",
".",
"plane",
... | 24.217391 | 20.434783 |
def write_to_cache(self, data, filename=''):
''' Writes data to file as JSON. Returns True. '''
if not filename:
filename = self.cache_path_cache
json_data = json.dumps(data)
with open(filename, 'w') as cache:
cache.write(json_data)
return True | [
"def",
"write_to_cache",
"(",
"self",
",",
"data",
",",
"filename",
"=",
"''",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"cache_path_cache",
"json_data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"with",
"open",
"(",
"fil... | 37.75 | 9.5 |
def absolute_abundance(coverage, total_bases):
"""
absolute abundance = (number of bases mapped to genome / total number of bases in sample) * 100
"""
absolute = {}
for genome in coverage:
absolute[genome] = []
index = 0
for calc in coverage[genome]:
bases = calc[0]
total = total_bases[index]
absolute[genome].append((bases / total) * float(100))
index += 1
total_assembled = [0 for i in absolute[genome]]
for genome in absolute:
index = 0
for cov in absolute[genome]:
total_assembled[index] += cov
index += 1
absolute['Unassembled'] = [(100 - i) for i in total_assembled]
return absolute | [
"def",
"absolute_abundance",
"(",
"coverage",
",",
"total_bases",
")",
":",
"absolute",
"=",
"{",
"}",
"for",
"genome",
"in",
"coverage",
":",
"absolute",
"[",
"genome",
"]",
"=",
"[",
"]",
"index",
"=",
"0",
"for",
"calc",
"in",
"coverage",
"[",
"geno... | 28.857143 | 18.095238 |
def build_node_key_search(query, key) -> NodePredicate:
"""Build a node filter for nodes whose values for the given key are superstrings of the query string(s).
:param query: The query string or strings to check if they're in the node name
:type query: str or iter[str]
:param str key: The key for the node data dictionary. Should refer only to entries that have str values
"""
if isinstance(query, str):
return build_node_data_search(key, lambda s: query.lower() in s.lower())
if isinstance(query, Iterable):
return build_node_data_search(key, lambda s: any(q.lower() in s.lower() for q in query))
raise TypeError('query is wrong type: %s', query) | [
"def",
"build_node_key_search",
"(",
"query",
",",
"key",
")",
"->",
"NodePredicate",
":",
"if",
"isinstance",
"(",
"query",
",",
"str",
")",
":",
"return",
"build_node_data_search",
"(",
"key",
",",
"lambda",
"s",
":",
"query",
".",
"lower",
"(",
")",
"... | 49 | 26.785714 |
def get_interface_detail_output_interface_ifHCOutBroadcastPkts(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ifHCOutBroadcastPkts = ET.SubElement(interface, "ifHCOutBroadcastPkts")
ifHCOutBroadcastPkts.text = kwargs.pop('ifHCOutBroadcastPkts')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_interface_detail_output_interface_ifHCOutBroadcastPkts",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_interface_detail",
"=",
"ET",
".",
"Element",
"(",
"\"get_interface_detail\"",
")",... | 51.647059 | 20.411765 |
def _print_level(level, msg):
"""Print the information in Unicode safe manner."""
for l in str(msg.rstrip()).split("\n"):
print("{0:<9s}{1}".format(level, str(l))) | [
"def",
"_print_level",
"(",
"level",
",",
"msg",
")",
":",
"for",
"l",
"in",
"str",
"(",
"msg",
".",
"rstrip",
"(",
")",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"print",
"(",
"\"{0:<9s}{1}\"",
".",
"format",
"(",
"level",
",",
"str",
"(",
"l... | 44 | 5.75 |
def hmac(key, message, tag=None, alg=hashlib.sha256):
"""
Generates a hashed message authentication code (HMAC) by prepending the
specified @tag string to a @message, then hashing with to HMAC
using a cryptographic @key and hashing @alg -orithm.
"""
return HMAC.new(str(key), str(tag) + str(message), digestmod=alg).digest() | [
"def",
"hmac",
"(",
"key",
",",
"message",
",",
"tag",
"=",
"None",
",",
"alg",
"=",
"hashlib",
".",
"sha256",
")",
":",
"return",
"HMAC",
".",
"new",
"(",
"str",
"(",
"key",
")",
",",
"str",
"(",
"tag",
")",
"+",
"str",
"(",
"message",
")",
... | 49 | 18.428571 |
def send_and_wait(self, message, params=None, timeout=10, raises=False):
"""Send service method request and wait for response
:param message:
proto message instance (use :meth:`SteamUnifiedMessages.get`)
or method name (e.g. ``Player.GetGameBadgeLevels#1``)
:type message: :class:`str`, proto message instance
:param params: message parameters
:type params: :class:`dict`
:param timeout: (optional) seconds to wait
:type timeout: :class:`int`
:param raises: (optional) On timeout if :class:`False` return :class:`None`, else raise :class:`gevent.Timeout`
:type raises: :class:`bool`
:return: response proto message instance
:rtype: (proto message, :class:`.UnifiedMessageError`)
:raises: :class:`gevent.Timeout`
"""
job_id = self.send(message, params)
resp = self.wait_event(job_id, timeout, raises=raises)
return (None, None) if resp is None else resp | [
"def",
"send_and_wait",
"(",
"self",
",",
"message",
",",
"params",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"raises",
"=",
"False",
")",
":",
"job_id",
"=",
"self",
".",
"send",
"(",
"message",
",",
"params",
")",
"resp",
"=",
"self",
".",
"wa... | 49.3 | 16.95 |
def lookup(self, name: str):
'''lookup a symbol by fully qualified name.'''
# <module>
if name in self._moduleMap:
return self._moduleMap[name]
# <module>.<Symbol>
(module_name, type_name, fragment_name) = self.split_typename(name)
if not module_name and type_name:
click.secho('not able to lookup symbol: {0}'.format(name), fg='red')
return None
module = self._moduleMap[module_name]
return module.lookup(type_name, fragment_name) | [
"def",
"lookup",
"(",
"self",
",",
"name",
":",
"str",
")",
":",
"# <module>",
"if",
"name",
"in",
"self",
".",
"_moduleMap",
":",
"return",
"self",
".",
"_moduleMap",
"[",
"name",
"]",
"# <module>.<Symbol>",
"(",
"module_name",
",",
"type_name",
",",
"f... | 43.333333 | 14.833333 |
def transitions_for(self, roles=None, actor=None, anchors=[]):
"""
For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`.
"""
proxy = self.obj.access_for(roles, actor, anchors)
return {name: transition for name, transition in self.transitions(current=False).items()
if name in proxy} | [
"def",
"transitions_for",
"(",
"self",
",",
"roles",
"=",
"None",
",",
"actor",
"=",
"None",
",",
"anchors",
"=",
"[",
"]",
")",
":",
"proxy",
"=",
"self",
".",
"obj",
".",
"access_for",
"(",
"roles",
",",
"actor",
",",
"anchors",
")",
"return",
"{... | 53.888889 | 22.777778 |
def check_who_am_i(self):
"""
This method checks verifies the device ID.
@return: True if valid, False if not
"""
register = self.MMA8452Q_Register['WHO_AM_I']
self.board.i2c_read_request(self.address, register, 1,
Constants.I2C_READ | Constants.I2C_END_TX_MASK,
self.data_val, Constants.CB_TYPE_DIRECT)
reply = self.wait_for_read_result()
if reply[self.data_start] == self.device_id:
rval = True
else:
rval = False
return rval | [
"def",
"check_who_am_i",
"(",
"self",
")",
":",
"register",
"=",
"self",
".",
"MMA8452Q_Register",
"[",
"'WHO_AM_I'",
"]",
"self",
".",
"board",
".",
"i2c_read_request",
"(",
"self",
".",
"address",
",",
"register",
",",
"1",
",",
"Constants",
".",
"I2C_RE... | 31 | 21 |
def _install_interrupt_handler():
    """Suppress KeyboardInterrupt traceback display in specific situations
    If not running in dev mode, and if executed from the command line, then
    we raise SystemExit instead of KeyboardInterrupt. This provides a clean
    exit.
    :returns: None if no action is taken, original interrupt handler otherwise
    """
    # These would clutter the quilt.x namespace, so they're imported here instead.
    import os
    import sys
    import signal
    import pkg_resources
    from .tools import const
    # Check to see what entry points / scripts are configured to run quilt from the CLI
    # By doing this, we have these benefits:
    # * Avoid closing someone's Jupyter/iPython/bPython session when they hit ctrl-c
    # * Avoid calling exit() when being used as an external lib
    # * Provide exceptions when running in Jupyter/iPython/bPython
    # * Provide exceptions when running in unexpected circumstances
    quilt = pkg_resources.get_distribution('quilt')
    executable = os.path.basename(sys.argv[0])
    entry_points = quilt.get_entry_map().get('console_scripts', [])
    # When python is run with '-c', this was executed via 'python -c "<some python code>"'
    if executable == '-c':
        # This is awkward and somewhat hackish, but we have to ensure that this is *us*
        # executing via 'python -c'
        if len(sys.argv) > 1 and sys.argv[1] == 'quilt testing':
            # it's us. Let's pretend '-c' is an entry point.
            entry_points['-c'] = 'blah'
            sys.argv.pop(1)
    if executable not in entry_points:
        return
    # We're running as a console script.
    # If not in dev mode, use SystemExit instead of raising KeyboardInterrupt
    def handle_interrupt(signum, stack):
        # Check for dev mode
        # NOTE(review): _DEV_MODE is a module-level global set elsewhere in
        # this file -- confirm its three states (None / True / False).
        if _DEV_MODE is None:
            # Args and environment have not been parsed, and no _DEV_MODE state has been set.
            dev_mode = True if len(sys.argv) > 1 and sys.argv[1] == '--dev' else False
            dev_mode = True if os.environ.get('QUILT_DEV_MODE', '').strip().lower() == 'true' else dev_mode
        else:  # Use forced dev-mode if _DEV_MODE is set
            dev_mode = _DEV_MODE
        # In order to display the full traceback, we lose control of the exit code here.
        # Dev mode ctrl-c exit just produces the generic exit error code 1
        if dev_mode:
            raise KeyboardInterrupt()
        # Normal exit
        # avoid annoying prompt displacement when hitting ctrl-c
        print()
        # exit() here is the site-builtin; it raises SystemExit with this code.
        exit(const.EXIT_KB_INTERRUPT)
    # Returns the previously-installed SIGINT handler.
    return signal.signal(signal.SIGINT, handle_interrupt)
"def",
"_install_interrupt_handler",
"(",
")",
":",
"# These would clutter the quilt.x namespace, so they're imported here instead.",
"import",
"os",
"import",
"sys",
"import",
"signal",
"import",
"pkg_resources",
"from",
".",
"tools",
"import",
"const",
"# Check to see what en... | 44.724138 | 24.844828 |
def get_record(self, fileName, ref_extract_callback=None):
    """
    Gets the Marc xml of the files in xaml_jp directory
    :param fileName: the name of the file to parse.
    :type fileName: string
    :param refextract_callback: callback to be used to extract
                                unstructured references. It should
                                return a marcxml formated string
                                of the reference.
    :type refextract_callback: callable
    :returns: a string with the marc xml version of the file.
    """
    self.document = parse(fileName)
    article_type = self._get_article_type()
    # Only these article types are converted; anything else yields ''.
    if article_type not in ['research-article',
                            'introduction',
                            'letter']:
        return ''
    rec = create_record()
    # 245: title / subtitle
    title, subtitle, notes = self._get_title()
    subfields = []
    if subtitle:
        subfields.append(('b', subtitle))
    if title:
        subfields.append(('a', title))
    record_add_field(rec, '245', subfields=subfields)
    subjects = self.document.getElementsByTagName('kwd')
    subjects = map(xml_to_text, subjects)
    # 500: notes referenced from the title
    for note_id in notes:
        note = self._get_note(note_id)
        if note:
            record_add_field(rec, '500', subfields=[('a', note)])
    # 650: subject keywords attributed to EDPSciences
    for subject in subjects:
        record_add_field(rec, '650', ind1='1', ind2='7',
                         subfields=[('2', 'EDPSciences'),
                                    ('a', subject)])
    keywords = self._get_keywords()
    for keyword in keywords:
        record_add_field(rec, '653', ind1='1', subfields=[('a', keyword),
                                                          ('9', 'author')])
    journal, volume, issue, year, date, doi, page,\
        fpage, lpage = self._get_publication_information()
    # Astronomy journals get an extra INSPIRE subject term.
    astronomy_journals = ['EAS Publ.Ser.', 'Astron.Astrophys.']
    if journal in astronomy_journals:
        record_add_field(rec, '650', ind1='1', ind2='7',
                         subfields=[('2', 'INSPIRE'),
                                    ('a', 'Astrophysics')])
    if date:
        record_add_field(rec, '260', subfields=[('c', date),
                                                ('t', 'published')])
    if doi:
        record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
                                                          ('2', 'DOI')])
    abstract = self._get_abstract()
    abstract = self._format_abstract(abstract)
    if abstract:
        record_add_field(rec, '520', subfields=[('a', abstract),
                                                ('9', 'EDPSciences')])
    # 540: license; fulltext is only attached for open-access articles.
    license, license_type, license_url = self._get_license()
    subfields = []
    if license:
        subfields.append(('a', license))
    if license_url:
        subfields.append(('u', license_url))
    if subfields:
        record_add_field(rec, '540', subfields=subfields)
    if license_type == 'open-access':
        self._attach_fulltext(rec, doi)
    number_of_pages = self._get_page_count()
    if number_of_pages:
        record_add_field(rec, '300', subfields=[('a', number_of_pages)])
    # 542: copyright -- holder+year preferred over a bare statement.
    c_holder, c_year, c_statement = self._get_copyright()
    if c_holder and c_year:
        record_add_field(rec, '542', subfields=[('d', c_holder),
                                                ('g', c_year),
                                                ('e', 'Article')])
    elif c_statement:
        record_add_field(rec, '542', subfields=[('f', c_statement),
                                                ('e', 'Article')])
    # 773: host item (journal citation)
    subfields = []
    if journal:
        subfields.append(('p', journal))
    if issue:
        subfields.append(('n', issue))
    if volume:
        subfields.append(('v', volume))
    if fpage and lpage:
        subfields.append(('c', '%s-%s' % (fpage,
                                          lpage)))
    elif page:
        subfields.append(('c', page))
    if year:
        subfields.append(('y', year))
    record_add_field(rec, '773', subfields=subfields)
    record_add_field(rec, '980', subfields=[('a', 'HEP')])
    # Only the last <conference> element found is kept.
    conference = ''
    for tag in self.document.getElementsByTagName('conference'):
        conference = xml_to_text(tag)
    if conference:
        record_add_field(rec, '980', subfields=[('a', 'ConferencePaper')])
        record_add_field(rec, '500', subfields=[('a', conference)])
    self._add_references(rec, ref_extract_callback)
    self._add_authors(rec)
    try:
        return record_xml_output(rec)
    except UnicodeDecodeError:
        message = "Found a bad char in the file for the article " + doi
        sys.stderr.write(message)
        return ""
"def",
"get_record",
"(",
"self",
",",
"fileName",
",",
"ref_extract_callback",
"=",
"None",
")",
":",
"self",
".",
"document",
"=",
"parse",
"(",
"fileName",
")",
"article_type",
"=",
"self",
".",
"_get_article_type",
"(",
")",
"if",
"article_type",
"not",
... | 44.81982 | 17.162162 |
def get_wireframe(viewer, x, y, z, **kwargs):
    """Produce a compound object of paths implementing a wireframe.

    x, y, z are expected to be 2D arrays of points making up the mesh.
    """
    # TODO: something like this would make a great utility function
    # for ginga
    num_rows, num_cols = x.shape
    make_path = viewer.dc.Path
    paths = []
    # one path tracing each row of the mesh
    for r in range(num_rows):
        row_pts = np.asarray([(x[r][c], y[r][c], z[r][c])
                              for c in range(num_cols)])
        paths.append(make_path(row_pts, **kwargs))
    # one path tracing each column of the mesh
    for c in range(num_cols):
        col_pts = np.asarray([(x[r][c], y[r][c], z[r][c])
                              for r in range(num_rows)])
        paths.append(make_path(col_pts, **kwargs))
    return viewer.dc.CompoundObject(*paths)
"def",
"get_wireframe",
"(",
"viewer",
",",
"x",
",",
"y",
",",
"z",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: something like this would make a great utility function",
"# for ginga",
"n",
",",
"m",
"=",
"x",
".",
"shape",
"objs",
"=",
"[",
"]",
"for",
"... | 36 | 16.315789 |
def m2o_to_x2m(cr, model, table, field, source_field):
    """
    Transform many2one relations into one2many or many2many.
    Use rename_columns in your pre-migrate script to retain the column's old
    value, then call m2o_to_x2m in your post-migrate script.
    WARNING: If converting to one2many, there can be data loss, because only
    one inverse record can be mapped in a one2many, but you can have multiple
    many2one pointing to the same target. Use it when the use case allows this
    conversion.
    :param model: The target model registry object
    :param table: The source table
    :param field: The new field name on the target model
    :param source_field: the (renamed) many2one column on the source table.
    .. versionadded:: 8.0
    """
    # Old Odoo API exposes columns via _columns; the new API via _fields.
    columns = getattr(model, '_columns', False) or getattr(model, '_fields')
    if not columns.get(field):
        do_raise("m2o_to_x2m: field %s doesn't exist in model %s" % (
            field, model._name))
    # Collect whichever relational field classes this Odoo version defines
    # (lowercase old-API names vs capitalized new-API names).
    m2m_types = []
    if many2many:
        m2m_types.append(many2many)
    if Many2many:
        m2m_types.append(Many2many)
    o2m_types = []
    if one2many:
        o2m_types.append(one2many)
    if One2many:
        o2m_types.append(One2many)
    if isinstance(columns[field], tuple(m2m_types)):
        # many2many: copy (id, old m2o value) pairs into the relation table.
        column = columns[field]
        if hasattr(many2many, '_sql_names'):  # >= 6.1 and < 10.0
            rel, id1, id2 = many2many._sql_names(column, model)
        elif hasattr(column, 'relation'):  # >= 10.0
            rel, id1, id2 = column.relation, column.column1, column.column2
        else:  # <= 6.0
            rel, id1, id2 = column._rel, column._id1, column._id2
        # Only identifiers are %-interpolated here; the copied values come
        # from the source column itself, not from user-supplied strings.
        logged_query(
            cr,
            """
            INSERT INTO %s (%s, %s)
            SELECT id, %s
            FROM %s
            WHERE %s is not null
            """ %
            (rel, id1, id2, source_field, table, source_field))
    elif isinstance(columns[field], tuple(o2m_types)):
        # one2many: point the inverse column on the comodel back at us.
        if isinstance(columns[field], One2many):  # >= 8.0
            target_table = model.env[columns[field].comodel_name]._table
            target_field = columns[field].inverse_name
        else:
            target_table = model.pool[columns[field]._obj]._table
            target_field = columns[field]._fields_id
        logged_query(
            cr,
            """
            UPDATE %(target_table)s AS target
            SET %(target_field)s=source.id
            FROM %(source_table)s AS source
            WHERE source.%(source_field)s=target.id
            """ % {'target_table': target_table,
                   'target_field': target_field,
                   'source_field': source_field,
                   'source_table': table})
    else:
        do_raise(
            "m2o_to_x2m: field %s of model %s is not a "
            "many2many/one2many one" % (field, model._name))
"def",
"m2o_to_x2m",
"(",
"cr",
",",
"model",
",",
"table",
",",
"field",
",",
"source_field",
")",
":",
"columns",
"=",
"getattr",
"(",
"model",
",",
"'_columns'",
",",
"False",
")",
"or",
"getattr",
"(",
"model",
",",
"'_fields'",
")",
"if",
"not",
... | 39.338028 | 17.746479 |
def taf(trans: TafLineTrans) -> str:
    """Condense the translation strings into a single forecast summary string"""
    pieces = []
    if trans.wind:
        pieces.append('Winds ' + trans.wind)
    if trans.visibility:
        # drop the parenthesized alternate representation, e.g. "10km (6sm)"
        vis = trans.visibility[:trans.visibility.find(' (')]
        pieces.append('Vis ' + vis.lower())
    if trans.altimeter:
        alt = trans.altimeter[:trans.altimeter.find(' (')]
        pieces.append('Alt ' + alt)
    # remaining fields are included verbatim when non-empty
    pieces.extend(value for value in (
        trans.other,
        trans.clouds.replace(' - Reported AGL', '') if trans.clouds else '',
        trans.wind_shear,
        trans.turbulance,
        trans.icing,
    ) if value)
    return ', '.join(pieces)
"def",
"taf",
"(",
"trans",
":",
"TafLineTrans",
")",
"->",
"str",
":",
"summary",
"=",
"[",
"]",
"if",
"trans",
".",
"wind",
":",
"summary",
".",
"append",
"(",
"'Winds '",
"+",
"trans",
".",
"wind",
")",
"if",
"trans",
".",
"visibility",
":",
"su... | 34.818182 | 15.818182 |
def repost(self, token):
    """Repost the job if it has timed out
    (:py:data:`cloudsight.STATUS_TIMEOUT`).

    :param token: Job token as returned from
                  :py:meth:`cloudsight.API.image_request` or
                  :py:meth:`cloudsight.API.remote_image_request`
    """
    url = '%s/%s/repost' % (REQUESTS_URL, token)
    headers = {
        'Authorization': self.auth.authorize('POST', url),
        'User-Agent': USER_AGENT,
    }
    response = requests.post(url, headers=headers)
    # A 200 means the repost was accepted; anything else is unwrapped
    # into the API's error representation.
    if response.status_code != 200:
        return self._unwrap_error(response)
"def",
"repost",
"(",
"self",
",",
"token",
")",
":",
"url",
"=",
"'%s/%s/repost'",
"%",
"(",
"REQUESTS_URL",
",",
"token",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"{",
"'Authorization'",
":",
"self",
".",
"auth"... | 33 | 16.263158 |
def set_ylim_cb(self, redraw=True):
    """Set plot limit based on user values.

    Reads the low/high Y-limit text widgets; a widget whose text does not
    parse as a float is treated as "auto" for that bound.

    :param redraw: redraw the plot after applying explicit limits
    """
    try:
        ymin = float(self.w.y_lo.get_text())
    except Exception:
        set_min = True
    else:
        set_min = False
    try:
        ymax = float(self.w.y_hi.get_text())
    except Exception:
        set_max = True
    else:
        set_max = False
    if set_min or set_max:
        self.tab_plot.draw()
        self.set_ylimits_widgets(set_min=set_min, set_max=set_max)
    if not (set_min and set_max):
        # BUGFIX: previously, when exactly one bound failed to parse, the
        # corresponding local (ymin or ymax) was unbound here and
        # set_ylim() raised NameError. Fall back to the current axes
        # limit for the auto bound instead.
        cur_ymin, cur_ymax = self.tab_plot.ax.get_ylim()
        if set_min:
            ymin = cur_ymin
        if set_max:
            ymax = cur_ymax
        self.tab_plot.ax.set_ylim(ymin, ymax)
        if redraw:
            self.tab_plot.draw()
"def",
"set_ylim_cb",
"(",
"self",
",",
"redraw",
"=",
"True",
")",
":",
"try",
":",
"ymin",
"=",
"float",
"(",
"self",
".",
"w",
".",
"y_lo",
".",
"get_text",
"(",
")",
")",
"except",
"Exception",
":",
"set_min",
"=",
"True",
"else",
":",
"set_min... | 27.625 | 17.375 |
def get_subgraphs_by_annotation(graph, annotation, sentinel=None):
    """Stratify the given graph into sub-graphs based on the values for edges' annotations.

    :param pybel.BELGraph graph: A BEL graph
    :param str annotation: The annotation to group by
    :param Optional[str] sentinel: The value to stick unannotated edges into. If none, does not keep undefined.
    :rtype: dict[str,pybel.BELGraph]
    """
    if sentinel is None:
        # unannotated edges are simply dropped
        result = _get_subgraphs_by_annotation_disregard_undefined(graph, annotation)
    else:
        # unannotated edges are grouped under the sentinel value
        result = _get_subgraphs_by_annotation_keep_undefined(graph, annotation, sentinel)
    cleanup(graph, result)
    return result
"def",
"get_subgraphs_by_annotation",
"(",
"graph",
",",
"annotation",
",",
"sentinel",
"=",
"None",
")",
":",
"if",
"sentinel",
"is",
"not",
"None",
":",
"subgraphs",
"=",
"_get_subgraphs_by_annotation_keep_undefined",
"(",
"graph",
",",
"annotation",
",",
"senti... | 42.0625 | 25.6875 |
def create_api_environment(self):
    """Get an instance of Api Environment services facade."""
    # Forward this client's connection credentials to the facade.
    return ApiEnvironment(self.networkapi_url, self.user,
                          self.password, self.user_ldap)
"def",
"create_api_environment",
"(",
"self",
")",
":",
"return",
"ApiEnvironment",
"(",
"self",
".",
"networkapi_url",
",",
"self",
".",
"user",
",",
"self",
".",
"password",
",",
"self",
".",
"user_ldap",
")"
] | 33.571429 | 10 |
def get_n_tail(tmax, tail_temps):
    """Determine the number of included tail checks in the best fit segment.

    :param tmax: upper temperature bound of the segment
    :param tail_temps: sequence of temperatures at which tail checks were run
    :returns: count of tail checks at temperatures <= tmax (0 if none)
    """
    t_index = 0
    adj_tmax = 0
    if tmax < tail_temps[0]:
        return 0
    try:
        t_index = list(tail_temps).index(tmax)
    except ValueError:
        # No tail check was performed exactly at tmax: fall back to the
        # largest tail-check temperature not exceeding tmax.
        # (Previously a bare `except:` which would also mask unrelated errors.)
        for temp in tail_temps:
            if temp <= tmax:
                adj_tmax = temp
                t_index = list(tail_temps).index(adj_tmax)
    incl_temps = tail_temps[0:t_index + 1]  # slice end is exclusive
    return len(incl_temps)
"def",
"get_n_tail",
"(",
"tmax",
",",
"tail_temps",
")",
":",
"#print \"tail_temps: {0}, tmax: {0}\".format(tail_temps, tmax)",
"t_index",
"=",
"0",
"adj_tmax",
"=",
"0",
"if",
"tmax",
"<",
"tail_temps",
"[",
"0",
"]",
":",
"return",
"0",
"try",
":",
"t_index",... | 37.5 | 16.5625 |
def setLedN(self, led_number=0):
    """Set the 'current LED' value for writePatternLine

    :param led_number: LED to adjust, 0=all, 1=LEDA, 2=LEDB
    :returns: '' if no device is attached, otherwise the result of the write
    """
    # Idiom fix: compare to None with `is`, not `==`.
    if self.dev is None:
        return ''
    # 'l' command: select the current LED; remaining report bytes are unused.
    buf = [REPORT_ID, ord('l'), led_number, 0, 0, 0, 0, 0, 0]
    self.write(buf)
"def",
"setLedN",
"(",
"self",
",",
"led_number",
"=",
"0",
")",
":",
"if",
"(",
"self",
".",
"dev",
"==",
"None",
")",
":",
"return",
"''",
"buf",
"=",
"[",
"REPORT_ID",
",",
"ord",
"(",
"'l'",
")",
",",
"led_number",
",",
"0",
",",
"0",
",",
... | 41.428571 | 10 |
def Session(access_token=None, env=None):
    """Create an HTTP session.

    Parameters
    ----------
    access_token : str
        Mapbox access token string (optional).
    env : dict, optional
        A dict that substitutes for os.environ.

    Returns
    -------
    requests.Session
    """
    env = os.environ.copy() if env is None else env
    # Explicit argument wins, then either spelling of the env variable.
    token = access_token
    if not token:
        token = env.get('MapboxAccessToken')
    if not token:
        token = env.get('MAPBOX_ACCESS_TOKEN')
    session = requests.Session()
    session.params.update(access_token=token)
    user_agent = 'mapbox-sdk-py/{0} {1}'.format(
        __version__, requests.utils.default_user_agent())
    session.headers.update({'User-Agent': user_agent})
    return session
"def",
"Session",
"(",
"access_token",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"if",
"env",
"is",
"None",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"access_token",
"=",
"(",
"access_token",
"or",
"env",
".",
"get",
"(",
... | 27.038462 | 16 |
def compute(self):
    """Computes the tendencies for all state variables given current state
    and specified input.
    The function first computes all diagnostic processes. They don't produce
    any tendencies directly but they may affect the other processes (such as
    change in solar distribution). Subsequently, all tendencies and
    diagnostics for all explicit processes are computed.
    Tendencies due to implicit and adjustment processes need to be
    calculated from a state that is already adjusted after explicit
    alteration. For that reason the explicit tendencies are applied to the
    states temporarily. Now all tendencies from implicit processes are
    calculated by matrix inversions and similar to the explicit tendencies,
    the implicit ones are applied to the states temporarily. Subsequently,
    all instantaneous adjustments are computed.
    Then the changes that were made to the states from explicit and implicit
    processes are removed again as this
    :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()`
    function is supposed to calculate only tendencies and not apply them
    to the states.
    Finally, all calculated tendencies from all processes are collected
    for each state, summed up and stored in the dictionary
    ``self.tendencies``, which is an attribute of the time-dependent-process
    object, for which the
    :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()`
    method has been called.
    **Object attributes** \n
    During method execution following object attributes are modified:
    :ivar dict tendencies: dictionary that holds tendencies for all states
                           is calculated for current timestep through
                           adding up tendencies from explicit, implicit and
                           adjustment processes.
    :ivar dict diagnostics: process diagnostic dictionary is updated
                            by diagnostic dictionaries of subprocesses
                            after computation of tendencies.
    """
    # First reset tendencies to zero -- recomputing them is the point of this method
    for varname in self.tendencies:
        self.tendencies[varname] *= 0.
    if not self.has_process_type_list:
        self._build_process_type_list()
    tendencies = {}
    ignored = self._compute_type('diagnostic')
    tendencies['explicit'] = self._compute_type('explicit')
    # Tendencies due to implicit and adjustment processes need to be
    # calculated from a state that is already adjusted after explicit stuff
    # So apply the tendencies temporarily and then remove them again
    for name, var in self.state.items():
        var += tendencies['explicit'][name] * self.timestep
    # Now compute all implicit processes -- matrix inversions
    tendencies['implicit'] = self._compute_type('implicit')
    # Same deal ... temporarily apply tendencies from implicit step
    for name, var in self.state.items():
        var += tendencies['implicit'][name] * self.timestep
    # Finally compute all instantaneous adjustments -- expressed as explicit forward step
    tendencies['adjustment'] = self._compute_type('adjustment')
    # Now remove the changes from the model state
    for name, var in self.state.items():
        var -= ( (tendencies['implicit'][name] + tendencies['explicit'][name]) *
                self.timestep)
    # Sum up all subprocess tendencies
    for proctype in ['explicit', 'implicit', 'adjustment']:
        for varname, tend in tendencies[proctype].items():
            self.tendencies[varname] += tend
    # Finally compute my own tendencies, if any
    self_tend = self._compute()
    # Adjustment processes _compute method returns absolute adjustment
    # Needs to be converted to rate of change
    # BUGFIX: was `is 'adjustment'` -- identity comparison with a str
    # literal is implementation-dependent (and a SyntaxWarning on
    # modern Pythons); use equality instead.
    if self.time_type == 'adjustment':
        for varname, adj in self_tend.items():
            self_tend[varname] /= self.timestep
    for varname, tend in self_tend.items():
        self.tendencies[varname] += tend
    return self.tendencies
"def",
"compute",
"(",
"self",
")",
":",
"# First reset tendencies to zero -- recomputing them is the point of this method",
"for",
"varname",
"in",
"self",
".",
"tendencies",
":",
"self",
".",
"tendencies",
"[",
"varname",
"]",
"*=",
"0.",
"if",
"not",
"self",
"."... | 52.414634 | 23.987805 |
def _sbd(x, y):
    """Shape-based distance: returns (distance, y shifted to best align with x).

    >>> _sbd([1,1,1], [1,1,1])
    (-2.2204460492503131e-16, array([1, 1, 1]))
    >>> _sbd([0,1,2], [1,2,3])
    (0.043817112532485103, array([1, 2, 3]))
    >>> _sbd([1,2,3], [0,1,2])
    (0.043817112532485103, array([0, 1, 2]))
    """
    correlations = _ncc_c(x, y)
    best = correlations.argmax()
    # shift that maximizes the normalized cross-correlation
    shift = (best + 1) - max(len(x), len(y))
    return 1 - correlations[best], roll_zeropad(y, shift)
"def",
"_sbd",
"(",
"x",
",",
"y",
")",
":",
"ncc",
"=",
"_ncc_c",
"(",
"x",
",",
"y",
")",
"idx",
"=",
"ncc",
".",
"argmax",
"(",
")",
"dist",
"=",
"1",
"-",
"ncc",
"[",
"idx",
"]",
"yshift",
"=",
"roll_zeropad",
"(",
"y",
",",
"(",
"idx",... | 27 | 13.4 |
def generate_raml_docs(module, fields, shared_types, user=None, title="My API", version="v1", api_root="api", base_uri="http://mysite.com/{version}"):
    """Return a RAML file of a Pale module's documentation as a string.

    The user argument is optional. If included, it expects the user to be an object with an "is_admin"
    boolean attribute. Any endpoint protected with a "@requires_permission" decorator will require
    user.is_admin == True to display documentation on that endpoint.

    The arguments for 'title', 'version', and 'base_uri' are added to the RAML header info.
    """
    output = StringIO()
    # Add the RAML header info
    output.write('#%RAML 1.0 \n')
    output.write('title: ' + title + ' \n')
    output.write('baseUri: ' + base_uri + ' \n')
    output.write('version: ' + version + '\n')
    output.write('mediaType: application/json\n\n')
    output.write('documentation:\n')
    output.write(' - title: Welcome\n')
    output.write('   content: |\n')
    output.write("""\
        Welcome to the Loudr API Docs.\n
        You'll find comprehensive documentation on our endpoints and resources here.
    """)
    output.write("\n###############\n# Resource Types:\n###############\n\n")
    output.write('types:\n')
    # Collect every field class defined by the framework's field modules.
    basic_fields = []
    for field_module in inspect.getmembers(fields, inspect.ismodule):
        for field_class in inspect.getmembers(field_module[1], inspect.isclass):
            basic_fields.append(field_class[1])
    # generate_basic_type_docs returns (doc_string, accumulated_type_map);
    # the map is threaded into the shared-types pass below.
    pale_basic_types = generate_basic_type_docs(basic_fields, {})
    output.write("\n# Pale Basic Types:\n\n")
    output.write(pale_basic_types[0])
    shared_fields = []
    for shared_type in shared_types:
        for field_class in inspect.getmembers(shared_type, inspect.isclass):
            shared_fields.append(field_class[1])
    pale_shared_types = generate_basic_type_docs(shared_fields, pale_basic_types[1])
    output.write("\n# Pale Shared Types:\n\n")
    output.write(pale_shared_types[0])
    raml_resource_types = generate_raml_resource_types(module)
    output.write("\n# API Resource Types:\n\n")
    output.write(raml_resource_types)
    raml_resources = generate_raml_resources(module, api_root, user)
    output.write("\n\n###############\n# API Endpoints:\n###############\n\n")
    output.write(raml_resources)
    raml_docs = output.getvalue()
    output.close()
    return raml_docs
"def",
"generate_raml_docs",
"(",
"module",
",",
"fields",
",",
"shared_types",
",",
"user",
"=",
"None",
",",
"title",
"=",
"\"My API\"",
",",
"version",
"=",
"\"v1\"",
",",
"api_root",
"=",
"\"api\"",
",",
"base_uri",
"=",
"\"http://mysite.com/{version}\"",
... | 37.206349 | 24.650794 |
def activation_shell_code(self, shell=None):
    """Get shell code that should be run to activate this suite."""
    from rez.shells import create_shell
    from rez.rex import RexExecutor
    interpreter = create_shell(shell)
    executor = RexExecutor(interpreter=interpreter,
                           parent_variables=["PATH"],
                           shebang=False)
    # Put this suite's tools directory on PATH.
    executor.env.PATH.append(self.tools_path)
    output = executor.get_output()
    return output.strip()
"def",
"activation_shell_code",
"(",
"self",
",",
"shell",
"=",
"None",
")",
":",
"from",
"rez",
".",
"shells",
"import",
"create_shell",
"from",
"rez",
".",
"rex",
"import",
"RexExecutor",
"executor",
"=",
"RexExecutor",
"(",
"interpreter",
"=",
"create_shell... | 45.5 | 10.6 |
def generate(env):
    """Add Builders and construction variables for dvips to an Environment."""
    # The Actions/Builder are module-level singletons, created lazily on
    # the first call and shared by every Environment afterwards.
    global PSAction
    if PSAction is None:
        PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
    global DVIPSAction
    if DVIPSAction is None:
        DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
    global PSBuilder
    if PSBuilder is None:
        PSBuilder = SCons.Builder.Builder(action = PSAction,
                                          prefix = '$PSPREFIX',
                                          suffix = '$PSSUFFIX',
                                          src_suffix = '.dvi',
                                          src_builder = 'DVI',
                                          single_source=True)
    env['BUILDERS']['PostScript'] = PSBuilder
    env['DVIPS'] = 'dvips'
    env['DVIPSFLAGS'] = SCons.Util.CLVar('')
    # I'm not quite sure I got the directories and filenames right for variant_dir
    # We need to be in the correct directory for the sake of latex \includegraphics eps included files.
    env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
    env['PSPREFIX'] = ''
    env['PSSUFFIX'] = '.ps'
"def",
"generate",
"(",
"env",
")",
":",
"global",
"PSAction",
"if",
"PSAction",
"is",
"None",
":",
"PSAction",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"'$PSCOM'",
",",
"'$PSCOMSTR'",
")",
"global",
"DVIPSAction",
"if",
"DVIPSAction",
"is",
"None"... | 43.214286 | 24.642857 |
def get_parallel_raw_data(self, other):
    """ Get the raw data that is similar to the specified other segment
    """
    # Slice our raw data to the same byte span as the other segment.
    start, end = other.byte_bounds_offset()
    r = self.rawdata[start:end]
    if other.rawdata.is_indexed:
        # NOTE(review): `get_indexed` is subscripted, not called -- this only
        # works if it is a property returning something indexable; confirm it
        # is not meant to be `r.get_indexed(other.order)`.
        r = r.get_indexed[other.order]
    return r
"def",
"get_parallel_raw_data",
"(",
"self",
",",
"other",
")",
":",
"start",
",",
"end",
"=",
"other",
".",
"byte_bounds_offset",
"(",
")",
"r",
"=",
"self",
".",
"rawdata",
"[",
"start",
":",
"end",
"]",
"if",
"other",
".",
"rawdata",
".",
"is_indexe... | 37.625 | 5.375 |
def load_text(self, text, tokenizer=None):
    """Load text from which to generate a word frequency list

    Args:
        text (str): The text to be loaded
        tokenizer (function): The function to use to tokenize a string
    """
    if not tokenizer:
        # fall back to the built-in tokenizer
        words = self.tokenize(text)
    else:
        words = [token.lower() for token in tokenizer(text)]
    self._dictionary.update(words)
    self._update_dictionary()
"def",
"load_text",
"(",
"self",
",",
"text",
",",
"tokenizer",
"=",
"None",
")",
":",
"if",
"tokenizer",
":",
"words",
"=",
"[",
"x",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"tokenizer",
"(",
"text",
")",
"]",
"else",
":",
"words",
"=",
"self"... | 33.071429 | 16 |
def _enforce_instance(model_or_class):
"""
It's a common mistake to not initialize a
schematics class. We should handle that by just
calling the default constructor.
"""
if isinstance(model_or_class, type) and issubclass(model_or_class, BaseType):
return model_or_class()
return model_or_class | [
"def",
"_enforce_instance",
"(",
"model_or_class",
")",
":",
"if",
"isinstance",
"(",
"model_or_class",
",",
"type",
")",
"and",
"issubclass",
"(",
"model_or_class",
",",
"BaseType",
")",
":",
"return",
"model_or_class",
"(",
")",
"return",
"model_or_class"
] | 35.666667 | 9.666667 |
def handle_stream_features(self, stream, features):
    """Process incoming <stream:features/> element.

    [initiating entity only]

    The received features element is available in `features`.
    """
    logger.debug(u"Handling stream features: {0}".format(
                                        element_to_unicode(features)))
    bind_feature = features.find(FEATURE_BIND)
    if bind_feature is None:
        logger.debug("No <bind/> in features")
        return None
    # Server advertises resource binding; bind our configured resource.
    resource = stream.settings["resource"]
    self.bind(stream, resource)
    return StreamFeatureHandled("Resource binding", mandatory=True)
"def",
"handle_stream_features",
"(",
"self",
",",
"stream",
",",
"features",
")",
":",
"logger",
".",
"debug",
"(",
"u\"Handling stream features: {0}\"",
".",
"format",
"(",
"element_to_unicode",
"(",
"features",
")",
")",
")",
"element",
"=",
"features",
".",
... | 40.25 | 16.5 |
def aggregate_periods(self, periods):
    """Returns list of ndarrays averaged to a given number of periods.

    Arguments:
    periods -- desired number of periods as int
    """
    try:
        fieldname = self.raster_field.name
    except TypeError:
        raise exceptions.FieldDoesNotExist('Raster field not found')
    arrays = self.arrays(fieldname)
    arr = arrays[0]
    if len(arrays) > 1:
        # NOTE(review): stacking only happens for >2-dim arrays; 2-dim
        # inputs are passed to masked_values as the original list --
        # confirm this asymmetry is intended.
        if getattr(arr, 'ndim', 0) > 2:
            arrays = np.vstack(arrays)
        fill = getattr(arr, 'fill_value', None)
        arr = np.ma.masked_values(arrays, fill, copy=False)
    # Try to reshape using equal sizes first and fall back to unequal
    # splits.
    try:
        means = arr.reshape((periods, -1)).mean(axis=1)
    except ValueError:
        means = np.array([a.mean() for a in np.array_split(arr, periods)])
    # Reuse the first object as the carrier for the aggregated values.
    obj = self[0]
    setattr(obj, fieldname, means)
    return [obj]
"def",
"aggregate_periods",
"(",
"self",
",",
"periods",
")",
":",
"try",
":",
"fieldname",
"=",
"self",
".",
"raster_field",
".",
"name",
"except",
"TypeError",
":",
"raise",
"exceptions",
".",
"FieldDoesNotExist",
"(",
"'Raster field not found'",
")",
"arrays"... | 37.615385 | 16.269231 |
def recover(
        data: bytes,
        signature: Signature,
        hasher: Callable[[bytes], bytes] = eth_sign_sha3,
) -> Address:
    """ eth_recover address from data hash and signature """
    _hash = hasher(data)
    # ecdsa_recover accepts only standard [0,1] v's so we add support also for [27,28] here
    # anything else will raise BadSignature
    if signature[-1] >= 27:  # support (0,1,27,28) v values
        signature = Signature(signature[:-1] + bytes([signature[-1] - 27]))
    try:
        sig = keys.Signature(signature_bytes=signature)
        public_key = keys.ecdsa_recover(message_hash=_hash, signature=sig)
    except BadSignature as e:
        # Re-raise under the project's exception type, keeping the original
        # cause chained for debugging.
        raise InvalidSignature from e
    return public_key.to_canonical_address()
"def",
"recover",
"(",
"data",
":",
"bytes",
",",
"signature",
":",
"Signature",
",",
"hasher",
":",
"Callable",
"[",
"[",
"bytes",
"]",
",",
"bytes",
"]",
"=",
"eth_sign_sha3",
",",
")",
"->",
"Address",
":",
"_hash",
"=",
"hasher",
"(",
"data",
")"... | 38.421053 | 21.368421 |
def members(name, members_list, root=None):
    '''
    Replaces members of the group with a provided list.

    CLI Example:

        salt '*' group.members foo 'user1,user2,user3,...'

    Replaces a membership list for a local group 'foo'.

        foo:x:1234:user1,user2,user3,...
    '''
    cmd = 'chgrpmem -m = {0} {1}'.format(members_list, name)
    # success is a zero return code
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
"def",
"members",
"(",
"name",
",",
"members_list",
",",
"root",
"=",
"None",
")",
":",
"cmd",
"=",
"'chgrpmem -m = {0} {1}'",
".",
"format",
"(",
"members_list",
",",
"name",
")",
"retcode",
"=",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"cmd",
",",
"... | 28.333333 | 24.066667 |
def validate_metadata(self, handler):
    """ validate that kind=category does not change the categories """
    if self.meta == 'category':
        new_metadata = self.metadata
        cur_metadata = handler.read_metadata(self.cname)
        # Appending codes against a different category set would silently
        # remap values, so refuse the append instead.
        if (new_metadata is not None and cur_metadata is not None and
                not array_equivalent(new_metadata, cur_metadata)):
            raise ValueError("cannot append a categorical with "
                             "different categories to the existing")
"def",
"validate_metadata",
"(",
"self",
",",
"handler",
")",
":",
"if",
"self",
".",
"meta",
"==",
"'category'",
":",
"new_metadata",
"=",
"self",
".",
"metadata",
"cur_metadata",
"=",
"handler",
".",
"read_metadata",
"(",
"self",
".",
"cname",
")",
"if",... | 58.777778 | 16.777778 |
def ascwl(
        inst,
        recurse=True,
        filter=None,
        dict_factory=dict,
        retain_collection_types=False,
        basedir=None,
):
    """Return the ``attrs`` attribute values of *inst* as a dict.
    Support ``jsonldPredicate`` in a field metadata for generating
    mappings from lists.
    Adapted from ``attr._funcs``.
    """
    attrs = fields(inst.__class__)
    rv = dict_factory()
    def convert_value(v):
        """Convert special types."""
        if isinstance(v, Path):
            v = str(v)
            # paths are emitted relative to basedir when one is given
            return os.path.relpath(v, str(basedir)) if basedir else v
        return v
    for a in attrs:
        if a.name.startswith('__'):
            continue
        # trailing underscore is the convention for avoiding keyword clashes
        a_name = a.name.rstrip('_')
        v = getattr(inst, a.name)
        if filter is not None and not filter(a, v):
            continue
        if recurse is True:
            if has(v.__class__):
                # nested attrs instance: serialize recursively
                rv[a_name] = ascwl(
                    v,
                    recurse=True,
                    filter=filter,
                    dict_factory=dict_factory,
                    basedir=basedir,
                )
            elif isinstance(v, (tuple, list, set)):
                cf = v.__class__ if retain_collection_types is True else list
                rv[a_name] = cf([
                    ascwl(
                        i,
                        recurse=True,
                        filter=filter,
                        dict_factory=dict_factory,
                        basedir=basedir,
                    ) if has(i.__class__) else i for i in v
                ])
                # mapSubject turns a list of dicts into a mapping keyed by
                # that field, per the CWL jsonldPredicate convention.
                if 'jsonldPredicate' in a.metadata:
                    k = a.metadata['jsonldPredicate'].get('mapSubject')
                    if k:
                        vv = dict_factory()
                        for i in rv[a_name]:
                            kk = i.pop(k)
                            vv[kk] = i
                        rv[a_name] = vv
            elif isinstance(v, dict):
                df = dict_factory
                rv[a_name] = df((
                    ascwl(
                        kk,
                        dict_factory=df,
                        basedir=basedir,
                    ) if has(kk.__class__) else convert_value(kk),
                    ascwl(
                        vv,
                        dict_factory=df,
                        basedir=basedir,
                    ) if has(vv.__class__) else vv
                ) for kk, vv in iteritems(v))
            else:
                rv[a_name] = convert_value(v)
        else:
            rv[a_name] = convert_value(v)
    # CWL documents carry the class name explicitly.
    if isinstance(inst, CWLClass):
        rv['class'] = inst.__class__.__name__
    return rv
"def",
"ascwl",
"(",
"inst",
",",
"recurse",
"=",
"True",
",",
"filter",
"=",
"None",
",",
"dict_factory",
"=",
"dict",
",",
"retain_collection_types",
"=",
"False",
",",
"basedir",
"=",
"None",
",",
")",
":",
"attrs",
"=",
"fields",
"(",
"inst",
".",
... | 30.091954 | 15.54023 |
def get(self, request, *args, **kwargs):
    """
    Return a :class:`.django.http.JsonResponse`.

    Example::

        {
            'results': [
                {
                    'text': "foo",
                    'id': 123
                }
            ],
            'more': true
        }
    """
    self.widget = self.get_widget_or_404()
    self.term = kwargs.get('term', request.GET.get('term', ''))
    self.object_list = self.get_queryset()
    context = self.get_context_data()
    results = []
    for obj in context['object_list']:
        results.append({
            'text': self.widget.label_from_instance(obj),
            'id': obj.pk,
        })
    payload = {
        'results': results,
        'more': context['page_obj'].has_next(),
    }
    return JsonResponse(payload)
"def",
"get",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"widget",
"=",
"self",
".",
"get_widget_or_404",
"(",
")",
"self",
".",
"term",
"=",
"kwargs",
".",
"get",
"(",
"'term'",
",",
"request",
... | 27.612903 | 16.709677 |
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Dispatch evaluation to the backend matching the dataset type.

    Args:
        dataset: Dataset object
        predictions(list[BoxList]): each item in the list represents the
            prediction results for one image.
        output_folder: output folder, to save evaluation files or results.
        **kwargs: forwarded unchanged to the concrete evaluator.

    Returns:
        evaluation result

    Raises:
        NotImplementedError: for dataset types with no evaluator.
    """
    shared = dict(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        **kwargs
    )
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**shared)
    if isinstance(dataset, datasets.PascalVOCDataset):
        return voc_evaluation(**shared)
    raise NotImplementedError(
        "Unsupported dataset type {}.".format(type(dataset).__name__))
"def",
"evaluate",
"(",
"dataset",
",",
"predictions",
",",
"output_folder",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"dict",
"(",
"dataset",
"=",
"dataset",
",",
"predictions",
"=",
"predictions",
",",
"output_folder",
"=",
"output_folder",
",",
"*... | 41.238095 | 19.380952 |
def save_animation(filename, pianoroll, window, hop=1, fps=None, is_drum=False,
                   beat_resolution=None, downbeats=None, preset='default',
                   cmap='Blues', xtick='auto', ytick='octave', xticklabel=True,
                   yticklabel='auto', tick_loc=None, tick_direction='in',
                   label='both', grid='both', grid_linestyle=':',
                   grid_linewidth=.5, **kwargs):
    """
    Save a pianoroll to an animation in video or GIF format.
    Parameters
    ----------
    filename : str
        The filename to which the animation is saved.
    pianoroll : np.ndarray
        A pianoroll to be plotted. The values should be in [0, 1] when data
        type is float, and in [0, 127] when data type is integer.
        - For a 2D array, shape=(num_time_step, num_pitch).
        - For a 3D array, shape=(num_time_step, num_pitch, num_channel),
          where channels can be either RGB or RGBA.
    window : int
        The window size to be applied to `pianoroll` for the animation.
    hop : int
        The hop size to be applied to `pianoroll` for the animation.
    fps : int
        The number of frames per second in the resulting video or GIF file.
    is_drum : bool
        A boolean number that indicates whether it is a percussion track.
        Defaults to False.
    beat_resolution : int
        The number of time steps used to represent a beat. Required and only
        effective when `xtick` is 'beat'.
    downbeats : list
        An array that indicates whether the time step contains a downbeat
        (i.e., the first time step of a bar).
    preset : {'default', 'plain', 'frame'}
        A string that indicates the preset theme to use.
        - In 'default' preset, the ticks, grid and labels are on.
        - In 'frame' preset, the ticks and grid are both off.
        - In 'plain' preset, the x- and y-axis are both off.
    cmap : `matplotlib.colors.Colormap`
        The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
        'Blues'. Only effective when `pianoroll` is 2D.
    xtick : {'auto', 'beat', 'step', 'off'}
        A string that indicates what to use as ticks along the x-axis. If
        'auto' is given, automatically set to 'beat' if `beat_resolution` is
        also given and set to 'step', otherwise. Defaults to 'auto'.
    ytick : {'octave', 'pitch', 'off'}
        A string that indicates what to use as ticks along the y-axis.
        Defaults to 'octave'.
    xticklabel : bool
        Whether to add tick labels along the x-axis. Only effective when
        `xtick` is not 'off'.
    yticklabel : {'auto', 'name', 'number', 'off'}
        If 'name', use octave name and pitch name (key name when `is_drum`
        is True) as tick labels along the y-axis. If 'number', use pitch
        number. If 'auto', set to 'name' when `ytick` is 'octave' and
        'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
        when `ytick` is not 'off'.
    tick_loc : tuple or list
        The locations to put the ticks. Availables elements are 'bottom',
        'top', 'left' and 'right'. Defaults to ('bottom', 'left').
    tick_direction : {'in', 'out', 'inout'}
        A string that indicates where to put the ticks. Defaults to 'in'.
        Only effective when one of `xtick` and `ytick` is on.
    label : {'x', 'y', 'both', 'off'}
        A string that indicates whether to add labels to the x-axis and
        y-axis. Defaults to 'both'.
    grid : {'x', 'y', 'both', 'off'}
        A string that indicates whether to add grids to the x-axis, y-axis,
        both or neither. Defaults to 'both'.
    grid_linestyle : str
        Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
        argument.
    grid_linewidth : float
        Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
        argument.
    """
    if not HAS_MOVIEPY:
        raise ImportError("moviepy package is required for animation supports.")
    # NOTE: this closure reads `fps`, `transposed`, `vmin` and `vmax`, which
    # are only assigned further down; it must not be called before the
    # VideoClip is constructed (moviepy invokes it per frame at render time).
    def make_frame(t):
        """Return an image of the frame for time t."""
        fig = plt.gcf()
        ax = plt.gca()
        # Map the clip time t (seconds) to a frame index, then to the
        # [start, end) time-step window of the pianoroll to display.
        f_idx = int(t * fps)
        start = hop * f_idx
        end = start + window
        to_plot = transposed[:, start:end]
        extent = (start, end - 1, 0, 127)
        ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax,
                  origin='lower', interpolation='none', extent=extent)
        if xtick == 'beat':
            # Recompute beat-aligned tick positions relative to this
            # window's offset: major ticks on beats, minor ticks (which
            # carry the labels) on half-beats.
            next_major_idx = beat_resolution - start % beat_resolution
            if start % beat_resolution < beat_resolution//2:
                next_minor_idx = beat_resolution//2 - start % beat_resolution
            else:
                next_minor_idx = (beat_resolution//2 - start % beat_resolution
                                  + beat_resolution)
            xticks_major = np.arange(next_major_idx, window, beat_resolution)
            xticks_minor = np.arange(next_minor_idx, window, beat_resolution)
            if end % beat_resolution < beat_resolution//2:
                last_minor_idx = beat_resolution//2 - end % beat_resolution
            else:
                last_minor_idx = (beat_resolution//2 - end % beat_resolution
                                  + beat_resolution)
            xtick_labels = np.arange((start + next_minor_idx)//beat_resolution,
                                     (end + last_minor_idx)//beat_resolution)
            ax.set_xticks(xticks_major)
            # Major ticks are unlabeled; beat numbers go on the minor ticks.
            ax.set_xticklabels('')
            ax.set_xticks(xticks_minor, minor=True)
            ax.set_xticklabels(xtick_labels, minor=True)
            # Hide the minor tick marks themselves, keeping only the labels.
            ax.tick_params(axis='x', which='minor', width=0)
        return mplfig_to_npimage(fig)
    if xtick == 'auto':
        xtick = 'beat' if beat_resolution is not None else 'step'
    # Draw the first window once to set up axes/labels shared by all frames.
    fig, ax = plt.subplots()
    plot_pianoroll(ax, pianoroll[:window], is_drum, beat_resolution, downbeats,
                   preset=preset, cmap=cmap, xtick=xtick, ytick=ytick,
                   xticklabel=xticklabel, yticklabel=yticklabel,
                   tick_loc=tick_loc, tick_direction=tick_direction,
                   label=label, grid=grid, grid_linestyle=grid_linestyle,
                   grid_linewidth=grid_linewidth)
    num_frame = int((pianoroll.shape[0] - window) / hop)
    # NOTE(review): `fps` has no fallback when left as None — `int(t * fps)`
    # and this division would raise TypeError; confirm callers always pass it.
    duration = int(num_frame / fps)
    # Color scale depends on dtype: bool/float pianorolls are in [0, 1],
    # integer pianorolls are MIDI velocities in [0, 127].
    if (np.issubdtype(pianoroll.dtype, np.bool_)
            or np.issubdtype(pianoroll.dtype, np.floating)):
        vmax = 1
    elif np.issubdtype(pianoroll.dtype, np.integer):
        vmax = 127
    else:
        raise TypeError("Unsupported data type for `pianoroll`.")
    vmin = 0
    # Transpose to (pitch, time) so imshow columns advance in time.
    transposed = pianoroll.T
    animation = VideoClip(make_frame, duration=duration)
    if filename.endswith('.gif'):
        animation.write_gif(filename, fps, **kwargs)
    else:
        animation.write_videofile(filename, fps, **kwargs)
    plt.close()
"def",
"save_animation",
"(",
"filename",
",",
"pianoroll",
",",
"window",
",",
"hop",
"=",
"1",
",",
"fps",
"=",
"None",
",",
"is_drum",
"=",
"False",
",",
"beat_resolution",
"=",
"None",
",",
"downbeats",
"=",
"None",
",",
"preset",
"=",
"'default'",
... | 44.315789 | 22.763158 |
def index_of(self, name):
    """
    Returns the index of the actor with the given name.
    :param name: the name of the Actor to find
    :type name: str
    :return: the index, -1 if not found
    :rtype: int
    """
    for position, candidate in enumerate(self.actors):
        if candidate.name == name:
            return position
    return -1
"def",
"index_of",
"(",
"self",
",",
"name",
")",
":",
"result",
"=",
"-",
"1",
"for",
"index",
",",
"actor",
"in",
"enumerate",
"(",
"self",
".",
"actors",
")",
":",
"if",
"actor",
".",
"name",
"==",
"name",
":",
"result",
"=",
"index",
"break",
... | 27.8 | 14.066667 |
def reduce_fn(x):
    """
    Aggregation function to get the first non-NaN value.

    Parameters
    ----------
    x : sequence or pandas.Series
        Candidate values, scanned left to right.

    Returns
    -------
    The first element ``v`` for which ``is_nan(v)`` is falsy, or ``np.nan``
    when every element is NaN (or the input is empty).
    """
    # ``pd`` may be None when pandas is unavailable; only then is the
    # Series fast-path skipped and the input iterated directly.
    values = x.values if pd and isinstance(x, pd.Series) else x
    for v in values:
        if not is_nan(v):
            return v
    # Use lowercase ``np.nan``: the ``np.NaN`` alias was removed in NumPy 2.0.
    return np.nan
"def",
"reduce_fn",
"(",
"x",
")",
":",
"values",
"=",
"x",
".",
"values",
"if",
"pd",
"and",
"isinstance",
"(",
"x",
",",
"pd",
".",
"Series",
")",
"else",
"x",
"for",
"v",
"in",
"values",
":",
"if",
"not",
"is_nan",
"(",
"v",
")",
":",
"retur... | 25.888889 | 15.666667 |
def main(global_config, **settings):
    """
    Build and return a PyShop WSGI application configured with ``settings``.
    """
    if sys.version_info[0] < 3:
        # Python 2 only: make utf-8 the interpreter-wide default encoding.
        reload(sys)
        sys.setdefaultencoding('utf-8')
    settings = dict(settings)
    # Scoping sessions for Pyramid ensures sessions are committed or rolled
    # back only after the template has been rendered.
    create_engine(settings, scoped=True)
    authorization = ACLPolicy()
    authentication = RouteSwitchAuthPolicy(
        secret=settings['pyshop.cookie_key'],
        callback=groupfinder,
    )
    config = Configurator(
        settings=settings,
        root_factory=RootFactory,
        route_prefix=settings.get('pyshop.route_prefix'),
        locale_negotiator=locale_negotiator,
        authentication_policy=authentication,
        authorization_policy=authorization,
    )
    config.end()
    return config.make_wsgi_app()
"def",
"main",
"(",
"global_config",
",",
"*",
"*",
"settings",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"reload",
"(",
"sys",
")",
"sys",
".",
"setdefaultencoding",
"(",
"'utf-8'",
")",
"settings",
"=",
"dict",
"(",
... | 36.333333 | 17.074074 |
def addTags(self, tags):
    """Add the given tags to the photo's current tags.
    (flickr.photos.addtags)
    """
    # Lists are de-duplicated first; any other type is sent through as-is.
    payload = uniq(tags) if isinstance(tags, list) else tags
    _dopost('flickr.photos.addTags', auth=True, photo_id=self.id,
            tags=payload)
    # Reload cached properties so the new tags are reflected locally.
    self._load_properties()
"def",
"addTags",
"(",
"self",
",",
"tags",
")",
":",
"method",
"=",
"'flickr.photos.addTags'",
"if",
"isinstance",
"(",
"tags",
",",
"list",
")",
":",
"tags",
"=",
"uniq",
"(",
"tags",
")",
"_dopost",
"(",
"method",
",",
"auth",
"=",
"True",
",",
"p... | 33.5 | 11.5 |
def _MakeRanges(pairs):
    """Turn a list like [(65,97), (66, 98), ..., (90,122)]
    into [(65, 90, +32)]."""
    out = []
    prev = -100

    def _grow_run(prev, lo, hi, run):
        # Consecutive codepoint carrying the same delta: extend the run.
        if lo != prev + 1 or hi != _AddDelta(lo, run[2]):
            return False
        run[1] = lo
        return True

    def _grow_skip_run(prev, lo, hi, run):
        # Every-other-codepoint run: the delta name gains a 'Skip' suffix.
        if lo != prev + 2:
            return False
        delta = run[2]
        d = delta
        if type(delta) is not str:
            return False
        if delta.endswith('Skip'):
            d = delta[:-4]
        else:
            delta = d + 'Skip'
        if hi != _AddDelta(lo, d):
            return False
        run[1] = lo
        run[2] = delta
        return True

    for lo, hi in pairs:
        # Try to extend the current run (plain first, then skip-style);
        # otherwise open a fresh run for this pair.
        extended = bool(out) and (_grow_run(prev, lo, hi, out[-1]) or
                                  _grow_skip_run(prev, lo, hi, out[-1]))
        if not extended:
            out.append([lo, lo, _Delta(lo, hi)])
        prev = lo
    return out
"def",
"_MakeRanges",
"(",
"pairs",
")",
":",
"ranges",
"=",
"[",
"]",
"last",
"=",
"-",
"100",
"def",
"evenodd",
"(",
"last",
",",
"a",
",",
"b",
",",
"r",
")",
":",
"if",
"a",
"!=",
"last",
"+",
"1",
"or",
"b",
"!=",
"_AddDelta",
"(",
"a",
... | 20.947368 | 20.921053 |
def get_calendar_events(self, **kwargs):
    """
    List calendar events.

    :calls: `GET /api/v1/calendar_events \
    <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.calendar_event.CalendarEvent`
    """
    # Imported locally to avoid a circular import at module load time.
    from canvasapi.calendar_event import CalendarEvent

    combined = combine_kwargs(**kwargs)
    return PaginatedList(CalendarEvent, self.__requester, 'GET',
                         'calendar_events', _kwargs=combined)
"def",
"get_calendar_events",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"canvasapi",
".",
"calendar_event",
"import",
"CalendarEvent",
"return",
"PaginatedList",
"(",
"CalendarEvent",
",",
"self",
".",
"__requester",
",",
"'GET'",
",",
"'calendar_e... | 32.052632 | 19.315789 |
def get(key, default=-1):
    """Backport support for original codes.

    Integer keys resolve by enum value; unknown string keys are first
    registered on :class:`TaggerId` with ``default`` as their value.
    """
    if isinstance(key, int):
        return TaggerId(key)
    known = key in TaggerId._member_map_
    if not known:
        extend_enum(TaggerId, key, default)
    return TaggerId[key]
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"TaggerId",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"TaggerId",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Tag... | 36.857143 | 7.714286 |
def get_tasks(self, task_id=None, state='completed', json_file=None):
    """Load all project Tasks and build the matching data frame.

    :param task_id: restrict loading to one task id (optional)
    :param state: task state filter, defaults to 'completed'
    :param json_file: optional JSON file to read tasks from
    :raises ProjectError: when no project is currently selected
    """
    if self.project is None:
        raise ProjectError
    self.tasks = create_tasks_loader(
        self.project.id, task_id, state, json_file, self.all
    ).load()
    self._check_project_has_tasks()
    self.tasks_df = dataframer.create_data_frame(self.tasks)
"def",
"get_tasks",
"(",
"self",
",",
"task_id",
"=",
"None",
",",
"state",
"=",
"'completed'",
",",
"json_file",
"=",
"None",
")",
":",
"if",
"self",
".",
"project",
"is",
"None",
":",
"raise",
"ProjectError",
"loader",
"=",
"create_tasks_loader",
"(",
... | 39.181818 | 18.545455 |
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    # ``g`` deliberately defaults to this module's globals, captured at
    # definition time, so calling with no argument patches the module.
    manager = ResourceManager()
    g['_manager'] = manager
    public_names = (n for n in dir(manager) if not n.startswith('_'))
    for n in public_names:
        g[n] = getattr(manager, n)
"def",
"_initialize",
"(",
"g",
"=",
"globals",
"(",
")",
")",
":",
"manager",
"=",
"ResourceManager",
"(",
")",
"g",
"[",
"'_manager'",
"]",
"=",
"manager",
"for",
"name",
"in",
"dir",
"(",
"manager",
")",
":",
"if",
"not",
"name",
".",
"startswith"... | 37.571429 | 11.285714 |
def erase_text(self, locator, click=True, clear=False, backspace=0, params=None):
    """
    Various ways to erase text from web element.
    :param locator: locator tuple or WebElement instance
    :param click: clicks the input field
    :param clear: clears the input field
    :param backspace: how many times to hit backspace
    :param params: (optional) locator params
    :return: None
    """
    # Resolve locators to a concrete element; WebElements pass straight through.
    if isinstance(locator, WebElement):
        element = locator
    else:
        element = self.get_visible_element(locator, params)
    if click:
        self.click(element)
    if clear:
        element.clear()
    if backspace:
        chain = ActionChains(self.driver)
        for _ in range(backspace):
            chain.send_keys(Keys.BACKSPACE)
        chain.perform()
"def",
"erase_text",
"(",
"self",
",",
"locator",
",",
"click",
"=",
"True",
",",
"clear",
"=",
"False",
",",
"backspace",
"=",
"0",
",",
"params",
"=",
"None",
")",
":",
"element",
"=",
"locator",
"if",
"not",
"isinstance",
"(",
"element",
",",
"Web... | 32.307692 | 17.153846 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.