text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def FindProxies():
    """Tries to find proxies by interrogating all the user's settings.

    This function is a modified urllib.getproxies_registry() from the
    standard library. We just store the proxy value in the environment
    for urllib to find it.

    TODO(user): Iterate through all the possible values if one proxy
    fails, in case more than one proxy is specified in different users
    profiles.

    Returns:
      A list of proxies.
    """
    found = []
    for index in range(100):
        try:
            user_sid = winreg.EnumKey(winreg.HKEY_USERS, index)
        except OSError:
            # No more user hives to enumerate.
            break
        try:
            settings_key = winreg.OpenKey(
                winreg.HKEY_USERS,
                user_sid + "\\Software\\Microsoft\\Windows"
                "\\CurrentVersion\\Internet Settings")
            enabled = winreg.QueryValueEx(settings_key, "ProxyEnable")[0]
            if enabled:
                # Returned as Unicode but problems if not converted to ASCII
                server = str(
                    winreg.QueryValueEx(settings_key, "ProxyServer")[0])
                if "=" not in server:
                    # Use one setting for all protocols
                    if server[:5] == "http:":
                        found.append(server)
                    else:
                        found.append("http://%s" % server)
                else:
                    # Per-protocol settings
                    for entry in server.split(";"):
                        protocol, address = entry.split("=", 1)
                        # See if address has a type:// prefix
                        if not re.match("^([^/:]+)://", address):
                            address = "%s://%s" % (protocol, address)
                        found.append(address)
            settings_key.Close()
        except (OSError, ValueError, TypeError):
            continue
    logging.debug("Found proxy servers: %s", found)
    return found
"def",
"FindProxies",
"(",
")",
":",
"proxies",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"100",
")",
":",
"try",
":",
"sid",
"=",
"winreg",
".",
"EnumKey",
"(",
"winreg",
".",
"HKEY_USERS",
",",
"i",
")",
"except",
"OSError",
":",... | 28.55 | 22.566667 |
def RemoveSearchProperties(self, **searchProperties) -> None:
    """
    Remove entries from this control's search properties.

    searchProperties: dict, same as searchProperties in `Control.__init__`.
    Raises KeyError if a named property is not currently set.
    """
    for name in searchProperties:
        # pop without a default raises KeyError for unknown names,
        # matching dict deletion semantics.
        self.searchProperties.pop(name)
        if name == 'RegexName':
            # Clear the compiled regex that mirrors the 'RegexName' entry.
            self.regexName = None
"def",
"RemoveSearchProperties",
"(",
"self",
",",
"*",
"*",
"searchProperties",
")",
"->",
"None",
":",
"for",
"key",
"in",
"searchProperties",
":",
"del",
"self",
".",
"searchProperties",
"[",
"key",
"]",
"if",
"key",
"==",
"'RegexName'",
":",
"self",
".... | 38.875 | 9.375 |
def create_service_key(self, service_name, key_name):
    """Create a service key for the given service.

    If a key with ``key_name`` already exists for the service, it is
    reused instead of creating a duplicate.

    :param service_name: name of the service instance
    :param key_name: name of the service key to create (or reuse)
    :return: API response for the existing or newly created key
    """
    if self.has_key(service_name, key_name):
        # Lazy %-args: message is only formatted if WARNING is emitted.
        logging.warning("Reusing existing service key %s", key_name)
        return self.get_service_key(service_name, key_name)
    body = {
        'service_instance_guid': self.get_instance_guid(service_name),
        'name': key_name,
    }
    return self.api.post('/v2/service_keys', body)
"def",
"create_service_key",
"(",
"self",
",",
"service_name",
",",
"key_name",
")",
":",
"if",
"self",
".",
"has_key",
"(",
"service_name",
",",
"key_name",
")",
":",
"logging",
".",
"warning",
"(",
"\"Reusing existing service key %s\"",
"%",
"(",
"key_name",
... | 35.5 | 20.071429 |
def confirm(text, default=True):
    """
    Console yes/no confirmation dialog based on raw_input.

    Returns True for "y" and False for "n"; an empty answer selects
    *default*.  Re-prompts until a valid answer is given.
    NOTE(review): relies on Python 2's ``raw_input`` builtin.
    """
    legend = "[y]/n" if default else "y/[n]"
    answer = ""
    while answer not in ("y", "n"):
        answer = raw_input(text + " ({}): ".format(legend)).lower()
        if not answer:
            # Empty input falls back to the default choice.
            answer = "y" if default else "n"
    return answer == "y"
"def",
"confirm",
"(",
"text",
",",
"default",
"=",
"True",
")",
":",
"if",
"default",
":",
"legend",
"=",
"\"[y]/n\"",
"else",
":",
"legend",
"=",
"\"y/[n]\"",
"res",
"=",
"\"\"",
"while",
"(",
"res",
"!=",
"\"y\"",
")",
"and",
"(",
"res",
"!=",
"... | 24.421053 | 15.789474 |
def get_tree_members(self):
    """Collect member info from this node and every descendant.

    Walks the subtree rooted at this node, gathering each node's
    ``get_member_info()`` output exactly once, then projects every
    truthy member onto the attributes in ``self.attr_list``.
    """
    members = []
    pending = deque([self])
    seen = set()
    while pending:
        current = pending.popleft()
        if current in seen:
            continue
        members.extend(current.get_member_info())
        # extendleft pushes children in reverse order onto the front.
        pending.extendleft(current.get_children())
        seen.add(current)
    return [
        {attribute: member.get(attribute) for attribute in self.attr_list}
        for member in members
        if member
    ]
"def",
"get_tree_members",
"(",
"self",
")",
":",
"members",
"=",
"[",
"]",
"queue",
"=",
"deque",
"(",
")",
"queue",
".",
"appendleft",
"(",
"self",
")",
"visited",
"=",
"set",
"(",
")",
"while",
"len",
"(",
"queue",
")",
":",
"node",
"=",
"queue"... | 32.764706 | 21.235294 |
def get_asset_search_session(self):
    """Gets an asset search session.

    return: (osid.repository.AssetSearchSession) - an
            ``AssetSearchSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_asset_search()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_search()`` is ``true``.*
    """
    if self.supports_asset_search():
        # pylint: disable=no-member
        return sessions.AssetSearchSession(runtime=self._runtime)
    raise errors.Unimplemented()
"def",
"get_asset_search_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_asset_search",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"AssetSearchSession",
"(",
"run... | 40.866667 | 15.066667 |
def create_from_fits(cls, fitsfile, norm_type='eflux',
                     hdu_scan="SCANDATA",
                     hdu_energies="EBOUNDS",
                     irow=None):
    """Create a CastroData object from a tscube FITS file.

    Parameters
    ----------
    fitsfile : str
        Name of the fits file
    norm_type : str
        Type of normalization to use. Valid options are:
        * norm : Normalization w.r.t. to test source
        * flux : Flux of the test source ( ph cm^-2 s^-1 )
        * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 )
        * npred: Number of predicted photons (Not implemented)
        * dnde : Differential flux of the test source
          ( ph cm^-2 s^-1 MeV^-1 )
    hdu_scan : str
        Name of the FITS HDU with the scan data
    hdu_energies : str
        Name of the FITS HDU with the energy binning and
        normalization data
    irow : int or None
        Row of the scan data table to use; None assumes the table has
        a single row.

    Returns
    -------
    castro : `~fermipy.castro.CastroData`
    """
    scan_tab = Table.read(fitsfile, hdu=hdu_scan)
    if irow is not None:
        # Select the requested row from the scan table.
        scan_tab = scan_tab[irow]
    energy_tab = Table.read(fitsfile, hdu=hdu_energies)
    scan_tab = convert_sed_cols(scan_tab)
    energy_tab = convert_sed_cols(energy_tab)
    return cls.create_from_tables(norm_type, scan_tab, energy_tab)
"def",
"create_from_fits",
"(",
"cls",
",",
"fitsfile",
",",
"norm_type",
"=",
"'eflux'",
",",
"hdu_scan",
"=",
"\"SCANDATA\"",
",",
"hdu_energies",
"=",
"\"EBOUNDS\"",
",",
"irow",
"=",
"None",
")",
":",
"if",
"irow",
"is",
"not",
"None",
":",
"tab_s",
... | 33.208333 | 20.770833 |
def item_gebouw_adapter(obj, request):
    """
    Adapter for rendering an object of
    :class:`crabpy.gateway.crab.Gebouw` to json.
    """
    def describe(value):
        # Render the recurring (id, naam, definitie) triple.
        return {
            'id': value.id,
            'naam': value.naam,
            'definitie': value.definitie
        }
    return {
        'id': obj.id,
        'aard': describe(obj.aard),
        'status': describe(obj.status),
        'geometriemethode': describe(obj.methode),
        'geometrie': obj.geometrie,
        'metadata': {
            'begin_tijd': obj.metadata.begin_tijd,
            'begin_datum': obj.metadata.begin_datum,
            'begin_bewerking': describe(obj.metadata.begin_bewerking),
            'begin_organisatie': describe(obj.metadata.begin_organisatie)
        }
    }
"def",
"item_gebouw_adapter",
"(",
"obj",
",",
"request",
")",
":",
"return",
"{",
"'id'",
":",
"obj",
".",
"id",
",",
"'aard'",
":",
"{",
"'id'",
":",
"obj",
".",
"aard",
".",
"id",
",",
"'naam'",
":",
"obj",
".",
"aard",
".",
"naam",
",",
"'def... | 32.394737 | 14.710526 |
def reset():
    """
    Reset the timer at the current level in the hierarchy (i.e. might or
    might not be the root).

    Notes:
        Erases timing data but preserves relationship to the hierarchy. If the
        current timer level was not previously stopped, any timing data from
        this timer (including subdivisions) will be discarded and not added to
        the next higher level in the data structure. If the current timer was
        previously stopped, then its data has already been pushed into the
        next higher level.

    Returns:
        float: The current time.

    Raises:
        LoopError: If in a timed loop.
    """
    # Resetting inside a timed loop would corrupt loop bookkeeping; refuse.
    if f.t.in_loop:
        raise LoopError("Cannot reset a timer while it is in timed loop.")
    f.t.reset()
    # NOTE(review): presumably rebuilds module-level shortcut bindings that
    # mirror f.t's state - confirm against the rest of the module.  f.t is
    # re-read below in case refresh rebinds it.
    f.refresh_shortcuts()
    return f.t.start_t
"def",
"reset",
"(",
")",
":",
"if",
"f",
".",
"t",
".",
"in_loop",
":",
"raise",
"LoopError",
"(",
"\"Cannot reset a timer while it is in timed loop.\"",
")",
"f",
".",
"t",
".",
"reset",
"(",
")",
"f",
".",
"refresh_shortcuts",
"(",
")",
"return",
"f",
... | 34.217391 | 26.391304 |
def from_dicts(cls, mesh_name, vert_dict, normal_dict):
    """Returns a wavefront .obj string using pre-triangulated vertex dict and normal dict as reference."""
    # Header.
    chunks = ["o {name}\n".format(name=mesh_name)]
    # Vertex positions, wall by wall.
    for wall in vert_dict:
        for vertex in vert_dict[wall]:
            chunks.append("v {0} {1} {2}\n".format(*vertex))
    # A single dummy UV coordinate, shared by every face.
    chunks.append("vt 1.0 1.0\n")
    # One normal per wall.
    for wall, normal in normal_dict.items():
        chunks.append("vn {0} {1} {2}\n".format(*normal))
    # Faces: three 1-indexed vertices each, all referencing UV 1 and the
    # wall's normal (walls are assumed to be 0-based integer keys).
    vertex_index = 0
    for wall in vert_dict:
        for _ in range(0, len(vert_dict[wall]), 3):
            corners = []
            for _corner in range(3):
                vertex_index += 1
                corners.append("{v}/1/{n}".format(v=vertex_index, n=wall + 1))
            chunks.append('f ' + ' '.join(corners) + '\n')
    return WavefrontWriter(string=''.join(chunks))
"def",
"from_dicts",
"(",
"cls",
",",
"mesh_name",
",",
"vert_dict",
",",
"normal_dict",
")",
":",
"# Put header in string",
"wavefront_str",
"=",
"\"o {name}\\n\"",
".",
"format",
"(",
"name",
"=",
"mesh_name",
")",
"# Write Vertex data from vert_dict",
"for",
"wal... | 40.033333 | 18.3 |
def read_union(fo, writer_schema, reader_schema=None):
    """A union is encoded by first writing a long value indicating the
    zero-based position within the union of the schema of its value.
    The value is then encoded per the indicated schema within the union.
    """
    branch = writer_schema[read_long(fo)]
    if not reader_schema:
        return read_data(fo, branch)
    # Schema resolution: find a reader schema compatible with the branch.
    # A non-list reader schema is treated as a single candidate.
    candidates = reader_schema if isinstance(reader_schema, list) else [reader_schema]
    for candidate in candidates:
        if match_types(branch, candidate):
            return read_data(fo, branch, candidate)
    raise SchemaResolutionError(
        'schema mismatch: %s not found in %s' % (writer_schema, reader_schema))
"def",
"read_union",
"(",
"fo",
",",
"writer_schema",
",",
"reader_schema",
"=",
"None",
")",
":",
"# schema resolution",
"index",
"=",
"read_long",
"(",
"fo",
")",
"if",
"reader_schema",
":",
"# Handle case where the reader schema is just a single type (not union)",
"i... | 44.681818 | 18.363636 |
def new(self, items:Iterator, processor:PreProcessors=None, **kwargs)->'ItemList':
    "Create a new `ItemList` from `items`, keeping the same attributes."
    processor = ifnone(processor, self.processor)
    # Start from the copied attributes; explicit kwargs take precedence.
    merged = {name: getattr(self, name) for name in self.copy_new}
    merged.update(kwargs)
    return self.__class__(items=items, processor=processor, **merged)
"def",
"new",
"(",
"self",
",",
"items",
":",
"Iterator",
",",
"processor",
":",
"PreProcessors",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"'ItemList'",
":",
"processor",
"=",
"ifnone",
"(",
"processor",
",",
"self",
".",
"processor",
")",
"cop... | 63.333333 | 24.333333 |
def R(X, destination, a1, a2, b):
    """A single Salsa20 row operation:
    X[destination] ^= rotl32(X[a1] + X[a2], b).

    Both the sum and the rotation are reduced mod 2**32 so every entry
    of ``X`` stays a 32-bit value (Python ints are unbounded, so the
    shifted half must be masked explicitly).
    """
    total = (X[a1] + X[a2]) & 0xffffffff
    # Bug fix: mask the left-shifted half to 32 bits; without the mask,
    # X[destination] accumulates garbage bits above bit 31.
    X[destination] ^= (((total << b) & 0xffffffff) | (total >> (32 - b)))
"def",
"R",
"(",
"X",
",",
"destination",
",",
"a1",
",",
"a2",
",",
"b",
")",
":",
"a",
"=",
"(",
"X",
"[",
"a1",
"]",
"+",
"X",
"[",
"a2",
"]",
")",
"&",
"0xffffffff",
"X",
"[",
"destination",
"]",
"^=",
"(",
"(",
"a",
"<<",
"b",
")",
... | 31.8 | 12.2 |
def get_tab(self, tab_name, allow_disabled=False):
    """Returns a specific tab from this tab group.

    If the tab is not allowed or not enabled this method returns ``None``.
    If the tab is disabled but you wish to return it anyway, you can pass
    ``True`` to the allow_disabled argument.
    """
    tab = self._tabs.get(tab_name, None)
    if not tab or not tab._allowed:
        return None
    return tab if (tab._enabled or allow_disabled) else None
"def",
"get_tab",
"(",
"self",
",",
"tab_name",
",",
"allow_disabled",
"=",
"False",
")",
":",
"tab",
"=",
"self",
".",
"_tabs",
".",
"get",
"(",
"tab_name",
",",
"None",
")",
"if",
"tab",
"and",
"tab",
".",
"_allowed",
"and",
"(",
"tab",
".",
"_en... | 39.333333 | 20.416667 |
def _GetNormalizedTimestamp(self):
    """Retrieves the normalized timestamp.

    Returns:
      decimal.Decimal: normalized timestamp, which contains the number of
          seconds since January 1, 1970 00:00:00 and a fraction of second
          used for increased precision, or None if the normalized timestamp
          cannot be determined.
    """
    # Return the cached value when available.
    if self._normalized_timestamp is not None:
        return self._normalized_timestamp
    if self._timestamp is None:
        # No base timestamp: nothing to normalize.
        return self._normalized_timestamp
    normalized = decimal.Decimal(self._timestamp)
    if self.fraction_of_second is not None:
        fraction = decimal.Decimal(self.fraction_of_second)
        # Scale the fraction by the declared precision.
        if self._precision == dfdatetime_definitions.PRECISION_1_NANOSECOND:
            fraction /= self._NANOSECONDS_PER_SECOND
        else:
            fraction /= self._100_NANOSECONDS_PER_SECOND
        normalized += fraction
    self._normalized_timestamp = normalized
    return self._normalized_timestamp
"def",
"_GetNormalizedTimestamp",
"(",
"self",
")",
":",
"if",
"self",
".",
"_normalized_timestamp",
"is",
"None",
":",
"if",
"self",
".",
"_timestamp",
"is",
"not",
"None",
":",
"self",
".",
"_normalized_timestamp",
"=",
"decimal",
".",
"Decimal",
"(",
"sel... | 38.708333 | 23.666667 |
def _rt_update_docindices(self, element, docstart, docend):
    """Updates the docstart, docend, start and end attributes for the
    specified element using the new limits for the docstring.

    Returns the character delta applied to the element's start/end."""
    # How many characters were added/removed at the end of the current
    # doc limits (computed before docend is overwritten).
    shift = element.docend - docend
    element.start += shift
    element.end += shift
    element.docstart = docstart
    element.docend = docend
    return shift
"def",
"_rt_update_docindices",
"(",
"self",
",",
"element",
",",
"docstart",
",",
"docend",
")",
":",
"#see how many characters have to be added/removed from the end",
"#of the current doc limits.",
"delta",
"=",
"element",
".",
"docend",
"-",
"docend",
"element",
".",
... | 40.75 | 12.583333 |
def cas(key, value, old_value):
    '''
    Check and set a value in the minion datastore

    CLI Example:

    .. code-block:: bash

        salt '*' data.cas <key> <value> <old_value>
    '''
    store = load()
    # Only swap when the key exists and still holds the expected value.
    if key in store and store[key] == old_value:
        store[key] = value
        dump(store)
        return True
    return False
"def",
"cas",
"(",
"key",
",",
"value",
",",
"old_value",
")",
":",
"store",
"=",
"load",
"(",
")",
"if",
"key",
"not",
"in",
"store",
":",
"return",
"False",
"if",
"store",
"[",
"key",
"]",
"!=",
"old_value",
":",
"return",
"False",
"store",
"[",
... | 17.5 | 24.5 |
def check_dir(self, dirname):
    """Check that a directory exists, creating it if needed.

    Newly created directories are chowned to the daemon's uid/gid and
    chmodded to 0775; the actions are recorded in ``self.pre_log``.
    If the directory cannot be created, the daemon exits with code 3.

    :param dirname: directory path to check/create
    :type dirname: str
    :return: None
    """
    try:
        os.makedirs(dirname)
        dir_stat = os.stat(dirname)
        print("Created the directory: %s, stat: %s" % (dirname, dir_stat))
        if dir_stat.st_uid != self.uid:
            # Give the directory to the daemon user and make it group-writable.
            os.chown(dirname, self.uid, self.gid)
            os.chmod(dirname, 0o775)
            dir_stat = os.stat(dirname)
            print("Changed directory ownership and permissions: %s, stat: %s"
                  % (dirname, dir_stat))
        self.pre_log.append(("DEBUG",
                             "Daemon '%s' directory %s checking... "
                             "User uid: %s, directory stat: %s."
                             % (self.name, dirname, os.getuid(), dir_stat)))
        self.pre_log.append(("INFO",
                             "Daemon '%s' directory %s did not exist, I created it. "
                             "I set ownership for this directory to %s:%s."
                             % (self.name, dirname, self.user, self.group)))
    except OSError as exp:
        if exp.errno == errno.EEXIST and os.path.isdir(dirname):
            # Directory already exists - nothing to do.
            pass
        else:
            self.pre_log.append(("ERROR",
                                 "Daemon directory '%s' did not exist, "
                                 "and I could not create. Exception: %s"
                                 % (dirname, exp)))
            # Fixed the garbled "create.'." fragment in this message.
            self.exit_on_error("Daemon directory '%s' did not exist, "
                               "and I could not create. Exception: %s"
                               % (dirname, exp), exit_code=3)
"def",
"check_dir",
"(",
"self",
",",
"dirname",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"dir_stat",
"=",
"os",
".",
"stat",
"(",
"dirname",
")",
"print",
"(",
"\"Created the directory: %s, stat: %s\"",
"%",
"(",
"dirname",
",",
... | 45.5 | 21.35 |
def p_lpartselect(self, p):
    'lpartselect : identifier LBRACKET expression COLON expression RBRACKET'
    # NOTE: the string above is the PLY grammar rule for this production,
    # not documentation - do not edit it without changing the grammar.
    # p[1] = identifier, p[3] = MSB expression, p[5] = LSB expression.
    p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
    # Propagate the identifier's line number to the whole production.
    p.set_lineno(0, p.lineno(1))
"def",
"p_lpartselect",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"Partselect",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"p",
"[",
"5",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
... | 51.5 | 20 |
def is_sw_writable(self):
    """
    Field is writable by software (sw access is rw, rw1, w or w1).
    """
    writable_modes = (
        rdltypes.AccessType.rw,
        rdltypes.AccessType.rw1,
        rdltypes.AccessType.w,
        rdltypes.AccessType.w1,
    )
    return self.get_property('sw') in writable_modes
"def",
"is_sw_writable",
"(",
"self",
")",
":",
"sw",
"=",
"self",
".",
"get_property",
"(",
"'sw'",
")",
"return",
"sw",
"in",
"(",
"rdltypes",
".",
"AccessType",
".",
"rw",
",",
"rdltypes",
".",
"AccessType",
".",
"rw1",
",",
"rdltypes",
".",
"Access... | 32.5 | 15.25 |
def get_progress(self):
    """
    Give a rough estimate of the progress done, as a fraction of the
    region already consumed, capped at 1.0.
    """
    position = self.reader.reader.tell()
    span = float(self.region_end - self.region_start)
    fraction = (position - self.region_start) / span
    return min(fraction, 1.0)
"def",
"get_progress",
"(",
"self",
")",
":",
"pos",
"=",
"self",
".",
"reader",
".",
"reader",
".",
"tell",
"(",
")",
"return",
"min",
"(",
"(",
"pos",
"-",
"self",
".",
"region_start",
")",
"/",
"float",
"(",
"self",
".",
"region_end",
"-",
"self... | 33.25 | 9.25 |
def get_package_info_from_line(tpip_pkg, line):
    """Given a line of text from metadata, extract semantic info and
    update *tpip_pkg* in place."""
    lowered = line.lower()
    try:
        field, value = lowered.split(':', 1)
    except ValueError:
        # Not a "key: value" line - nothing to extract.
        return
    field = field.strip()
    value = value.strip()
    if value == 'unknown':
        return
    # Exact field-name matches map straight into the package dict.
    if field in TPIP_FIELD_MAPPINGS:
        tpip_pkg[TPIP_FIELD_MAPPINGS[field]] = value
        return
    # Fall back to any "version*" field when no version was found yet.
    if field.startswith('version') and not tpip_pkg.get('PkgVersion'):
        tpip_pkg['PkgVersion'] = value
        return
    # Handle british and american spelling of licence/license.
    if 'licen' in lowered:
        if field.startswith('classifier') or '::' in value:
            licence = license_cleanup(lowered.rsplit(':')[-1].strip().lower())
            if licence:
                tpip_pkg.setdefault('PkgLicenses', []).append(licence)
"def",
"get_package_info_from_line",
"(",
"tpip_pkg",
",",
"line",
")",
":",
"lower_line",
"=",
"line",
".",
"lower",
"(",
")",
"try",
":",
"metadata_key",
",",
"metadata_value",
"=",
"lower_line",
".",
"split",
"(",
"':'",
",",
"1",
")",
"except",
"ValueE... | 34.09375 | 21.5 |
def device_information(name, identifier):
    """Create a new DEVICE_INFO_MESSAGE."""
    # pylint: disable=no-member
    message = create(protobuf.DEVICE_INFO_MESSAGE)
    inner = message.inner()
    inner.uniqueIdentifier = identifier
    inner.name = name
    # Static values presenting as an iPhone running the Apple TV Remote app.
    inner.localizedModelName = 'iPhone'
    inner.systemBuildVersion = '14G60'
    inner.applicationBundleIdentifier = 'com.apple.TVRemote'
    inner.applicationBundleVersion = '273.12'
    inner.protocolVersion = 1
    inner.lastSupportedMessageType = 58
    inner.supportsExtendedMotion = True
    return message
"def",
"device_information",
"(",
"name",
",",
"identifier",
")",
":",
"# pylint: disable=no-member",
"message",
"=",
"create",
"(",
"protobuf",
".",
"DEVICE_INFO_MESSAGE",
")",
"info",
"=",
"message",
".",
"inner",
"(",
")",
"info",
".",
"uniqueIdentifier",
"="... | 36.6 | 8.133333 |
def note_create(self, post_id, coor_x, coor_y, width, height, body):
    """Function to create a note (Requires login) (UNTESTED).

    Parameters:
        post_id (int): Post to annotate.
        coor_x (int): The x coordinates of the note in pixels,
            with respect to the top-left corner of the image.
        coor_y (int): The y coordinates of the note in pixels,
            with respect to the top-left corner of the image.
        width (int): The width of the note in pixels.
        height (int): The height of the note in pixels.
        body (str): The body of the note.
    """
    note_fields = {
        'note[post_id]': post_id,
        'note[x]': coor_x,
        'note[y]': coor_y,
        'note[width]': width,
        'note[height]': height,
        'note[body]': body
    }
    return self._get('notes.json', note_fields, method='POST', auth=True)
"def",
"note_create",
"(",
"self",
",",
"post_id",
",",
"coor_x",
",",
"coor_y",
",",
"width",
",",
"height",
",",
"body",
")",
":",
"params",
"=",
"{",
"'note[post_id]'",
":",
"post_id",
",",
"'note[x]'",
":",
"coor_x",
",",
"'note[y]'",
":",
"coor_y",
... | 42.272727 | 17.818182 |
def get_output_dir(self, nb):
    """Open a notebook and determine the output directory from the name.

    Also caches the package directory and name on self.
    """
    package_dir, package_name = self.get_package_dir_name(nb)
    self.package_dir = package_dir
    self.package_name = package_name
    return join(package_dir, package_name)
"def",
"get_output_dir",
"(",
"self",
",",
"nb",
")",
":",
"self",
".",
"package_dir",
",",
"self",
".",
"package_name",
"=",
"self",
".",
"get_package_dir_name",
"(",
"nb",
")",
"return",
"join",
"(",
"self",
".",
"package_dir",
",",
"self",
".",
"packa... | 47.6 | 20.4 |
def transpile_modname_source_target(self, spec, modname, source, target):
    """
    Delegate to the simple (original) transpile implementation.
    """
    delegate = self.simple_transpile_modname_source_target
    return delegate(spec, modname, source, target)
"def",
"transpile_modname_source_target",
"(",
"self",
",",
"spec",
",",
"modname",
",",
"source",
",",
"target",
")",
":",
"return",
"self",
".",
"simple_transpile_modname_source_target",
"(",
"spec",
",",
"modname",
",",
"source",
",",
"target",
")"
] | 33 | 14.142857 |
def create_server(self, admin_login, admin_password, location):
    '''
    Create a new Azure SQL Database server.

    admin_login:
        The administrator login name for the new server.
    admin_password:
        The administrator login password for the new server.
    location:
        The region to deploy the new server.
    '''
    # Validate every required argument, in order.
    for name, value in (('admin_login', admin_login),
                        ('admin_password', admin_password),
                        ('location', location)):
        _validate_not_none(name, value)
    request_body = _SqlManagementXmlSerializer.create_server_to_xml(
        admin_login, admin_password, location)
    response = self.perform_post(self._get_servers_path(), request_body)
    return _SqlManagementXmlSerializer.xml_to_create_server_response(
        response.body)
"def",
"create_server",
"(",
"self",
",",
"admin_login",
",",
"admin_password",
",",
"location",
")",
":",
"_validate_not_none",
"(",
"'admin_login'",
",",
"admin_login",
")",
"_validate_not_none",
"(",
"'admin_password'",
",",
"admin_password",
")",
"_validate_not_no... | 34.6 | 19.64 |
def _parse_cached(html_dump):
    """Parse html string from cached html files.

    Parameters
    ----------
    html_dump : string
        HTML content

    Returns
    -------
    translations : list
        Translations list.
    """
    soup = BeautifulSoup(html_dump, "html.parser")
    results = []
    for block in soup.find_all("div", class_="translation"):
        headword = tuple(node.get_text() for node in block.select("div.word > h2"))
        parts = []
        for pos_div in block.find_all("div", class_="part-of-speech"):
            pos_name = pos_div.find("p", class_="part-name")
            if pos_name:
                pos_name = pos_name.get_text().strip("[]")
            meanings = []
            for meaning_div in pos_div.find_all("div", class_="meaning"):
                texts = [span.get_text() for span in meaning_div.select("li > span")]
                examples = [
                    [span.get_text() for span in para.find_all("span")]
                    for para in meaning_div.find_all("p")
                ]
                meanings.append(Meaning(texts, examples))
            parts.append(PartOfSpeech(pos_name, meanings))
        results.append(Translation(headword, parts))
    return results
"def",
"_parse_cached",
"(",
"html_dump",
")",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"html_dump",
",",
"\"html.parser\"",
")",
"translations",
"=",
"[",
"]",
"for",
"trans",
"in",
"soup",
".",
"find_all",
"(",
"\"div\"",
",",
"class_",
"=",
"\"translation\... | 35.8125 | 19.78125 |
def get_lat_long(self, callsign, timestamp=timestamp_now):
    """Returns Latitude and Longitude for a callsign.

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        dict: Containing Latitude and Longitude

    Raises:
        KeyError: No data found for callsign

    Example:
        >>> from pyhamtools import LookupLib, Callinfo
        >>> my_lookuplib = LookupLib(lookuptype="countryfile")
        >>> cic = Callinfo(my_lookuplib)
        >>> cic.get_lat_long("DH1TW")
        {'latitude': 51.0, 'longitude': -10.0}

    Note:
        Unfortunately, in most cases the returned Latitude and Longitude
        are not very precise. Clublog and Country-files.com use the
        country's capital coordinates in most cases, if no dedicated
        entry in the database exists. Best results will be retrieved
        with QRZ.com Lookup.
    """
    data = self.get_all(callsign, timestamp=timestamp)
    return {const.LATITUDE: data[const.LATITUDE],
            const.LONGITUDE: data[const.LONGITUDE]}
"def",
"get_lat_long",
"(",
"self",
",",
"callsign",
",",
"timestamp",
"=",
"timestamp_now",
")",
":",
"callsign_data",
"=",
"self",
".",
"get_all",
"(",
"callsign",
",",
"timestamp",
"=",
"timestamp",
")",
"return",
"{",
"const",
".",
"LATITUDE",
":",
"ca... | 36.333333 | 26.055556 |
def _parse_txtinfo(self, data):
    """
    Build a NetworkX Graph from the python list returned by
    self._txtinfo_to_python() and return it.
    """
    graph = self._init_graph()
    for entry in data:
        graph.add_edge(entry['source'], entry['target'],
                       weight=entry['cost'])
    return graph
"def",
"_parse_txtinfo",
"(",
"self",
",",
"data",
")",
":",
"graph",
"=",
"self",
".",
"_init_graph",
"(",
")",
"for",
"link",
"in",
"data",
":",
"graph",
".",
"add_edge",
"(",
"link",
"[",
"'source'",
"]",
",",
"link",
"[",
"'target'",
"]",
",",
... | 35.636364 | 10 |
def get_module_files(src_directory, blacklist, list_all=False):
    """Given a package directory return a list of all available python
    module's files in the package and its subpackages.

    :type src_directory: str
    :param src_directory:
        path of the directory corresponding to the package
    :type blacklist: list or tuple
    :param blacklist: iterable
        list of files or directories to ignore.
    :type list_all: bool
    :param list_all:
        get files from all paths, including ones without __init__.py
    :rtype: list
    :return:
        the list of all available python module's files in the package
        and its subpackages
    """
    collected = []
    for directory, dirnames, filenames in os.walk(src_directory):
        if directory in blacklist:
            continue
        _handle_blacklist(blacklist, dirnames, filenames)
        # check for __init__.py
        if not list_all and "__init__.py" not in filenames:
            # Not a package: prune the walk below this directory.
            dirnames[:] = ()
            continue
        collected.extend(
            os.path.join(directory, filename)
            for filename in filenames
            if _is_python_file(filename))
    return collected
"def",
"get_module_files",
"(",
"src_directory",
",",
"blacklist",
",",
"list_all",
"=",
"False",
")",
":",
"files",
"=",
"[",
"]",
"for",
"directory",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"src_directory",
")",
":",
"if",
"dir... | 32.885714 | 18.114286 |
def _get_partition(self, org_name, part_name=None):
    """Send a get-partition request to the DCNM.

    :param org_name: name of organization
    :param part_name: name of partition (defaults to self._part_name)
    :return: decoded JSON payload, or None on failure
    """
    part_name = part_name if part_name is not None else self._part_name
    url = self._update_part_url % (org_name, part_name)
    response = self._send_request("GET", url, '', 'partition')
    if response and response.status_code in self._resp_ok:
        return response.json()
"def",
"_get_partition",
"(",
"self",
",",
"org_name",
",",
"part_name",
"=",
"None",
")",
":",
"if",
"part_name",
"is",
"None",
":",
"part_name",
"=",
"self",
".",
"_part_name",
"url",
"=",
"self",
".",
"_update_part_url",
"%",
"(",
"org_name",
",",
"pa... | 39.083333 | 11.166667 |
def pipe(self, target):
    """
    Pipes this Recver to *target*, which is either a `Sender`_ (or
    `Pair`_) or a callable.

    Sender target: the two pairs are rewired so that sending on this
    Recver's Sender is directed to the target's Recver.

    Callable target: a new `Pipe`_ is created; this Recver and the new
    Pipe's Sender are passed to the callable as upstream and downstream
    so it can filter, map or duplicate packets.  The new Pipe's Recver
    is returned.
    """
    if not callable(target):
        return target.connect(self)
    sender, recver = self.hub.pipe()
    # Link the two ends in the closure with a strong reference to
    # prevent them from being garbage collected if this piped section
    # is used in a chain.
    self.downstream = sender
    sender.upstream = self
    @self.hub.spawn
    def _():
        try:
            target(self, sender)
        except vanilla.exception.Halt:
            sender.close()
    return recver
"def",
"pipe",
"(",
"self",
",",
"target",
")",
":",
"if",
"callable",
"(",
"target",
")",
":",
"sender",
",",
"recver",
"=",
"self",
".",
"hub",
".",
"pipe",
"(",
")",
"# link the two ends in the closure with a strong reference to",
"# prevent them from being gar... | 31.810345 | 20.293103 |
def draw(self, milliseconds):
    """Draws all of the objects in our world."""
    camera = Ragnarok.get_world().Camera
    camera_pos = camera.get_world_pos()
    self.__sort_draw()
    self.clear_backbuffer()
    for drawable in self.__draw_objects:
        # Skip anything the camera cannot see (static objects always draw).
        if not (drawable.is_static or drawable.is_visible_to_camera(camera)):
            continue
        # Temporarily offset into camera space, draw, then restore.
        world_coords = drawable.coords
        drawable.coords = Vector2(world_coords.X - camera_pos.X,
                                  world_coords.Y - camera_pos.Y)
        drawable.draw(milliseconds, self.backbuffer)
        drawable.coords = world_coords
"def",
"draw",
"(",
"self",
",",
"milliseconds",
")",
":",
"cam",
"=",
"Ragnarok",
".",
"get_world",
"(",
")",
".",
"Camera",
"camPos",
"=",
"cam",
".",
"get_world_pos",
"(",
")",
"self",
".",
"__sort_draw",
"(",
")",
"self",
".",
"clear_backbuffer",
"... | 46.25 | 11.375 |
def redo(self):
    """Redo the last action.

    Calls ``redo()`` on all controllers involved in this action and
    returns the (possibly post-processed) tuple of results.
    """
    controllers = self.forward()
    ups = () if controllers is None else tuple(c.redo() for c in controllers)
    return ups if self.process_ups is None else self.process_ups(ups)
"def",
"redo",
"(",
"self",
")",
":",
"controllers",
"=",
"self",
".",
"forward",
"(",
")",
"if",
"controllers",
"is",
"None",
":",
"ups",
"=",
"(",
")",
"else",
":",
"ups",
"=",
"tuple",
"(",
"[",
"controller",
".",
"redo",
"(",
")",
"for",
"con... | 27.875 | 16.4375 |
def filter(args):
    """
    %prog filter fastafile 100
    Filter the FASTA file to contain records with size >= or <= certain cutoff.
    """
    # NOTE: the docstring above doubles as the CLI usage text (it is passed to
    # OptionParser below), so it must keep the %prog format string.
    p = OptionParser(filter.__doc__)
    p.add_option("--less", default=False, action="store_true",
                 help="filter the sizes < certain cutoff [default: >=]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    fastafile, cutoff = args
    # cutoff arrives as a CLI string; bail out with usage on a non-integer.
    try:
        cutoff = int(cutoff)
    except ValueError:
        sys.exit(not p.print_help())
    # Lazy parsing avoids loading the whole FASTA file into memory.
    f = Fasta(fastafile, lazy=True)
    fw = must_open(opts.outfile, "w")
    for name, rec in f.iteritems_ordered():
        # Default keeps records with len >= cutoff; --less inverts the test.
        if opts.less and len(rec) >= cutoff:
            continue
        if (not opts.less) and len(rec) < cutoff:
            continue
        SeqIO.write([rec], fw, "fasta")
        fw.flush()
    return fw.name
"def",
"filter",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"filter",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--less\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"filter the sizes < cer... | 23.432432 | 20.675676 |
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
    """ Helper function for _get_data that handles empty lists. """
    field_names = get_field_list(fields, schema)
    empty_page = {'cols': _get_cols(field_names, schema), 'rows': []}
    return empty_page, 0
"def",
"_get_data_from_empty_list",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"return",
"... | 62.75 | 16.5 |
def augment_cycle(self, amount, cycle):
        '''
        API:
            augment_cycle(self, amount, cycle):
        Description:
            Augments 'amount' unit of flow along cycle.
        Pre:
            Arcs should have 'flow' attribute.
        Inputs:
            amount: An integer representing the amount to augment
            cycle: A list representing a cycle
        Post:
            Changes 'flow' attributes of arcs.
        '''
        def _augment_arc(i, j):
            # Arcs traversed forward gain flow; when (i, j) is not an arc the
            # cycle traverses (j, i) backwards, which loses flow instead.
            if (i, j) in self.edge_attr:
                self.edge_attr[(i, j)]['flow'] += amount
            else:
                self.edge_attr[(j, i)]['flow'] -= amount
        # Walk consecutive node pairs, wrapping from the last node back to the
        # first via the modulo, instead of duplicating the wrap-around case
        # (the original repeated the arc-update logic verbatim for the last arc).
        k = len(cycle)
        for index in range(k):
            _augment_arc(cycle[index], cycle[(index + 1) % k])
"def",
"augment_cycle",
"(",
"self",
",",
"amount",
",",
"cycle",
")",
":",
"index",
"=",
"0",
"k",
"=",
"len",
"(",
"cycle",
")",
"while",
"index",
"<",
"(",
"k",
"-",
"1",
")",
":",
"i",
"=",
"cycle",
"[",
"index",
"]",
"j",
"=",
"cycle",
"... | 34.735294 | 16.852941 |
def _conf(cls, opts):
        """Setup logging via ini-file from logging_conf_file option."""
        conf_path = cls.config.get('core', 'logging_conf_file', None)
        if conf_path is None:
            return False
        if not os.path.exists(conf_path):
            # FileNotFoundError added only in Python 3.3
            # https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy
            raise OSError("Error: Unable to locate specified logging configuration file!")
        logging.config.fileConfig(conf_path)
        return True
"def",
"_conf",
"(",
"cls",
",",
"opts",
")",
":",
"logging_conf",
"=",
"cls",
".",
"config",
".",
"get",
"(",
"'core'",
",",
"'logging_conf_file'",
",",
"None",
")",
"if",
"logging_conf",
"is",
"None",
":",
"return",
"False",
"if",
"not",
"os",
".",
... | 45.076923 | 24.846154 |
def frequency(self):
        """
        How often the recurrence repeats.
        ("YEARLY", "MONTHLY", "WEEKLY", "DAILY")
        """
        names = ("YEARLY", "MONTHLY", "WEEKLY", "DAILY")
        freq = self.rule._freq
        try:
            # Negative indices wrap just like the original bounds check allowed.
            return names[freq]
        except IndexError:
            return "unsupported_frequency_{}".format(freq)
"def",
"frequency",
"(",
"self",
")",
":",
"freqOptions",
"=",
"(",
"\"YEARLY\"",
",",
"\"MONTHLY\"",
",",
"\"WEEKLY\"",
",",
"\"DAILY\"",
")",
"if",
"self",
".",
"rule",
".",
"_freq",
"<",
"len",
"(",
"freqOptions",
")",
":",
"return",
"freqOptions",
"[... | 36.8 | 12 |
def reread(self):
        """
        Read configuration file and substitute references into checks conf
        """
        logger.debug("Loading settings from %s",
                     os.path.abspath(self.filename))
        conf = self.read_conf()
        creds_changed = self.creds.reread()
        new_checks = self.parser.parse_checks(conf)
        if new_checks == self.checks:
            # Checks unchanged; report whether the credentials changed instead.
            return creds_changed
        self.checks = new_checks
        return True
"def",
"reread",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Loading settings from %s\"",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"filename",
")",
")",
"conf",
"=",
"self",
".",
"read_conf",
"(",
")",
"changed",
"=",
"self"... | 32.5 | 12.071429 |
def parse_to_slug(words, maxlen=24):
    """
    Parse a string into a slug format suitable for use in URLs and other
    character restricted applications. Only utf-8 strings are supported at this
    time.
    :param str words: The words to parse.
    :param int maxlen: The maximum length of the slug.
    :return: The parsed words as a slug.
    :rtype: str
    """
    limit = min(maxlen, len(words))
    parts = []
    for ch in words:
        if len(parts) == limit:
            break
        code = ord(ch)
        if code == 0x27:              # apostrophes vanish entirely
            continue
        if 0x30 <= code <= 0x39:      # digits pass through
            parts.append(ch)
        elif 0x41 <= code <= 0x5a:    # uppercase ASCII is lowercased
            parts.append(chr(code + 0x20))
        elif 0x61 <= code <= 0x7a:    # lowercase ASCII passes through
            parts.append(ch)
        elif parts and parts[-1] != '-':
            # Any other character collapses into a single dash separator.
            parts.append('-')
    if parts and parts[-1] == '-':
        parts.pop()
    return ''.join(parts)
"def",
"parse_to_slug",
"(",
"words",
",",
"maxlen",
"=",
"24",
")",
":",
"slug",
"=",
"''",
"maxlen",
"=",
"min",
"(",
"maxlen",
",",
"len",
"(",
"words",
")",
")",
"for",
"c",
"in",
"words",
":",
"if",
"len",
"(",
"slug",
")",
"==",
"maxlen",
... | 24.3 | 18.366667 |
def _make_mask(self, data, lon_str=LON_STR, lat_str=LAT_STR):
        """Construct the mask that defines a region on a given data's grid."""
        region_mask = False
        for west, east, south, north in self.mask_bounds:
            lons = data[lon_str]
            lats = data[lat_str]
            if west < east:
                in_lon = (lons > west) & (lons < east)
            else:
                # Bounds straddle the dateline: keep points outside [east, west].
                in_lon = (lons < west) | (lons > east)
            in_lat = (lats > south) & (lats < north)
            region_mask = region_mask | (in_lon & in_lat)
        return region_mask
"def",
"_make_mask",
"(",
"self",
",",
"data",
",",
"lon_str",
"=",
"LON_STR",
",",
"lat_str",
"=",
"LAT_STR",
")",
":",
"mask",
"=",
"False",
"for",
"west",
",",
"east",
",",
"south",
",",
"north",
"in",
"self",
".",
"mask_bounds",
":",
"if",
"west"... | 48.909091 | 19.636364 |
def set_playback(self, playback):
        """Send Playback command."""
        url = ENDPOINTS["setPlayback"].format(self._ip_address)
        return request(url, params={"playback": playback})
"def",
"set_playback",
"(",
"self",
",",
"playback",
")",
":",
"req_url",
"=",
"ENDPOINTS",
"[",
"\"setPlayback\"",
"]",
".",
"format",
"(",
"self",
".",
"_ip_address",
")",
"params",
"=",
"{",
"\"playback\"",
":",
"playback",
"}",
"return",
"request",
"("... | 44.2 | 8.2 |
def parse_rfc3339_utc_string(rfc3339_utc_string):
  """Converts a datestamp from RFC3339 UTC to a datetime.
  Args:
    rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
  Returns:
    A datetime, or None for no match / unsupported fraction length.
  """
  # The timestamp from the Google Operations are all in RFC3339 format, but
  # they are sometimes formatted to millisconds, microseconds, sometimes
  # nanoseconds, and sometimes only seconds:
  # * 2016-11-14T23:05:56Z
  # * 2016-11-14T23:05:56.010Z
  # * 2016-11-14T23:05:56.010429Z
  # * 2016-11-14T23:05:56.010429380Z
  m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
               rfc3339_utc_string)
  # It would be unexpected to get a different date format back from Google.
  # If we raise an exception here, we can break people completely.
  # Instead, let's just return None and people can report that some dates
  # are not showing up.
  # We might reconsider this approach in the future; it was originally
  # established when dates were only used for display.
  if not m:
    return None
  groups = m.groups()
  if len(groups[6]) not in (0, 3, 6, 9):
    return None
  # Create a UTC datestamp from parsed components
  # 1- Turn components 0-5 from strings to integers
  # 2- If the last component does not exist, set it to 0.
  #    If it does exist, scale it to microseconds.
  g = [int(val) for val in groups[:6]]
  fraction = groups[6]
  if not fraction:
    micros = 0
  elif len(fraction) == 3:
    micros = int(fraction) * 1000
  elif len(fraction) == 6:
    micros = int(fraction)
  elif len(fraction) == 9:
    # When nanoseconds are provided, we round
    micros = int(round(int(fraction) / 1000))
  else:
    # Unreachable given the length check above. BUGFIX: the original built
    # this message with '...'.len(fraction) -- str has no .len(), so hitting
    # this branch would have raised AttributeError instead of the assert.
    assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
  try:
    return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
  except ValueError as e:
    assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
        rfc3339_utc_string, e)
"def",
"parse_rfc3339_utc_string",
"(",
"rfc3339_utc_string",
")",
":",
"# The timestamp from the Google Operations are all in RFC3339 format, but",
"# they are sometimes formatted to millisconds, microseconds, sometimes",
"# nanoseconds, and sometimes only seconds:",
"# * 2016-11-14T23:05:56Z",
... | 34.017544 | 21.947368 |
def set_max_string_length(self, length=None):
        """stub"""
        if self.get_max_string_length_metadata().is_read_only():
            raise NoAccess()
        # Value must satisfy the field's cardinal metadata constraints.
        valid = self.my_osid_object_form._is_valid_cardinal(
            length, self.get_max_string_length_metadata())
        if not valid:
            raise InvalidArgument()
        minimum = self.my_osid_object_form.min_string_length
        # The maximum must stay strictly greater than any configured minimum.
        if minimum is not None and length < minimum + 1:
            raise InvalidArgument()
        self.my_osid_object_form._my_map['maxStringLength'] = length
        self._max_string_length = length
"def",
"set_max_string_length",
"(",
"self",
",",
"length",
"=",
"None",
")",
":",
"if",
"self",
".",
"get_max_string_length_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"not",
"self",
".",
"my_osid_object_for... | 47.153846 | 14.846154 |
def rnormal(mu, tau, size=None):
    """
    Random normal variates.

    Draws from a normal distribution with mean ``mu`` and precision ``tau``
    (i.e. standard deviation ``1/sqrt(tau)``).
    """
    sigma = 1. / np.sqrt(tau)
    return np.random.normal(loc=mu, scale=sigma, size=size)
"def",
"rnormal",
"(",
"mu",
",",
"tau",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"normal",
"(",
"mu",
",",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"tau",
")",
",",
"size",
")"
] | 25.8 | 7.4 |
def read(self, vals):
        """Read values.

        Args:
            vals (list): list of strings representing values
        """
        # First value is the record count; records follow back-to-back.
        pos = 0
        count = int(vals[pos])
        pos += 1
        for _ in range(count):
            record = GroundTemperature()
            record.read(vals[pos:pos + record.field_count])
            self.add_ground_temperature(record)
            pos += record.field_count
"def",
"read",
"(",
"self",
",",
"vals",
")",
":",
"i",
"=",
"0",
"count",
"=",
"int",
"(",
"vals",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"for",
"_",
"in",
"range",
"(",
"count",
")",
":",
"obj",
"=",
"GroundTemperature",
"(",
")",
"obj",
".",
... | 25 | 16.333333 |
def get_extension_attribute(self, ext_name, key):
        """
        Return the value stored under ``key`` for extension ``ext_name``,
        or ``None`` when the extension or the key is unknown.
        """
        ext_attrs = self.extension_attributes.get(ext_name)
        return ext_attrs.get(key) if ext_attrs else None
"def",
"get_extension_attribute",
"(",
"self",
",",
"ext_name",
",",
"key",
")",
":",
"attributes",
"=",
"self",
".",
"extension_attributes",
".",
"get",
"(",
"ext_name",
")",
"if",
"not",
"attributes",
":",
"return",
"None",
"return",
"attributes",
".",
"ge... | 29.375 | 10.625 |
def abs(x, context=None):
    """
    Return abs(x).

    ``x`` is implicitly converted to a BigFloat, then the MPFR absolute-value
    operation is applied in ``context`` (or the current context when
    ``context`` is None).
    """
    # NOTE: intentionally shadows the builtin abs() within this module.
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_abs,
        (BigFloat._implicit_convert(x),),
        context,
    )
"def",
"abs",
"(",
"x",
",",
"context",
"=",
"None",
")",
":",
"return",
"_apply_function_in_current_context",
"(",
"BigFloat",
",",
"mpfr",
".",
"mpfr_abs",
",",
"(",
"BigFloat",
".",
"_implicit_convert",
"(",
"x",
")",
",",
")",
",",
"context",
",",
")... | 18.545455 | 16.727273 |
def metadefs_namespace_list(request,
                            filters=None,
                            sort_dir='asc',
                            sort_key='namespace',
                            marker=None,
                            paginate=False):
    """Retrieve a listing of Namespaces
    :param paginate: If true will perform pagination based on settings.
    :param marker: Specifies the namespace of the last-seen namespace.
             The typical pattern of limit and marker is to make an
             initial limited request and then to use the last
             namespace from the response as the marker parameter
             in a subsequent limited request. With paginate, limit
             is automatically set.
    :param sort_dir: The sort direction ('asc' or 'desc').
    :param sort_key: The field to sort on (for example, 'created_at'). Default
             is namespace. The way base namespaces are loaded into glance
             typically at first deployment is done in a single transaction
             giving them a potentially unpredictable sort result when using
             create_at.
    :param filters: specifies addition fields to filter on such as
             resource_types.
    :returns A tuple of three values:
        1) Current page results
        2) A boolean of whether or not there are previous page(s).
        3) A boolean of whether or not there are more page(s).
    """
    # Listing namespaces requires the v2 API. If not supported we return an
    # empty array so callers don't need to worry about version checking.
    if get_version() < 2:
        return [], False, False
    if filters is None:
        filters = {}
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    # Fetch one extra record beyond the page size so we can detect whether a
    # further page exists.
    if paginate:
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'filters': filters}
    if marker:
        kwargs['marker'] = marker
    kwargs['sort_dir'] = sort_dir
    kwargs['sort_key'] = sort_key
    namespaces_iter = glanceclient(request, '2').metadefs_namespace.list(
        page_size=request_size, limit=limit, **kwargs)
    # Filter the namespaces based on the provided properties_target since this
    # is not supported by the metadata namespaces API.
    resource_types = filters.get('resource_types')
    properties_target = filters.get('properties_target')
    if resource_types and properties_target:
        namespaces_iter = filter_properties_target(namespaces_iter,
                                                   resource_types,
                                                   properties_target)
    has_prev_data = False
    has_more_data = False
    if paginate:
        namespaces = list(itertools.islice(namespaces_iter, request_size))
        # first and middle page condition
        if len(namespaces) > page_size:
            # Drop the sentinel extra record fetched above.
            namespaces.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif sort_dir == 'desc' and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
    else:
        namespaces = list(namespaces_iter)
    namespaces = [Namespace(namespace) for namespace in namespaces]
    return namespaces, has_more_data, has_prev_data
"def",
"metadefs_namespace_list",
"(",
"request",
",",
"filters",
"=",
"None",
",",
"sort_dir",
"=",
"'asc'",
",",
"sort_key",
"=",
"'namespace'",
",",
"marker",
"=",
"None",
",",
"paginate",
"=",
"False",
")",
":",
"# Listing namespaces requires the v2 API. If no... | 40.309524 | 18.595238 |
def _get_name(self):
        """Find name of scoring function."""
        if self.name is not None:
            return self.name
        scorer = self.scoring_
        # Work through the possible scorer representations, most specific last.
        if scorer is None:
            return 'score'
        if isinstance(scorer, str):
            return scorer
        if isinstance(scorer, partial):
            return scorer.func.__name__
        if isinstance(scorer, _BaseScorer):
            return scorer._score_func.__name__
        return scorer.__name__
"def",
"_get_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"name",
"is",
"not",
"None",
":",
"return",
"self",
".",
"name",
"if",
"self",
".",
"scoring_",
"is",
"None",
":",
"return",
"'score'",
"if",
"isinstance",
"(",
"self",
".",
"scoring_",
",... | 37.692308 | 8.307692 |
def image_by_id(self, id):
        """
        Return image with given Id
        """
        if not id:
            return None
        for image in self.images():
            if image['Id'] == id:
                return image
        return None
"def",
"image_by_id",
"(",
"self",
",",
"id",
")",
":",
"if",
"not",
"id",
":",
"return",
"None",
"return",
"next",
"(",
"(",
"image",
"for",
"image",
"in",
"self",
".",
"images",
"(",
")",
"if",
"image",
"[",
"'Id'",
"]",
"==",
"id",
")",
",",
... | 28 | 13.75 |
def attach_session(self):
        """Return ``$ tmux attach-session`` aka alias: ``$ tmux attach``."""
        result = self.cmd('attach-session', '-t%s' % self.id)
        # Any stderr output from tmux is treated as a failure.
        if result.stderr:
            raise exc.LibTmuxException(result.stderr)
"def",
"attach_session",
"(",
"self",
")",
":",
"proc",
"=",
"self",
".",
"cmd",
"(",
"'attach-session'",
",",
"'-t%s'",
"%",
"self",
".",
"id",
")",
"if",
"proc",
".",
"stderr",
":",
"raise",
"exc",
".",
"LibTmuxException",
"(",
"proc",
".",
"stderr",... | 39 | 17 |
def __draw_cluster_item_multi_dimension(self, ax, pair, item, cluster_descr):
        """!
        @brief Draw cluster chunk defined by pair coordinates in data space with dimension greater than 1.

        @param[in] ax (axis): Matplotlib axis that is used to display chunk of cluster point.
        @param[in] pair (list): Coordinate of the point that should be displayed.
        @param[in] item (list): Data point or index of data point.
        @param[in] cluster_descr (canvas_cluster_descr): Cluster description whose point is visualized.
        """
        dim_x, dim_y = pair[0], pair[1]
        # 'item' is either the point itself or an index into the stored data.
        if cluster_descr.data is None:
            x_value, y_value = item[dim_x], item[dim_y]
        else:
            point = cluster_descr.data[item]
            x_value, y_value = point[dim_x], point[dim_y]
        ax.plot(x_value, y_value,
                color=cluster_descr.color, marker=cluster_descr.marker,
                markersize=cluster_descr.markersize)
"def",
"__draw_cluster_item_multi_dimension",
"(",
"self",
",",
"ax",
",",
"pair",
",",
"item",
",",
"cluster_descr",
")",
":",
"index_dimension1",
"=",
"pair",
"[",
"0",
"]",
"index_dimension2",
"=",
"pair",
"[",
"1",
"]",
"if",
"cluster_descr",
".",
"data"... | 54.25 | 36.3 |
async def get_txn(self, seq_no: int) -> str:
        """
        Find a transaction on the distributed ledger by its sequence number.

        :param seq_no: transaction number
        :return: json sequence number of transaction, null for no match
        """
        LOGGER.debug('BaseAnchor.get_txn >>> seq_no: %s', seq_no)
        # Fallback value: empty JSON object (overwritten below on success).
        rv_json = json.dumps({})
        # Build a GET_TXN ledger request and submit it via the pool.
        req_json = await ledger.build_get_txn_request(self.did, None, seq_no)
        resp = json.loads(await self._submit(req_json))
        # Extract the transaction payload from the ledger response envelope.
        rv_json = self.pool.protocol.txn2data(resp)
        LOGGER.debug('BaseAnchor.get_txn <<< %s', rv_json)
        return rv_json
"async",
"def",
"get_txn",
"(",
"self",
",",
"seq_no",
":",
"int",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'BaseAnchor.get_txn >>> seq_no: %s'",
",",
"seq_no",
")",
"rv_json",
"=",
"json",
".",
"dumps",
"(",
"{",
"}",
")",
"req_json",
"=",
... | 34.111111 | 22.444444 |
def deprecated(new_name: str):
    """
    This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used.

    :param new_name: name of the replacement callers should migrate to.
    """
    def decorator(func):
        @wraps(func)
        def new_func(*args, **kwargs):
            # BUGFIX: the original ran simplefilter('always') and then reset
            # with simplefilter('default'), unconditionally clobbering any
            # warning filters the application had configured. catch_warnings
            # restores the caller's filter state instead.
            with warnings.catch_warnings():
                warnings.simplefilter('always', DeprecationWarning)
                warnings.warn(
                    'Use {0} instead of {1}, {1} will be removed in the future.'
                    .format(new_name, func.__name__),
                    category=DeprecationWarning,
                    stacklevel=2,
                )
            return func(*args, **kwargs)
        setattr(new_func, '__deprecated', True)
        return new_func
    return decorator
"def",
"deprecated",
"(",
"new_name",
":",
"str",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"new_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'a... | 38.285714 | 15.428571 |
def get_all(
            self, target_resource=None, target_resource_group=None, target_resource_type=None, monitor_service=None, monitor_condition=None, severity=None, smart_group_state=None, time_range=None, page_count=None, sort_by=None, sort_order=None, custom_headers=None, raw=False, **operation_config):
        """Get all smartGroups within the subscription.

        List all the smartGroups within the specified subscription. .

        :param target_resource: Filter by target resource( which is full ARM
         ID) Default value is select all.
        :type target_resource: str
        :param target_resource_group: Filter by target resource group name.
         Default value is select all.
        :type target_resource_group: str
        :param target_resource_type: Filter by target resource type. Default
         value is select all.
        :type target_resource_type: str
        :param monitor_service: Filter by monitor service which is the source
         of the alert instance. Default value is select all. Possible values
         include: 'Application Insights', 'ActivityLog Administrative',
         'ActivityLog Security', 'ActivityLog Recommendation', 'ActivityLog
         Policy', 'ActivityLog Autoscale', 'Log Analytics', 'Nagios',
         'Platform', 'SCOM', 'ServiceHealth', 'SmartDetector', 'VM Insights',
         'Zabbix'
        :type monitor_service: str or
         ~azure.mgmt.alertsmanagement.models.MonitorService
        :param monitor_condition: Filter by monitor condition which is the
         state of the monitor(alertRule) at monitor service. Default value is
         to select all. Possible values include: 'Fired', 'Resolved'
        :type monitor_condition: str or
         ~azure.mgmt.alertsmanagement.models.MonitorCondition
        :param severity: Filter by severity. Defaut value is select all.
         Possible values include: 'Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4'
        :type severity: str or ~azure.mgmt.alertsmanagement.models.Severity
        :param smart_group_state: Filter by state of the smart group. Default
         value is to select all. Possible values include: 'New',
         'Acknowledged', 'Closed'
        :type smart_group_state: str or
         ~azure.mgmt.alertsmanagement.models.AlertState
        :param time_range: Filter by time range by below listed values.
         Default value is 1 day. Possible values include: '1h', '1d', '7d',
         '30d'
        :type time_range: str or ~azure.mgmt.alertsmanagement.models.TimeRange
        :param page_count: Determines number of alerts returned per page in
         response. Permissible value is between 1 to 250. When the
         "includeContent" filter is selected, maximum value allowed is 25.
         Default value is 25.
        :type page_count: int
        :param sort_by: Sort the query results by input field Default value
         is sort by 'lastModifiedDateTime'. Possible values include:
         'alertsCount', 'state', 'severity', 'startDateTime',
         'lastModifiedDateTime'
        :type sort_by: str or
         ~azure.mgmt.alertsmanagement.models.SmartGroupsSortByFields
        :param sort_order: Sort the query results order in either ascending or
         descending. Default value is 'desc' for time fields and 'asc' for
         others. Possible values include: 'asc', 'desc'
        :type sort_order: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SmartGroupsList or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.alertsmanagement.models.SmartGroupsList or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.alertsmanagement.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get_all.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters: only filters the caller actually supplied are
        # forwarded as query parameters.
        query_parameters = {}
        if target_resource is not None:
            query_parameters['targetResource'] = self._serialize.query("target_resource", target_resource, 'str')
        if target_resource_group is not None:
            query_parameters['targetResourceGroup'] = self._serialize.query("target_resource_group", target_resource_group, 'str')
        if target_resource_type is not None:
            query_parameters['targetResourceType'] = self._serialize.query("target_resource_type", target_resource_type, 'str')
        if monitor_service is not None:
            query_parameters['monitorService'] = self._serialize.query("monitor_service", monitor_service, 'str')
        if monitor_condition is not None:
            query_parameters['monitorCondition'] = self._serialize.query("monitor_condition", monitor_condition, 'str')
        if severity is not None:
            query_parameters['severity'] = self._serialize.query("severity", severity, 'str')
        if smart_group_state is not None:
            query_parameters['smartGroupState'] = self._serialize.query("smart_group_state", smart_group_state, 'str')
        if time_range is not None:
            query_parameters['timeRange'] = self._serialize.query("time_range", time_range, 'str')
        if page_count is not None:
            query_parameters['pageCount'] = self._serialize.query("page_count", page_count, 'int')
        if sort_by is not None:
            query_parameters['sortBy'] = self._serialize.query("sort_by", sort_by, 'str')
        if sort_order is not None:
            query_parameters['sortOrder'] = self._serialize.query("sort_order", sort_order, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SmartGroupsList', response)
        if raw:
            # Caller asked for the raw transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
"def",
"get_all",
"(",
"self",
",",
"target_resource",
"=",
"None",
",",
"target_resource_group",
"=",
"None",
",",
"target_resource_type",
"=",
"None",
",",
"monitor_service",
"=",
"None",
",",
"monitor_condition",
"=",
"None",
",",
"severity",
"=",
"None",
"... | 55.874016 | 28.661417 |
def args_as_tuple(self):
        """Return arguments as a list."""
        body_transform = self.arguments["body-transform"]
        match_type = self.arguments["match-type"]
        key_list = self.arguments["key-list"]
        prefix = ("body", body_transform, match_type)
        if key_list.startswith("["):
            # A bracketed key-list expands into its individual keys.
            return prefix + tuple(tools.to_list(key_list))
        return prefix + (key_list.strip('"'),)
"def",
"args_as_tuple",
"(",
"self",
")",
":",
"result",
"=",
"(",
"\"body\"",
",",
")",
"result",
"=",
"result",
"+",
"(",
"self",
".",
"arguments",
"[",
"\"body-transform\"",
"]",
",",
"self",
".",
"arguments",
"[",
"\"match-type\"",
"]",
")",
"if",
... | 40.545455 | 17.181818 |
def _fill_role_cache(self, principal, overwrite=False):
        """Fill role cache for `principal` (User or Group), in order to avoid
        too many queries when checking role access with 'has_role'.

        Return role_cache of `principal`
        """
        if not self.app_state.use_cache:
            return None
        # Populate the cache when missing, or refresh it when overwrite is set.
        if not self._has_role_cache(principal) or overwrite:
            self._set_role_cache(principal, self._all_roles(principal))
        return self._role_cache(principal)
"def",
"_fill_role_cache",
"(",
"self",
",",
"principal",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"app_state",
".",
"use_cache",
":",
"return",
"None",
"if",
"not",
"self",
".",
"_has_role_cache",
"(",
"principal",
")",
"or",
... | 40.5 | 16 |
def from_dict(cls, copula_dict):
        """Create a new instance from the given parameters.

        Args:
            copula_dict: `dict` with the parameters to replicate the copula.
                Like the output of `Bivariate.to_dict`

        Returns:
            Bivariate: Instance of the copula defined on the parameters.
        """
        copula = cls(copula_dict['copula_type'])
        copula.theta = copula_dict['theta']
        copula.tau = copula_dict['tau']
        return copula
"def",
"from_dict",
"(",
"cls",
",",
"copula_dict",
")",
":",
"instance",
"=",
"cls",
"(",
"copula_dict",
"[",
"'copula_type'",
"]",
")",
"instance",
".",
"theta",
"=",
"copula_dict",
"[",
"'theta'",
"]",
"instance",
".",
"tau",
"=",
"copula_dict",
"[",
... | 34.857143 | 17.857143 |
def get_fragment(self, gp, **kwargs):
        """
        Return a complete fragment for a given gp.

        :param gp: A graph pattern
        :return: The fragment collected for the pattern.
        """
        return FragmentCollector(self.__host, gp).get_fragment(**kwargs)
"def",
"get_fragment",
"(",
"self",
",",
"gp",
",",
"*",
"*",
"kwargs",
")",
":",
"collector",
"=",
"FragmentCollector",
"(",
"self",
".",
"__host",
",",
"gp",
")",
"return",
"collector",
".",
"get_fragment",
"(",
"*",
"*",
"kwargs",
")"
] | 32.5 | 8 |
def next(self):
    """Returns the next line from this input reader as (lineinfo, line) tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple describes the source, it is itself
      a tuple (blobkey, filenumber, byteoffset).
      The second element of the tuple is the line found at that offset.
    """
    # No file currently open: lazily open the zip and advance to the next
    # entry in our assigned [start_file_index, end_file_index) range.
    if not self._filestream:
      if not self._zip:
        self._zip = zipfile.ZipFile(self._reader(self._blob_key))
        # Get a list of entries, reversed so we can pop entries off in order
        self._entries = self._zip.infolist()[self._start_file_index:
                                             self._end_file_index]
        self._entries.reverse()
      if not self._entries:
        raise StopIteration()
      entry = self._entries.pop()
      value = self._zip.read(entry.filename)
      self._filestream = StringIO.StringIO(value)
      if self._initial_offset:
        # Resume mid-file: seek to the saved offset, then discard the
        # (possibly partial) line at that position.
        self._filestream.seek(self._initial_offset)
        self._filestream.readline()
    start_position = self._filestream.tell()
    line = self._filestream.readline()
    if not line:
      # Done with this file in the zip. Move on to the next file.
      self._filestream.close()
      self._filestream = None
      self._start_file_index += 1
      self._initial_offset = 0
      return self.next()
    return ((self._blob_key, self._start_file_index, start_position),
            line.rstrip("\n"))
"def",
"next",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_filestream",
":",
"if",
"not",
"self",
".",
"_zip",
":",
"self",
".",
"_zip",
"=",
"zipfile",
".",
"ZipFile",
"(",
"self",
".",
"_reader",
"(",
"self",
".",
"_blob_key",
")",
")",
... | 37.815789 | 18.052632 |
def get_version(path=None, module=None):
  """Return the version string.

  This function ensures that the version string complies with PEP 440.
  The format of our version string is:

      - for RELEASE builds:
          <major>.<minor>
        e.g.
          0.1
          2.4

      - for DEVELOPMENT builds:
          <major>.<minor>.dev<num_branch_commits> \
            +<branch_name>.g<short_git_sha>[.dirty]
        e.g.
          1.1.dev34+new.shiny.feature.gfa973da
          0.1.dev7+master.gb91ffa6.dirty

      - for UNKNOWN builds:
          0.0+unknown.[<scm_type>.]<date>
        e.g.
          0.0+unknown.svn.201402031023
          0.0+unknown.201602081715

  The <major>.<minor> substring for development builds will be that of the
  NEXT (minor) release, in order to allow proper Python version ordering.

  Parameters
  ----------
  path : None or string, optional
      A file or directory to use to find the SCM or sdist checkout path
      (default is the current working directory)
  module : None or string, optional
      Get version via module name (e.g. __name__ variable), which takes
      precedence over path if provided (ignore otherwise)

  Returns
  -------
  version: string
      A string representation of the package version
  """
  # Check the module option first.
  version = get_version_from_module(module)
  if version:
    return normalised(version)
  # Turn path into a valid directory (default is current directory)
  if path is None:
    path = os.getcwd()
  path = os.path.abspath(path)
  # A file path is resolved to its containing directory.
  if os.path.exists(path) and not os.path.isdir(path):
    path = os.path.dirname(path)
  if not os.path.isdir(path):
    raise ValueError('No such package source directory: %r' % (path,))
  # Check for an sdist in the process of being installed by pip.
  version = get_version_from_unpacked_sdist(path)
  if version:
    return normalised(version)
  # Check the SCM.
  version = get_version_from_scm(path)
  if version:
    return normalised(version)
  # Check if there is a katversion file in the given path.
  version = get_version_from_file(path)
  if version:
    return normalised(version)
  # None of the above got a version so we will make one up based on the date.
  return normalised(date_version(scm))
"def",
"get_version",
"(",
"path",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"# Check the module option first.",
"version",
"=",
"get_version_from_module",
"(",
"module",
")",
"if",
"version",
":",
"return",
"normalised",
"(",
"version",
")",
"# Turn pa... | 31.337838 | 19.878378 |
def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool:
    """Check whether the given grid point is currently within our vision.

    more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
    """
    assert isinstance(pos, (Point2, Point3, Unit))
    grid_point = pos.position.to2.rounded
    # Visibility level 2 corresponds to "visible" in the s2client protocol.
    visibility_level = self.state.visibility[grid_point]
    return visibility_level == 2
"def",
"is_visible",
"(",
"self",
",",
"pos",
":",
"Union",
"[",
"Point2",
",",
"Point3",
",",
"Unit",
"]",
")",
"->",
"bool",
":",
"# more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19",
"ass... | 68.166667 | 24.833333 |
def money_flow(close_data, high_data, low_data, volume):
    """
    Money Flow.

    Formula:
    MF = VOLUME * TYPICAL PRICE
    """
    # All four series must have matching lengths.
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data, volume)
    typical_price = tp(close_data, high_data, low_data)
    return volume * typical_price
"def",
"money_flow",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
",",
"volume",
")",
":",
"catch_errors",
".",
"check_for_input_len_diff",
"(",
"close_data",
",",
"high_data",
",",
"low_data",
",",
"volume",
")",
"mf",
"=",
"volume",
"*",
"tp",
"("... | 24.333333 | 16.5 |
def password_enter(self, wallet, password):
    """
    Enters the **password** in to **wallet**

    :param wallet: Wallet to enter password for
    :type wallet: str
    :param password: Password to enter
    :type password: str
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.password_enter(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     password="test"
    ... )
    True
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "password": password,
    }
    response = self.call('password_enter', payload)
    # The node reports success as the string '1'.
    return response['valid'] == '1'
"def",
"password_enter",
"(",
"self",
",",
"wallet",
",",
"password",
")",
":",
"wallet",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"payload",
"=",
"{",
"\"wallet\"",
":",
"wallet",
",",
"\"password\"",
":",
"password",
"}",
... | 25.222222 | 21.888889 |
def tokeninfo(self, jwt):
    """Returns user profile based on the user's jwt

    Validates a JSON Web Token (signature and expiration) and returns the
    user information associated with the user id (sub property) of
    the token.

    Args:
        jwt (str): User's jwt

    Returns:
        The user profile.
    """
    warnings.warn("/tokeninfo will be deprecated in future releases", DeprecationWarning)
    endpoint = 'https://{}/tokeninfo'.format(self.domain)
    return self.post(
        url=endpoint,
        data={'id_token': jwt},
        headers={'Content-Type': 'application/json'},
    )
"def",
"tokeninfo",
"(",
"self",
",",
"jwt",
")",
":",
"warnings",
".",
"warn",
"(",
"\"/tokeninfo will be deprecated in future releases\"",
",",
"DeprecationWarning",
")",
"return",
"self",
".",
"post",
"(",
"url",
"=",
"'https://{}/tokeninfo'",
".",
"format",
"(... | 31.2 | 23.6 |
def tableToTsv(self, model):
    """
    Takes a model class and attempts to create a table in TSV format
    that can be imported into a spreadsheet program.
    """
    field_names = model._meta.fields.keys()
    header_printed = False
    for record in model.select():
        if not header_printed:
            # Emit the header once, before the first data row.
            print("".join("{}\t".format(name) for name in field_names))
            header_printed = True
        print("".join(
            "{}\t".format(getattr(record, name)) for name in field_names))
"def",
"tableToTsv",
"(",
"self",
",",
"model",
")",
":",
"first",
"=",
"True",
"for",
"item",
"in",
"model",
".",
"select",
"(",
")",
":",
"if",
"first",
":",
"header",
"=",
"\"\"",
".",
"join",
"(",
"[",
"\"{}\\t\"",
".",
"format",
"(",
"x",
")... | 36 | 15.375 |
def reffs(self):
    """ Get all valid reffs for every part of the CtsText

    :rtype: MyCapytain.resources.texts.tei.XmlCtsCitation
    """
    # Citation metadata is required before reference levels can be walked.
    if not self.citation.is_set():
        self.getLabel()
    all_reffs = []
    for level in range(1, len(self.citation) + 1):
        all_reffs.extend(self.getValidReff(level=level))
    return all_reffs
"def",
"reffs",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"citation",
".",
"is_set",
"(",
")",
":",
"self",
".",
"getLabel",
"(",
")",
"return",
"[",
"reff",
"for",
"reffs",
"in",
"[",
"self",
".",
"getValidReff",
"(",
"level",
"=",
"i",
"... | 35.7 | 23.3 |
def _load_certificate(location):
    """
    Load a certificate from the given location.

    Args:
        location (str): The location to load. This can either be an HTTPS URL or an absolute file
            path. This is intended to be used with PEM-encoded certificates and therefore assumes
            ASCII encoding.

    Returns:
        str: The PEM-encoded certificate as a unicode string.

    Raises:
        requests.exception.RequestException: Any exception requests could raise.
        IOError: If the location provided could not be opened and read.
    """
    if not location.startswith('https://'):
        # Local file path: read raw bytes and decode as ASCII (PEM).
        _log.info('Loading local x509 certificate from %s', location)
        with open(location, 'rb') as fd:
            return fd.read().decode('ascii')
    _log.info('Downloading x509 certificate from %s', location)
    with requests.Session() as session:
        # Retry transient network failures up to 3 times.
        session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
        response = session.get(location, timeout=30)
        response.raise_for_status()
        return response.text
"def",
"_load_certificate",
"(",
"location",
")",
":",
"if",
"location",
".",
"startswith",
"(",
"'https://'",
")",
":",
"_log",
".",
"info",
"(",
"'Downloading x509 certificate from %s'",
",",
"location",
")",
"with",
"requests",
".",
"Session",
"(",
")",
"as... | 39.962963 | 22.407407 |
def create_tar_file(self, full_archive=False):
    """
    Create tar file to be compressed

    :param full_archive: when True, tar the whole ``archive_dir`` (the
        docker "uber archive" that already contains the individual
        archives); otherwise tar only ``tmp_dir``.
    :return: path of the created tar file
    """
    tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
    # "none" compressor means no compression suffix is appended.
    ext = "" if self.compressor == "none" else ".%s" % self.compressor
    tar_file_name = tar_file_name + ".tar" + ext
    logger.debug("Tar File: " + tar_file_name)
    # Shell out to tar; the compression flag (e.g. z/j) is derived from
    # self.compressor. stderr is captured (and discarded) via PIPE.
    subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
        self.get_compression_flag(self.compressor),
        tar_file_name,
        # for the docker "uber archive,"use archive_dir
        # rather than tmp_dir for all the files we tar,
        # because all the individual archives are in there
        self.tmp_dir if not full_archive else self.archive_dir)),
        stderr=subprocess.PIPE)
    # The source directory is removed once the tarball exists.
    self.delete_archive_dir()
    logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
    return tar_file_name
"def",
"create_tar_file",
"(",
"self",
",",
"full_archive",
"=",
"False",
")",
":",
"tar_file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"archive_tmp_dir",
",",
"self",
".",
"archive_name",
")",
"ext",
"=",
"\"\"",
"if",
"self",
".",
... | 49 | 16.052632 |
def check_appt(self, complex: str, house: str, appt: str) -> bool:
    """
    Check if given appartment exists in the rumetr database
    """
    self.check_house(complex, house)
    cache_key = '%s__%s__%s' % (complex, house, appt)
    # Skip the API round-trip for appartments we have already verified.
    if cache_key in self._checked_appts:
        return True
    try:
        self.get('developers/{developer}/complexes/{complex}/houses/{house}/appts/{appt}'.format(
            developer=self.developer,
            complex=complex,
            house=house,
            appt=appt,
        ))
    except exceptions.Rumetr404Exception:
        raise exceptions.RumetrApptNotFound('Unknown appt (house is known) — may be you should create one?')
    self._checked_appts.add(cache_key)
    return True
"def",
"check_appt",
"(",
"self",
",",
"complex",
":",
"str",
",",
"house",
":",
"str",
",",
"appt",
":",
"str",
")",
"->",
"bool",
":",
"self",
".",
"check_house",
"(",
"complex",
",",
"house",
")",
"if",
"'%s__%s__%s'",
"%",
"(",
"complex",
",",
... | 39.3 | 22.8 |
def submit(self, timestamp):
    """Internal instance method to submit this task for running immediately.

    Does not handle any iteration, end-date, etc., processing."""
    message = {'id': self.pk, 'ts': timestamp.timestamp()}
    Channel(RUN_TASK_CHANNEL).send(message)
"def",
"submit",
"(",
"self",
",",
"timestamp",
")",
":",
"Channel",
"(",
"RUN_TASK_CHANNEL",
")",
".",
"send",
"(",
"{",
"'id'",
":",
"self",
".",
"pk",
",",
"'ts'",
":",
"timestamp",
".",
"timestamp",
"(",
")",
"}",
")"
] | 65 | 13.75 |
def get(self, agentml, user=None, key=None):
    """
    Evaluate and return the current active topic

    :param user: The active user object
    :type user: agentml.User or None

    :param agentml: The active AgentML instance
    :type agentml: AgentML

    :param key: The user id (defaults to the current user if None)
    :type key: str

    :return: Active topic of the user
    :rtype : str or None
    """
    # A key overrides the supplied user object.
    active_user = agentml.get_user(key) if key else user
    if active_user:
        return active_user.topic
    return None
"def",
"get",
"(",
"self",
",",
"agentml",
",",
"user",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"user",
"=",
"agentml",
".",
"get_user",
"(",
"key",
")",
"if",
"key",
"else",
"user",
"if",
"not",
"user",
":",
"return",
"return",
"user",
"... | 28.1 | 16.5 |
def make_route_refresh_request(self, peer_ip, *route_families):
    """Request route-refresh for peer with `peer_ip` for given
    `route_families`.

    Will make route-refresh request for a given `route_family` only if such
    capability is supported and if peer is in ESTABLISHED state. Else, such
    requests are ignored. Raises appropriate error in other cases. If
    `peer_ip` is equal to 'all' makes refresh request to all valid peers.

    :raises ValueError: if no requested family is supported, the peer is
        unknown, or the peer session is not established.
    :return: True on success.
    """
    LOG.debug('Route refresh requested for peer %s and route families %s',
              peer_ip, route_families)
    if not SUPPORTED_GLOBAL_RF.intersection(route_families):
        # The original message used '% is' — an invalid/accidental
        # conversion ("% " flag + "i" int conversion) that breaks with a
        # tuple of strings; '%s' renders the families correctly.
        raise ValueError('Given route family(s) %s is not supported.' %
                         (route_families,))
    peer_list = []
    # If route-refresh is requested for all peers.
    if peer_ip == 'all':
        peer_list.extend(self.get_peers_in_established())
    else:
        given_peer = self._peers.get(peer_ip)
        if not given_peer:
            raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
        if not given_peer.in_established:
            raise ValueError('Peer currently do not have established'
                             ' session.')
        peer_list.append(given_peer)
    # Make route refresh request to valid peers.
    for peer in peer_list:
        peer.request_route_refresh(*route_families)
    return True
"def",
"make_route_refresh_request",
"(",
"self",
",",
"peer_ip",
",",
"*",
"route_families",
")",
":",
"LOG",
".",
"debug",
"(",
"'Route refresh requested for peer %s and route families %s'",
",",
"peer_ip",
",",
"route_families",
")",
"if",
"not",
"SUPPORTED_GLOBAL_RF... | 44.393939 | 21.030303 |
def dot_special(x2d, x3d):
    """Segment-wise dot product.

    This function calculates the dot product of x2d with each trial of x3d.

    Parameters
    ----------
    x2d : array, shape (p, m)
        Input argument.
    x3d : array, shape (t, m, n)
        Segmented input data with t trials, m signals, and n samples. The dot
        product with x2d is calculated for each trial.

    Returns
    -------
    out : array, shape (t, p, n)
        Dot product of x2d with each trial of x3d.

    Examples
    --------
    >>> x = np.random.randn(6, 40, 150)
    >>> a = np.ones((7, 40))
    >>> y = dot_special(a, x)
    >>> y.shape
    (6, 7, 150)
    """
    x3d = atleast_3d(x3d)
    x2d = np.atleast_2d(x2d)
    # Batched matrix multiply: (p, m) broadcasts against (t, m, n) to give
    # (t, p, n) — identical to dotting x2d with each trial, but without the
    # Python-level loop and intermediate concatenation.
    return np.matmul(x2d, x3d)
"def",
"dot_special",
"(",
"x2d",
",",
"x3d",
")",
":",
"x3d",
"=",
"atleast_3d",
"(",
"x3d",
")",
"x2d",
"=",
"np",
".",
"atleast_2d",
"(",
"x2d",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"x2d",
".",
"dot",
"(",
"x3d",
"[",
"i",
",",
... | 27.033333 | 20.733333 |
def chain(request):
    """shows how the XmlQuerySetChain can be used instead of @toxml decorator"""
    qsc = XmlQuerySetChain(
        foobar_models.Bar.objects.all(),
        foobar_models.Baz.objects.all(),
    )
    return HttpResponse(tree.xml(qsc), mimetype='text/xml')
"def",
"chain",
"(",
"request",
")",
":",
"bars",
"=",
"foobar_models",
".",
"Bar",
".",
"objects",
".",
"all",
"(",
")",
"bazs",
"=",
"foobar_models",
".",
"Baz",
".",
"objects",
".",
"all",
"(",
")",
"qsc",
"=",
"XmlQuerySetChain",
"(",
"bars",
","... | 46.666667 | 7.666667 |
def send_command_ack(self, device_id, action):
    """Send command, wait for gateway to respond with acknowledgment.

    Coroutine (``yield from`` style). Commands are serialized through
    ``self._ready_to_send`` so only one command is in flight at a time.

    :return: the gateway's 'ok' flag from the last ack, or the dict
        ``{'ok': False, 'message': 'timeout'}`` when no acknowledgement
        arrived within ``TIMEOUT``.
    """
    # serialize commands
    yield from self._ready_to_send.acquire()
    acknowledgement = None
    try:
        self._command_ack.clear()
        self.send_command(device_id, action)
        log.debug('waiting for acknowledgement')
        try:
            yield from asyncio.wait_for(self._command_ack.wait(),
                                        TIMEOUT.seconds, loop=self.loop)
            log.debug('packet acknowledged')
        except asyncio.TimeoutError:
            # asyncio.wait_for documents asyncio.TimeoutError; the original
            # caught the private concurrent.futures._base.TimeoutError alias,
            # which is not guaranteed across Python versions.
            acknowledgement = {'ok': False, 'message': 'timeout'}
            log.warning('acknowledge timeout')
        else:
            acknowledgement = self._last_ack.get('ok', False)
    finally:
        # allow next command
        self._ready_to_send.release()
    return acknowledgement
"def",
"send_command_ack",
"(",
"self",
",",
"device_id",
",",
"action",
")",
":",
"# serialize commands",
"yield",
"from",
"self",
".",
"_ready_to_send",
".",
"acquire",
"(",
")",
"acknowledgement",
"=",
"None",
"try",
":",
"self",
".",
"_command_ack",
".",
... | 40.083333 | 17.125 |
def get_prinz_pot(nstep, x0=0., nskip=1, dt=0.01, kT=10.0, mass=1.0, damping=1.0):
    r"""Wrapper for the Prinz model generator: build the model and sample it."""
    model = PrinzModel(dt, kT, mass=mass, damping=damping)
    return model.sample(x0, nstep, nskip=nskip)
"def",
"get_prinz_pot",
"(",
"nstep",
",",
"x0",
"=",
"0.",
",",
"nskip",
"=",
"1",
",",
"dt",
"=",
"0.01",
",",
"kT",
"=",
"10.0",
",",
"mass",
"=",
"1.0",
",",
"damping",
"=",
"1.0",
")",
":",
"pw",
"=",
"PrinzModel",
"(",
"dt",
",",
"kT",
... | 57.25 | 15.25 |
def tcc(text: str) -> "Iterator[str]":
    """
    TCC generator, generates Thai Character Clusters

    :param str text: text to be tokenized to character clusters
    :return: generator yielding subwords (character clusters)

    NOTE(review): this function is a generator, so the original ``-> str``
    annotation was misleading; ``Iterator`` should be imported from
    :mod:`typing` at module top to resolve this forward reference.
    """
    if not text or not isinstance(text, str):
        # Yield nothing for empty or non-string input (a bare ``return``
        # ends the generator; the original ``return ""`` only set the
        # unused StopIteration value).
        return
    p = 0
    while p < len(text):
        m = PAT_TCC.match(text[p:])
        # No cluster match: emit a single character and move on.
        n = m.span()[1] if m else 1
        yield text[p : p + n]
        p += n
"def",
"tcc",
"(",
"text",
":",
"str",
")",
"->",
"str",
":",
"if",
"not",
"text",
"or",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"return",
"\"\"",
"p",
"=",
"0",
"while",
"p",
"<",
"len",
"(",
"text",
")",
":",
"m",
"=",
"PAT_... | 24.333333 | 16.444444 |
def serviceViewChangerOutBox(self, limit: int = None) -> int:
    """
    Service at most `limit` number of messages from the view_changer's outBox.

    :return: the number of messages successfully serviced.
    """
    serviced = 0
    # A falsy limit (None or 0) means "drain everything".
    while self.view_changer.outBox and (not limit or serviced < limit):
        serviced += 1
        msg = self.view_changer.outBox.popleft()
        if not isinstance(msg, (InstanceChange, ViewChangeDone)):
            logger.error("Received msg {} and don't know how to handle it".
                         format(msg))
        else:
            self.send(msg)
    return serviced
"def",
"serviceViewChangerOutBox",
"(",
"self",
",",
"limit",
":",
"int",
"=",
"None",
")",
"->",
"int",
":",
"msgCount",
"=",
"0",
"while",
"self",
".",
"view_changer",
".",
"outBox",
"and",
"(",
"not",
"limit",
"or",
"msgCount",
"<",
"limit",
")",
":... | 40.875 | 20.125 |
def find_column(self, token):
    """ Compute the 1-based column of a token within its line.

    - token is a token instance carrying a ``lexpos`` offset into
      ``self.input_data``.
    """
    line_start = token.lexpos
    # Walk backwards to the character just after the previous newline
    # (or to the very start of the input).
    while line_start > 0 and self.input_data[line_start - 1] != '\n':
        line_start -= 1
    return token.lexpos - line_start + 1
"def",
"find_column",
"(",
"self",
",",
"token",
")",
":",
"i",
"=",
"token",
".",
"lexpos",
"while",
"i",
">",
"0",
":",
"if",
"self",
".",
"input_data",
"[",
"i",
"-",
"1",
"]",
"==",
"'\\n'",
":",
"break",
"i",
"-=",
"1",
"column",
"=",
"tok... | 24.75 | 13.25 |
def get_stable_entries(self, charge_to_discharge=True):
    """
    Get the stable entries.

    Args:
        charge_to_discharge: order from most charge to most discharged
            state? Default to True.

    Returns:
        A list of stable entries in the electrode, ordered by amount of the
        working ion.
    """
    ordered = list(self._stable_entries)
    # list.reverse() mutates in place and returns None, so the original
    # ``list_copy.reverse()`` made this function return None for the
    # discharge-to-charge ordering; slice-reversal returns the list.
    return ordered if charge_to_discharge else ordered[::-1]
"def",
"get_stable_entries",
"(",
"self",
",",
"charge_to_discharge",
"=",
"True",
")",
":",
"list_copy",
"=",
"list",
"(",
"self",
".",
"_stable_entries",
")",
"return",
"list_copy",
"if",
"charge_to_discharge",
"else",
"list_copy",
".",
"reverse",
"(",
")"
] | 33.642857 | 20.214286 |
def _calculate_average_field_lengths(self):
    """Calculates the average document length for this index"""
    totals = defaultdict(int)
    doc_counts = defaultdict(int)
    # Accumulate total length and document count per field.
    for field_ref_str, length in self.field_lengths.items():
        field = FieldRef.from_string(field_ref_str).field_name
        doc_counts[field] += 1
        totals[field] += length
    # Convert totals into per-document averages, in place.
    for field_name in self._fields:
        totals[field_name] /= doc_counts[field_name]
    self.average_field_length = totals
"def",
"_calculate_average_field_lengths",
"(",
"self",
")",
":",
"accumulator",
"=",
"defaultdict",
"(",
"int",
")",
"documents_with_field",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"field_ref",
",",
"length",
"in",
"self",
".",
"field_lengths",
".",
"items"... | 37.0625 | 15.75 |
def main(argv): # pylint: disable=W0613
    '''
    Main program body

    Deploys/validates the salt-thin bundle under OPTIONS.saltdir, writes the
    minion config, assembles the salt-call argument vector, runs it, and
    returns its exit code. Calls need_deployment()/need_ext() (which are
    expected not to return normally) whenever the remote side must push
    fresh archives first.
    '''
    thin_path = os.path.join(OPTIONS.saltdir, THIN_ARCHIVE)
    if os.path.isfile(thin_path):
        # A freshly-uploaded thin archive is present: verify and unpack it.
        if OPTIONS.checksum != get_hash(thin_path, OPTIONS.hashfunc):
            need_deployment()
        unpack_thin(thin_path)
        # Salt thin now is available to use
    else:
        # No archive: validate the already-deployed thin directory.
        if not sys.platform.startswith('win'):
            # scp is required for deployment on non-Windows hosts.
            scpstat = subprocess.Popen(['/bin/sh', '-c', 'command -v scp']).wait()
            if scpstat != 0:
                sys.exit(EX_SCP_NOT_FOUND)
        if os.path.exists(OPTIONS.saltdir) and not os.path.isdir(OPTIONS.saltdir):
            sys.stderr.write(
                'ERROR: salt path "{0}" exists but is'
                ' not a directory\n'.format(OPTIONS.saltdir)
            )
            sys.exit(EX_CANTCREAT)
        if not os.path.exists(OPTIONS.saltdir):
            need_deployment()
        # The deployed code must match the controller's expected checksum.
        code_checksum_path = os.path.normpath(os.path.join(OPTIONS.saltdir, 'code-checksum'))
        if not os.path.exists(code_checksum_path) or not os.path.isfile(code_checksum_path):
            sys.stderr.write('WARNING: Unable to locate current code checksum: {0}.\n'.format(code_checksum_path))
            need_deployment()
        with open(code_checksum_path, 'r') as vpo:
            cur_code_cs = vpo.readline().strip()
        if cur_code_cs != OPTIONS.code_checksum:
            sys.stderr.write('WARNING: current code checksum {0} is different to {1}.\n'.format(cur_code_cs,
                                                                                                OPTIONS.code_checksum))
            need_deployment()
        # Salt thin exists and is up-to-date - fall through and use it
    salt_call_path = os.path.join(OPTIONS.saltdir, 'salt-call')
    if not os.path.isfile(salt_call_path):
        sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
        need_deployment()
    # Write the minion configuration the salt-call invocation will use.
    with open(os.path.join(OPTIONS.saltdir, 'minion'), 'w') as config:
        config.write(OPTIONS.config + '\n')
    if OPTIONS.ext_mods:
        # Optional external modules archive: unpack if present, otherwise
        # verify the recorded version and request a push if it is stale.
        ext_path = os.path.join(OPTIONS.saltdir, EXT_ARCHIVE)
        if os.path.exists(ext_path):
            unpack_ext(ext_path)
        else:
            version_path = os.path.join(OPTIONS.saltdir, 'ext_version')
            if not os.path.exists(version_path) or not os.path.isfile(version_path):
                need_ext()
            with open(version_path, 'r') as vpo:
                cur_version = vpo.readline().strip()
            if cur_version != OPTIONS.ext_mods:
                need_ext()
    # Fix parameter passing issue
    if len(ARGS) == 1:
        argv_prepared = ARGS[0].split()
    else:
        argv_prepared = ARGS
    salt_argv = [
        get_executable(),
        salt_call_path,
        '--retcode-passthrough',
        '--local',
        '--metadata',
        '--out', 'json',
        '-l', 'quiet',
        '-c', OPTIONS.saltdir
    ]
    try:
        # A trailing --no-parse= flag is passed through to salt-call as-is.
        if argv_prepared[-1].startswith('--no-parse='):
            salt_argv.append(argv_prepared.pop(-1))
    except (IndexError, TypeError):
        pass
    salt_argv.append('--')
    salt_argv.extend(argv_prepared)
    sys.stderr.write('SALT_ARGV: {0}\n'.format(salt_argv))
    # Only emit the delimiter on *both* stdout and stderr when completely successful.
    # Yes, the flush() is necessary.
    sys.stdout.write(OPTIONS.delimiter + '\n')
    sys.stdout.flush()
    if not OPTIONS.tty:
        sys.stderr.write(OPTIONS.delimiter + '\n')
        sys.stderr.flush()
    if OPTIONS.cmd_umask is not None:
        old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
    if OPTIONS.tty:
        # In tty mode, capture output and re-emit it decoded on stdout.
        proc = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Returns bytes instead of string on python 3
        stdout, _ = proc.communicate()
        sys.stdout.write(stdout.decode(encoding=get_system_encoding(), errors="replace"))
        sys.stdout.flush()
        retcode = proc.returncode
        if OPTIONS.wipe:
            shutil.rmtree(OPTIONS.saltdir)
    elif OPTIONS.wipe:
        retcode = subprocess.call(salt_argv)
        shutil.rmtree(OPTIONS.saltdir)
    else:
        retcode = subprocess.call(salt_argv)
    if OPTIONS.cmd_umask is not None:
        # Restore the umask saved before the command ran.
        os.umask(old_umask) # pylint: disable=blacklisted-function
    return retcode
"def",
"main",
"(",
"argv",
")",
":",
"# pylint: disable=W0613",
"thin_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"OPTIONS",
".",
"saltdir",
",",
"THIN_ARCHIVE",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"thin_path",
")",
":",
"if",
"OPTI... | 38.459459 | 21.162162 |
def from_spec(spec):
    """Return a schema object from a spec.

    A spec is either a string for a scalar type, or a list of 0 or 1 specs,
    or a dictionary with two elements: {'fields': { ... }, required: [...]}.
    """
    if spec == '':
        return any_schema
    if framework.is_str(spec):
        # Scalar type: the name must be one of the known scalar types.
        if spec in SCALAR_TYPES:
            return ScalarSchema(spec)
        raise exceptions.SchemaError('Not a valid schema type: %r' % spec)
    if framework.is_list(spec):
        element_spec = spec[0] if len(spec) else any_schema
        return ListSchema(element_spec)
    if framework.is_tuple(spec):
        fields = spec.get('fields', {})
        required = spec.get('required', [])
        return TupleSchema(fields, required)
    raise exceptions.SchemaError('Not valid schema spec; %r' % spec)
"def",
"from_spec",
"(",
"spec",
")",
":",
"if",
"spec",
"==",
"''",
":",
"return",
"any_schema",
"if",
"framework",
".",
"is_str",
"(",
"spec",
")",
":",
"# Scalar type",
"if",
"spec",
"not",
"in",
"SCALAR_TYPES",
":",
"raise",
"exceptions",
".",
"Schem... | 31.045455 | 23.363636 |
def is_inside_bounds(value, params):
    """Return ``True`` if ``value`` is contained in ``params``.

    This method supports broadcasting in the sense that for
    ``params.ndim >= 2``, if more than one value is given, the inputs
    are broadcast against each other.

    Parameters
    ----------
    value : `array-like`
        Value(s) to be checked. For several inputs, the final bool
        tells whether all inputs pass the check or not.
    params : `IntervalProd`
        Set in which the value is / the values are supposed to lie.

    Returns
    -------
    is_inside_bounds : bool
        ``True`` is all values lie in ``params``, ``False`` otherwise.

    Examples
    --------
    Check a single point:

    >>> params = odl.IntervalProd([0, 0], [1, 2])
    >>> is_inside_bounds([0, 0], params)
    True
    >>> is_inside_bounds([0, -1], params)
    False

    Using broadcasting:

    >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None]
    >>> pts_ax1 = np.array([2, 0, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    True
    >>> pts_ax1 = np.array([-2, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    False
    """
    if value in params:
        # Single parameter contained directly.
        return True
    if params.ndim == 1:
        return params.contains_all(np.ravel(value))
    # Broadcast the per-axis inputs against each other, then flatten so
    # the bounds can be checked column-wise.
    bcast = np.broadcast_arrays(*value)
    flat = np.vstack(bcast).reshape(params.ndim, -1)
    return params.contains_all(flat)
"def",
"is_inside_bounds",
"(",
"value",
",",
"params",
")",
":",
"if",
"value",
"in",
"params",
":",
"# Single parameter",
"return",
"True",
"else",
":",
"if",
"params",
".",
"ndim",
"==",
"1",
":",
"return",
"params",
".",
"contains_all",
"(",
"np",
".... | 30.365385 | 20.365385 |
def IndexOfNth(s, value, n):
    """Gets the index of Nth occurrence of a given character in a string

    :param str s:
        Input string
    :param char value:
        Input char to be searched.
    :param int n:
        Nth occurrence of char to be searched.

    :return:
        Index of the Nth occurrence in the string, or -1 if there are
        fewer than n occurrences.
    :rtype: int
    """
    remaining = n
    # enumerate replaces the Python-2-only xrange(len(s)) index loop.
    for i, ch in enumerate(s):
        if ch == value:
            remaining -= 1
            if remaining == 0:
                return i
    return -1
"def",
"IndexOfNth",
"(",
"s",
",",
"value",
",",
"n",
")",
":",
"remaining",
"=",
"n",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"s",
")",
")",
":",
"if",
"s",
"[",
"i",
"]",
"==",
"value",
":",
"remaining",
"-=",
"1",
"if",
"... | 22.954545 | 18.409091 |
def csv(cls,
        d,
        order=None,
        header=None,
        sort_keys=True):
    """
    prints a table in csv format

    :param d: A a dict with dicts of the same type.
    :type d: dict
    :param order: The order in which the columns are printed.
        The order is specified by the key names of the dict.
    :type order: list
    :param header: The Header of each of the columns; accepted for API
        compatibility but currently unused (the ``order`` names are
        emitted as the header row).
    :type header: list or tuple of field names
    :param sort_keys: TODO: not yet implemented
    :type sort_keys: bool
    :return: a string representing the table in csv format, or None for
        empty input
    """
    # Guard before touching d: the original indexed list(d)[0] first,
    # which crashed on None (TypeError) and {} (IndexError).
    if d is None or d == {}:
        return None
    first_element = list(d)[0]
    if order is None:
        # Default column order: the keys of the first row's dict.
        order = list(d[first_element])
    lines = [",".join(str(col) for col in order)]
    for job in d:
        row = []
        for attribute in order:
            try:
                row.append(d[job][attribute])
            except (KeyError, TypeError, IndexError):
                # Missing or non-subscriptable cell renders as "None".
                row.append("None")
        lines.append(",".join(str(cell) for cell in row))
    return "".join(line + "\n" for line in lines)
"def",
"csv",
"(",
"cls",
",",
"d",
",",
"order",
"=",
"None",
",",
"header",
"=",
"None",
",",
"sort_keys",
"=",
"True",
")",
":",
"first_element",
"=",
"list",
"(",
"d",
")",
"[",
"0",
"]",
"def",
"_keys",
"(",
")",
":",
"return",
"list",
"("... | 28.362069 | 17.948276 |
def update_traded(self, traded_update):
    """:param traded_update: [price, size]
    """
    # An empty/None update clears the current traded state.
    if traded_update:
        self.traded.update(traded_update)
    else:
        self.traded.clear()
"def",
"update_traded",
"(",
"self",
",",
"traded_update",
")",
":",
"if",
"not",
"traded_update",
":",
"self",
".",
"traded",
".",
"clear",
"(",
")",
"else",
":",
"self",
".",
"traded",
".",
"update",
"(",
"traded_update",
")"
] | 30.571429 | 7.571429 |
def bitsetxor(b1, b2):
    """
    If b1 and b2 would be ``int`` s this would be ``b1 ^ b2`` :

    >>> from py_register_machine2.engine_tools.operations import bitsetxor
    >>> b1 = [1, 1, 1, 1]
    >>> b2 = [1, 1, 0, 1]
    >>> bitsetxor(b1, b2)
    [0, 0, 1, 0]
    >>> bin(0b1111 ^ 0b1101)
    '0b10'
    """
    # Pairwise XOR as a comprehension; zip truncates to the shorter
    # input, matching the original loop's behavior.
    return [bit1 ^ bit2 for bit1, bit2 in zip(b1, b2)]
"def",
"bitsetxor",
"(",
"b1",
",",
"b2",
")",
":",
"res",
"=",
"[",
"]",
"for",
"bit1",
",",
"bit2",
"in",
"zip",
"(",
"b1",
",",
"b2",
")",
":",
"res",
".",
"append",
"(",
"bit1",
"^",
"bit2",
")",
"return",
"res"
] | 21.8125 | 20.0625 |
async def modify(self, **kwargs):
    '''
    Corresponds to PATCH request with a resource identifier, modifying a single document in the database

    :raises NotFound: when no document matches the given pk
    :raises BadRequest: for any other failure (bad pk, database error, ...)
    '''
    try:
        pk = self.pk_type(kwargs['pk'])
        # modify is a class method on MongoCollectionMixin
        result = await self._meta.object_class.modify(self.db, key=pk, data=self.data)
        if result is None:
            raise NotFound('Object matching the given {} was not found'.format(self.pk))
        return await result.serialize()
    except NotFound:
        # Propagate 404s unchanged; the original generic handler converted
        # them into BadRequest, masking missing objects as client errors.
        raise
    except Exception as ex:
        logger.exception(ex)
        raise BadRequest(ex)
"async",
"def",
"modify",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"pk",
"=",
"self",
".",
"pk_type",
"(",
"kwargs",
"[",
"'pk'",
"]",
")",
"# modify is a class method on MongoCollectionMixin",
"result",
"=",
"await",
"self",
".",
"_meta... | 45 | 23.285714 |
def check_sysdeps(vext_files):
    """
    Check that imports in 'test_imports' succeed
    otherwise display message in 'install_hints'
    """
    @run_in_syspy
    def probe(*modules):
        # Attempt each import in the system python; map name -> success.
        status = {}
        for name in modules:
            if not name:
                continue
            try:
                __import__(name)
            except ImportError:
                status[name] = False
            else:
                status[name] = True
        return status

    success = True
    for vext_file in vext_files:
        with open(vext_file) as f:
            vext = open_spec(f)
            hint = " ".join(vext.get('install_hints', ['System dependencies not found']))
            modules = vext.get('test_import', '')
            logger.debug("%s test imports of: %s", vext_file, modules)
            result = probe(*modules)
            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                for module_name, ok in result.items():
                    logger.debug("%s: %s", module_name, ok)
            if not all(result.values()):
                success = False
                print(hint)
    return success
"def",
"check_sysdeps",
"(",
"vext_files",
")",
":",
"@",
"run_in_syspy",
"def",
"run",
"(",
"*",
"modules",
")",
":",
"result",
"=",
"{",
"}",
"for",
"m",
"in",
"modules",
":",
"if",
"m",
":",
"try",
":",
"__import__",
"(",
"m",
")",
"result",
"["... | 31.588235 | 15.588235 |
def _read_vector(ctx: ReaderContext) -> vector.Vector:
    """Read a vector element from the input stream."""
    opening = ctx.reader.advance()
    # The caller dispatches on '[', so the opening bracket must be present.
    assert opening == "["
    return _read_coll(ctx, vector.vector, "]", "vector")
"def",
"_read_vector",
"(",
"ctx",
":",
"ReaderContext",
")",
"->",
"vector",
".",
"Vector",
":",
"start",
"=",
"ctx",
".",
"reader",
".",
"advance",
"(",
")",
"assert",
"start",
"==",
"\"[\"",
"return",
"_read_coll",
"(",
"ctx",
",",
"vector",
".",
"v... | 43.8 | 11 |
def _get_last_node_for_prfx(self, node, key_prfx, seen_prfx):
    """ get last node for the given prefix, also update `seen_prfx` to track the path already traversed

    :param node: node in form of list, or BLANK_NODE
    :param key_prfx: prefix (as nibbles) still to be matched
    :param seen_prfx: prefix already seen, updates with each call
    :return:
        BLANK_NODE if does not exist, otherwise value or hash
    """
    node_type = self._get_node_type(node)
    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE
    if node_type == NODE_TYPE_BRANCH:
        # already reach the expected node
        if not key_prfx:
            return node
        # Descend into the child selected by the next prefix nibble and
        # record that nibble as traversed.
        sub_node = self._decode_to_node(node[key_prfx[0]])
        seen_prfx.append(key_prfx[0])
        return self._get_last_node_for_prfx(sub_node, key_prfx[1:], seen_prfx)
    # key value node
    curr_key = key_nibbles_from_key_value_node(node)
    if node_type == NODE_TYPE_LEAF:
        # Return this node only if the complete prefix is part of the current key
        if starts_with(curr_key, key_prfx):
            # Do not update `seen_prefix` as node has the prefix
            return node
        else:
            return BLANK_NODE
    if node_type == NODE_TYPE_EXTENSION:
        # traverse child nodes
        if len(key_prfx) > len(curr_key):
            # Prefix is longer than this extension's key: it must match the
            # whole extension key before descending into the child.
            if starts_with(key_prfx, curr_key):
                sub_node = self._get_inner_node_from_extension(node)
                seen_prfx.extend(curr_key)
                return self._get_last_node_for_prfx(sub_node,
                                                    key_prfx[len(curr_key):],
                                                    seen_prfx)
            else:
                return BLANK_NODE
        else:
            # Prefix ends inside this extension's key: the extension itself
            # is the last node if its key starts with the remaining prefix.
            if starts_with(curr_key, key_prfx):
                # Do not update `seen_prefix` as node has the prefix
                return node
            else:
                return BLANK_NODE
"def",
"_get_last_node_for_prfx",
"(",
"self",
",",
"node",
",",
"key_prfx",
",",
"seen_prfx",
")",
":",
"node_type",
"=",
"self",
".",
"_get_node_type",
"(",
"node",
")",
"if",
"node_type",
"==",
"NODE_TYPE_BLANK",
":",
"return",
"BLANK_NODE",
"if",
"node_typ... | 41.1 | 18.02 |
def setup_console_logger(log_level='error', log_format=None, date_format=None):
    '''
    Setup the console logger

    :param log_level: name of the level (looked up in LOG_LEVELS,
        defaulting to ERROR); None is treated as 'warning'
    :param log_format: logging format string (default
        '[%(levelname)-8s] %(message)s')
    :param date_format: datefmt for the formatter (default '%H:%M:%S')

    Side effects: installs a StreamHandler on the root logger (reusing an
    existing stderr handler if one is present) and sets the module-level
    __CONSOLE_CONFIGURED / __LOGGING_CONSOLE_HANDLER globals.
    '''
    if is_console_configured():
        logging.getLogger(__name__).warning('Console logging already configured')
        return
    # Remove the temporary logging handler
    __remove_temp_logging_handler()
    if log_level is None:
        log_level = 'warning'
    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    setLogRecordFactory(SaltColorLogRecord)
    handler = None
    # for/else: reuse an existing stderr stream handler if the loop breaks;
    # otherwise (no break) create a fresh one in the else clause.
    for handler in logging.root.handlers:
        if handler is LOGGING_STORE_HANDLER:
            continue
        if not hasattr(handler, 'stream'):
            # Not a stream handler, continue
            continue
        if handler.stream is sys.stderr:
            # There's already a logging handler outputting to sys.stderr
            break
    else:
        handler = StreamHandler(sys.stderr)
    handler.setLevel(level)
    # Set the default console formatter config
    if not log_format:
        log_format = '[%(levelname)-8s] %(message)s'
    if not date_format:
        date_format = '%H:%M:%S'
    formatter = logging.Formatter(log_format, datefmt=date_format)
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
    global __CONSOLE_CONFIGURED
    global __LOGGING_CONSOLE_HANDLER
    __CONSOLE_CONFIGURED = True
    __LOGGING_CONSOLE_HANDLER = handler
"def",
"setup_console_logger",
"(",
"log_level",
"=",
"'error'",
",",
"log_format",
"=",
"None",
",",
"date_format",
"=",
"None",
")",
":",
"if",
"is_console_configured",
"(",
")",
":",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"warning",
"(",
... | 27.897959 | 20.061224 |
def fit(self, X, y):
    """Scikit-learn required: Computes the feature importance scores from the training data.

    Builds all internal state needed for scoring (label typing, attribute
    typing, the pairwise distance array), runs the Relief-based scoring
    algorithm, and stores results in ``feature_importances_`` and
    ``top_features_``.

    Parameters
    ----------
    X: array-like {n_samples, n_features}
        Training instances to compute the feature importance scores from
    y: array-like {n_samples}
        Training labels
    Returns
    -------
    Copy of the ReliefF instance
    """
    self._X = X # matrix of predictive variables ('independent variables')
    self._y = y # vector of values for outcome variable ('dependent variable')
    # Set up the properties for ReliefF -------------------------------------------------------------------------------------
    self._datalen = len(self._X) # Number of training instances ('n')
    """"Below: Handles special case where user requests that a proportion of training instances be neighbors for
    ReliefF rather than a specified 'k' number of neighbors. Note that if k is specified, then k 'hits' and k
    'misses' will be used to update feature scores.  Thus total number of neighbors is 2k. If instead a proportion
    is specified (say 0.1 out of 1000 instances) this represents the total number of neighbors (e.g. 100). In this
    case, k would be set to 50 (i.e. 50 hits and 50 misses). """
    if hasattr(self, 'n_neighbors') and type(self.n_neighbors) is float:
        # Halve the number of neighbors because ReliefF uses n_neighbors matches and n_neighbors misses
        self.n_neighbors = int(self.n_neighbors * self._datalen * 0.5)
    # Number of unique outcome (label) values (used to determine outcome variable type)
    self._label_list = list(set(self._y))
    # Determine if label is discrete
    discrete_label = (len(self._label_list) <= self.discrete_threshold)
    # Identify label type (binary, multiclass, or continuous)
    if discrete_label:
        if len(self._label_list) == 2:
            self._class_type = 'binary'
            self.mcmap = 0
        elif len(self._label_list) > 2:
            self._class_type = 'multiclass'
            self.mcmap = self._getMultiClassMap()
        else:
            # A single unique label means scores would be meaningless
            raise ValueError('All labels are of the same class.')
    else:
        self._class_type = 'continuous'
        self.mcmap = 0
    # Training labels standard deviation -- only used if the training labels are continuous
    self._labels_std = 0.
    if len(self._label_list) > self.discrete_threshold:
        # ddof=1: sample (not population) standard deviation
        self._labels_std = np.std(self._y, ddof=1)
    self._num_attributes = len(self._X[0]) # Number of features in training data
    # Number of missing data values in predictor variable matrix.
    self._missing_data_count = np.isnan(self._X).sum()
    """Assign internal headers for the features (scikit-learn does not accept external headers from dataset):
    The pre_normalize() function relies on the headers being ordered, e.g., X01, X02, etc.
    If this is changed, then the sort in the pre_normalize() function needs to be adapted as well. """
    xlen = len(self._X[0])
    mxlen = len(str(xlen + 1))
    # Zero-padded names (X01, X02, ...) keep lexicographic order == feature order
    self._headers = ['X{}'.format(str(i).zfill(mxlen)) for i in range(1, xlen + 1)]
    start = time.time() # Runtime tracking
    # Determine data types for all features/attributes in training data (i.e. discrete or continuous)
    C = D = False
    # Examines each feature and applies discrete_threshold to determine variable type.
    self.attr = self._get_attribute_info()
    for key in self.attr.keys():
        if self.attr[key][0] == 'discrete':
            D = True
        if self.attr[key][0] == 'continuous':
            C = True
    # For downstream computational efficiency, determine if dataset is comprised of all discrete, all continuous, or a mix of discrete/continuous features.
    if C and D:
        self.data_type = 'mixed'
    elif D and not C:
        self.data_type = 'discrete'
    elif C and not D:
        self.data_type = 'continuous'
    else:
        raise ValueError('Invalid data type in data set.')
    #--------------------------------------------------------------------------------------------------------------------
    # Compute the distance array between all data points ----------------------------------------------------------------
    # For downstream efficiency, separate features in dataset by type (i.e. discrete/continuous)
    diffs, cidx, didx = self._dtype_array()
    cdiffs = diffs[cidx] # max/min continuous value difference for continuous features.
    xc = self._X[:, cidx] # Subset of continuous-valued feature data
    xd = self._X[:, didx] # Subset of discrete-valued feature data
    """ For efficiency, the distance array is computed more efficiently for data with no missing values.
    This distance array will only be used to identify nearest neighbors. """
    if self._missing_data_count > 0:
        self._distance_array = self._distarray_missing(xc, xd, cdiffs)
    else:
        self._distance_array = self._distarray_no_missing(xc, xd)
    if self.verbose:
        elapsed = time.time() - start
        print('Created distance array in {} seconds.'.format(elapsed))
        print('Feature scoring under way ...')
    start = time.time()
    #--------------------------------------------------------------------------------------------------------------------
    # Run remainder of algorithm (i.e. identification of 'neighbors' for each instance, and feature scoring).------------
    # Stores feature importance scores for ReliefF or respective Relief-based algorithm.
    self.feature_importances_ = self._run_algorithm()
    # Delete the internal distance array because it is no longer needed
    del self._distance_array
    if self.verbose:
        elapsed = time.time() - start
        print('Completed scoring in {} seconds.'.format(elapsed))
    # Compute indices of top features
    self.top_features_ = np.argsort(self.feature_importances_)[::-1]
    return self
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"_X",
"=",
"X",
"# matrix of predictive variables ('independent variables')",
"self",
".",
"_y",
"=",
"y",
"# vector of values for outcome variable ('dependent variable')",
"# Set up the properties for ... | 48.171875 | 28.40625 |
def calculate_gru_output_shapes(operator):
    '''
    See GRU's conversion function for its output shapes.
    '''
    check_input_and_output_numbers(operator, input_count_range=[1, 2],
                                   output_count_range=[1, 2])
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])

    shape_in = operator.inputs[0].type.shape
    if len(shape_in) not in [2, 4]:
        raise RuntimeError('Input must be a [N, C]- or [N, C, 1, 1]-tensor')

    op_kind = operator.type
    if op_kind == 'gru':
        params = operator.raw_operator.gru
    elif op_kind == 'simpleRecurrent':
        params = operator.raw_operator.simpleRecurrent
    else:
        raise RuntimeError('Only GRU and SimpleRNN are supported')

    # The following line is more accurate but it may break some tests
    # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [2, params.outputVectorSize]
    if params.sequenceOutput:
        leading_dim = shape_in[0]
    else:
        leading_dim = 'None'  # 'None' should be 1
    output_shape = [leading_dim, params.outputVectorSize]
    state_shape = [1, params.outputVectorSize]

    # TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function
    if len(operator.inputs) > 1:
        # The initial hidden state of a single sequence
        operator.inputs[1].type.shape = state_shape

    operator.outputs[0].type.shape = output_shape
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
"def",
"calculate_gru_output_shapes",
"(",
"operator",
")",
":",
"check_input_and_output_numbers",
"(",
"operator",
",",
"input_count_range",
"=",
"[",
"1",
",",
"2",
"]",
",",
"output_count_range",
"=",
"[",
"1",
",",
"2",
"]",
")",
"check_input_and_output_types"... | 45.40625 | 28.21875 |
def check_name(self, name=None):
    '''
    Resolve the plugin's check name.

    A truthy *name* is stored into ``plugin_info`` first. The stored value
    wins whenever it is not ``None``; otherwise the class name is returned.
    '''
    if name:
        self.plugin_info['check_name'] = name
    stored = self.plugin_info['check_name']
    return stored if stored is not None else self.__class__.__name__
"def",
"check_name",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"self",
".",
"plugin_info",
"[",
"'check_name'",
"]",
"=",
"name",
"if",
"self",
".",
"plugin_info",
"[",
"'check_name'",
"]",
"is",
"not",
"None",
":",
"return",
... | 30.75 | 19.416667 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.