code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def split_interface_name(interface, ch_grp=0):
"""Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name
"""
interface = interface.lower()
if ch_grp != 0:
intf_type = 'port-channel'
port = str(ch_grp)
elif ':' in interface:
intf_type, port = interface.split(':')
elif interface.startswith('ethernet'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('ethernet')
elif interface.startswith('port-channel'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('port-channel')
else:
intf_type, port = 'ethernet', interface
return intf_type, port | Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name | Below is the instruction that describes the task:
### Input:
Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name
### Response:
def split_interface_name(interface, ch_grp=0):
"""Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name
"""
interface = interface.lower()
if ch_grp != 0:
intf_type = 'port-channel'
port = str(ch_grp)
elif ':' in interface:
intf_type, port = interface.split(':')
elif interface.startswith('ethernet'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('ethernet')
elif interface.startswith('port-channel'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('port-channel')
else:
intf_type, port = 'ethernet', interface
return intf_type, port |
def get_profiling_stats(self):
"""
Returns a pstat.Stats instance with profiling results if `run` was called with `should_profile=True`.
Otherwise, returns `None`.
"""
profile_file_path = os.path.join(self.workspace, 'profiling.bin')
try:
return pstats.Stats(profile_file_path)
except Exception as e:
logger.debug(f'Failed to get profiling stats: {e}')
return None | Returns a pstat.Stats instance with profiling results if `run` was called with `should_profile=True`.
Otherwise, returns `None`. | Below is the instruction that describes the task:
### Input:
Returns a pstat.Stats instance with profiling results if `run` was called with `should_profile=True`.
Otherwise, returns `None`.
### Response:
def get_profiling_stats(self):
"""
Returns a pstat.Stats instance with profiling results if `run` was called with `should_profile=True`.
Otherwise, returns `None`.
"""
profile_file_path = os.path.join(self.workspace, 'profiling.bin')
try:
return pstats.Stats(profile_file_path)
except Exception as e:
logger.debug(f'Failed to get profiling stats: {e}')
return None |
def compare_list_iter(propval_a, propval_b, fs_a=None, fs_b=None,
options=None):
"""Generator for comparing 'simple' lists when they are encountered. This
does not currently recurse further. Arguments are as per other
``compare_``\ *X* functions.
"""
if fs_a is None:
fs_a = FieldSelector(tuple())
fs_b = FieldSelector(tuple())
if not options:
options = DiffOptions()
propvals = dict(a=propval_a, b=propval_b)
values = dict()
indices = dict()
for x in "a", "b":
propval_x = propvals[x]
vals = values[x] = set()
rev_key = indices[x] = dict()
seen = collections.Counter()
for i, v in collection_generator(propval_x):
v = options.normalize_item(
v, propval_a if options.duck_type else propval_x
)
if not v.__hash__:
v = repr(v)
if v is not _nothing or not options.ignore_empty_slots:
vals.add((v, seen[v]))
rev_key[(v, seen[v])] = i
seen[v] += 1
removed = values['a'] - values['b']
added = values['b'] - values['a']
if options.unchanged or options.moved:
unchanged = values['a'] & values['b']
for v, seq in unchanged:
a_idx = indices['a'][v, seq]
b_idx = indices['b'][v, seq]
if options.moved and a_idx != b_idx:
yield DiffInfo(
diff_type=DiffTypes.MOVED,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
elif options.unchanged:
yield DiffInfo(
diff_type=DiffTypes.NO_CHANGE,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
removed_idx = set(indices['a'][v, seq] for v, seq in removed)
added_idx = set(indices['b'][v, seq] for v, seq in added)
modified_idx = set(removed_idx.intersection(added_idx))
for v, seq in removed:
a_key = indices['a'][v, seq]
if a_key in modified_idx:
continue
selector = fs_a + [a_key]
yield DiffInfo(
diff_type=DiffTypes.REMOVED,
base=selector,
other=fs_b,
)
for v, seq in added:
b_key = indices['b'][v, seq]
if b_key in modified_idx:
continue
selector = fs_b + [b_key]
yield DiffInfo(
diff_type=DiffTypes.ADDED,
base=fs_a,
other=selector,
)
for idx in modified_idx:
yield DiffInfo(
diff_type=DiffTypes.MODIFIED,
base=fs_a + [idx],
other=fs_b + [idx],
) | Generator for comparing 'simple' lists when they are encountered. This
does not currently recurse further. Arguments are as per other
``compare_``\ *X* functions. | Below is the instruction that describes the task:
### Input:
Generator for comparing 'simple' lists when they are encountered. This
does not currently recurse further. Arguments are as per other
``compare_``\ *X* functions.
### Response:
def compare_list_iter(propval_a, propval_b, fs_a=None, fs_b=None,
options=None):
"""Generator for comparing 'simple' lists when they are encountered. This
does not currently recurse further. Arguments are as per other
``compare_``\ *X* functions.
"""
if fs_a is None:
fs_a = FieldSelector(tuple())
fs_b = FieldSelector(tuple())
if not options:
options = DiffOptions()
propvals = dict(a=propval_a, b=propval_b)
values = dict()
indices = dict()
for x in "a", "b":
propval_x = propvals[x]
vals = values[x] = set()
rev_key = indices[x] = dict()
seen = collections.Counter()
for i, v in collection_generator(propval_x):
v = options.normalize_item(
v, propval_a if options.duck_type else propval_x
)
if not v.__hash__:
v = repr(v)
if v is not _nothing or not options.ignore_empty_slots:
vals.add((v, seen[v]))
rev_key[(v, seen[v])] = i
seen[v] += 1
removed = values['a'] - values['b']
added = values['b'] - values['a']
if options.unchanged or options.moved:
unchanged = values['a'] & values['b']
for v, seq in unchanged:
a_idx = indices['a'][v, seq]
b_idx = indices['b'][v, seq]
if options.moved and a_idx != b_idx:
yield DiffInfo(
diff_type=DiffTypes.MOVED,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
elif options.unchanged:
yield DiffInfo(
diff_type=DiffTypes.NO_CHANGE,
base=fs_a + [a_idx],
other=fs_b + [b_idx],
)
removed_idx = set(indices['a'][v, seq] for v, seq in removed)
added_idx = set(indices['b'][v, seq] for v, seq in added)
modified_idx = set(removed_idx.intersection(added_idx))
for v, seq in removed:
a_key = indices['a'][v, seq]
if a_key in modified_idx:
continue
selector = fs_a + [a_key]
yield DiffInfo(
diff_type=DiffTypes.REMOVED,
base=selector,
other=fs_b,
)
for v, seq in added:
b_key = indices['b'][v, seq]
if b_key in modified_idx:
continue
selector = fs_b + [b_key]
yield DiffInfo(
diff_type=DiffTypes.ADDED,
base=fs_a,
other=selector,
)
for idx in modified_idx:
yield DiffInfo(
diff_type=DiffTypes.MODIFIED,
base=fs_a + [idx],
other=fs_b + [idx],
) |
def set_cluster_fields(self, cluster_fields):
''' Tell the clusterizer the meaning of the field names.
The cluster_fields parameter is a dict, e.g., {"new filed name": "standard field name"}.
'''
if not cluster_fields:
cluster_fields_mapping_inverse = {}
cluster_fields_mapping = {}
else:
# Create also the inverse dictionary for faster lookup
cluster_fields_mapping_inverse = dict((k, v) for k, v in cluster_fields.items())
cluster_fields_mapping = dict((v, k) for k, v in cluster_fields.items())
for old_name, new_name in self._default_cluster_fields_mapping.items():
if old_name not in cluster_fields_mapping:
cluster_fields_mapping[old_name] = new_name
cluster_fields_mapping_inverse[new_name] = old_name
self._cluster_fields_mapping = cluster_fields_mapping
self._cluster_fields_mapping_inverse = cluster_fields_mapping_inverse | Tell the clusterizer the meaning of the field names.
The cluster_fields parameter is a dict, e.g., {"new filed name": "standard field name"}. | Below is the instruction that describes the task:
### Input:
Tell the clusterizer the meaning of the field names.
The cluster_fields parameter is a dict, e.g., {"new filed name": "standard field name"}.
### Response:
def set_cluster_fields(self, cluster_fields):
''' Tell the clusterizer the meaning of the field names.
The cluster_fields parameter is a dict, e.g., {"new filed name": "standard field name"}.
'''
if not cluster_fields:
cluster_fields_mapping_inverse = {}
cluster_fields_mapping = {}
else:
# Create also the inverse dictionary for faster lookup
cluster_fields_mapping_inverse = dict((k, v) for k, v in cluster_fields.items())
cluster_fields_mapping = dict((v, k) for k, v in cluster_fields.items())
for old_name, new_name in self._default_cluster_fields_mapping.items():
if old_name not in cluster_fields_mapping:
cluster_fields_mapping[old_name] = new_name
cluster_fields_mapping_inverse[new_name] = old_name
self._cluster_fields_mapping = cluster_fields_mapping
self._cluster_fields_mapping_inverse = cluster_fields_mapping_inverse |
def addTable(D):
"""
Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset)
"""
_swap = {
"1": "measurement",
"2": "summary",
"3": "ensemble",
"4": "distribution"
}
print("What type of table would you like to add?\n"
"1: measurement\n"
"2: summary\n"
"3: ensemble (under development)\n"
"4: distribution (under development)\n"
"\n Note: if you want to add a whole model, use the addModel() function")
_ans = input(">")
if _ans in ["3", "4"]:
print("I don't know how to do that yet.")
# if this is a summary or measurement, split the csv into each column
elif _ans in ["1", "2"]:
# read in a csv file. have the user point to it
print("Locate the CSV file with the values for this table: ")
_path, _files = browse_dialog_file()
_path = _confirm_file_path(_files)
_values = read_csv_from_file(_path)
_table = _build_table(_values)
_placement = _prompt_placement(D, _swap[_ans])
D = _put_table(D, _placement, _table)
else:
print("That's not a valid option")
return D | Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset) | Below is the instruction that describes the task:
### Input:
Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset)
### Response:
def addTable(D):
"""
Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset)
"""
_swap = {
"1": "measurement",
"2": "summary",
"3": "ensemble",
"4": "distribution"
}
print("What type of table would you like to add?\n"
"1: measurement\n"
"2: summary\n"
"3: ensemble (under development)\n"
"4: distribution (under development)\n"
"\n Note: if you want to add a whole model, use the addModel() function")
_ans = input(">")
if _ans in ["3", "4"]:
print("I don't know how to do that yet.")
# if this is a summary or measurement, split the csv into each column
elif _ans in ["1", "2"]:
# read in a csv file. have the user point to it
print("Locate the CSV file with the values for this table: ")
_path, _files = browse_dialog_file()
_path = _confirm_file_path(_files)
_values = read_csv_from_file(_path)
_table = _build_table(_values)
_placement = _prompt_placement(D, _swap[_ans])
D = _put_table(D, _placement, _table)
else:
print("That's not a valid option")
return D |
def start(self, driver=None):
"""Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform.
"""
if driver is not None:
assert driver in [
'alsa',
'oss',
'jack',
'portaudio',
'sndmgr',
'coreaudio',
'Direct Sound',
'dsound',
'pulseaudio'
]
fluid_settings_setstr(self.settings, 'audio.driver', driver)
self.audio_driver = new_fluid_audio_driver(self.settings, self.synth) | Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform. | Below is the instruction that describes the task:
### Input:
Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform.
### Response:
def start(self, driver=None):
"""Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform.
"""
if driver is not None:
assert driver in [
'alsa',
'oss',
'jack',
'portaudio',
'sndmgr',
'coreaudio',
'Direct Sound',
'dsound',
'pulseaudio'
]
fluid_settings_setstr(self.settings, 'audio.driver', driver)
self.audio_driver = new_fluid_audio_driver(self.settings, self.synth) |
def get_all_services(resource_root, cluster_name="default", view=None):
"""
Get all services
@param resource_root: The root Resource object.
@param cluster_name: Cluster name
@return: A list of ApiService objects.
"""
return call(resource_root.get,
SERVICES_PATH % (cluster_name,),
ApiService, True, params=view and dict(view=view) or None) | Get all services
@param resource_root: The root Resource object.
@param cluster_name: Cluster name
@return: A list of ApiService objects. | Below is the instruction that describes the task:
### Input:
Get all services
@param resource_root: The root Resource object.
@param cluster_name: Cluster name
@return: A list of ApiService objects.
### Response:
def get_all_services(resource_root, cluster_name="default", view=None):
"""
Get all services
@param resource_root: The root Resource object.
@param cluster_name: Cluster name
@return: A list of ApiService objects.
"""
return call(resource_root.get,
SERVICES_PATH % (cluster_name,),
ApiService, True, params=view and dict(view=view) or None) |
def get_analysis_dict( root, pos, form ):
''' Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as
EstNLTK's analysis dict:
{
"clitic": string,
"ending": string,
"form": string,
"partofspeech": string,
"root": string
},
Returns the dict;
'''
import sys
result = { CLITIC:"", ENDING:"", FORM:form, POSTAG:pos, ROOT:"" }
breakpoint = -1
for i in range(len(root)-1, -1, -1):
if root[i] == '+':
breakpoint = i
break
if breakpoint == -1:
result[ROOT] = root
result[ENDING] = "0"
if not re.match("^\W+$", root):
try:
print( " No breakpoint found from: ", root, pos, form, file=sys.stderr )
except UnicodeEncodeError:
print( " No breakpoint found from input *root*!", file=sys.stderr )
else:
result[ROOT] = root[0:breakpoint]
result[ENDING] = root[breakpoint+1:]
if result[ENDING].endswith('ki') and len(result[ENDING]) > 2:
result[CLITIC] = 'ki'
result[ENDING] = re.sub('ki$', '', result[ENDING])
if result[ENDING].endswith('gi') and len(result[ENDING]) > 2:
result[CLITIC] = 'gi'
result[ENDING] = re.sub('gi$', '', result[ENDING])
return result | Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as
EstNLTK's analysis dict:
{
"clitic": string,
"ending": string,
"form": string,
"partofspeech": string,
"root": string
},
Returns the dict; | Below is the instruction that describes the task:
### Input:
Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as
EstNLTK's analysis dict:
{
"clitic": string,
"ending": string,
"form": string,
"partofspeech": string,
"root": string
},
Returns the dict;
### Response:
def get_analysis_dict( root, pos, form ):
''' Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as
EstNLTK's analysis dict:
{
"clitic": string,
"ending": string,
"form": string,
"partofspeech": string,
"root": string
},
Returns the dict;
'''
import sys
result = { CLITIC:"", ENDING:"", FORM:form, POSTAG:pos, ROOT:"" }
breakpoint = -1
for i in range(len(root)-1, -1, -1):
if root[i] == '+':
breakpoint = i
break
if breakpoint == -1:
result[ROOT] = root
result[ENDING] = "0"
if not re.match("^\W+$", root):
try:
print( " No breakpoint found from: ", root, pos, form, file=sys.stderr )
except UnicodeEncodeError:
print( " No breakpoint found from input *root*!", file=sys.stderr )
else:
result[ROOT] = root[0:breakpoint]
result[ENDING] = root[breakpoint+1:]
if result[ENDING].endswith('ki') and len(result[ENDING]) > 2:
result[CLITIC] = 'ki'
result[ENDING] = re.sub('ki$', '', result[ENDING])
if result[ENDING].endswith('gi') and len(result[ENDING]) > 2:
result[CLITIC] = 'gi'
result[ENDING] = re.sub('gi$', '', result[ENDING])
return result |
def ValidateCertificateHostname(cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = GetValidHostsForCert(cert)
boto.log.debug(
"validating server certificate: hostname=%s, certificate hosts=%s",
hostname, hosts)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False | Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate. | Below is the instruction that describes the task:
### Input:
Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
### Response:
def ValidateCertificateHostname(cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = GetValidHostsForCert(cert)
boto.log.debug(
"validating server certificate: hostname=%s, certificate hosts=%s",
hostname, hosts)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False |
def classify(self, gadget):
"""Classify gadgets.
"""
typed_gadgets = []
for g_type, g_classifier in self._classifiers.items():
try:
typed_gadgets += self._classify(gadget, g_classifier, g_type, self._emu_iters)
except:
import traceback
print("[-] Error classifying gadgets :")
print(gadget)
print("")
print(traceback.format_exc())
# Sort and return.
    return sorted(typed_gadgets, key=lambda g: str(g)) | Classify gadgets. | Below is the instruction that describes the task:
### Input:
Classify gadgets.
### Response:
def classify(self, gadget):
"""Classify gadgets.
"""
typed_gadgets = []
for g_type, g_classifier in self._classifiers.items():
try:
typed_gadgets += self._classify(gadget, g_classifier, g_type, self._emu_iters)
except:
import traceback
print("[-] Error classifying gadgets :")
print(gadget)
print("")
print(traceback.format_exc())
# Sort and return.
return sorted(typed_gadgets, key=lambda g: str(g)) |
def toggleLongTouchPoint(self):
'''
Toggles the long touch point operation.
'''
if not self.isLongTouchingPoint:
msg = 'Long touching point'
self.toast(msg, background=Color.GREEN)
self.statusBar.set(msg)
self.isLongTouchingPoint = True
# FIXME: There should be 2 methods DIP & PX
self.coordinatesUnit = Unit.PX
else:
self.toast(None)
self.statusBar.clear()
        self.isLongTouchingPoint = False | Toggles the long touch point operation. | Below is the instruction that describes the task:
### Input:
Toggles the long touch point operation.
### Response:
def toggleLongTouchPoint(self):
'''
Toggles the long touch point operation.
'''
if not self.isLongTouchingPoint:
msg = 'Long touching point'
self.toast(msg, background=Color.GREEN)
self.statusBar.set(msg)
self.isLongTouchingPoint = True
# FIXME: There should be 2 methods DIP & PX
self.coordinatesUnit = Unit.PX
else:
self.toast(None)
self.statusBar.clear()
self.isLongTouchingPoint = False |
def get_parameters(self, params, graph=None):
"""Get the parameters of the model.
:param params: dictionary of keys (str names) and values (tensors).
:return: evaluated tensors in params
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
out = {}
for par in params:
if type(params[par]) == list:
for i, p in enumerate(params[par]):
out[par + '-' + str(i+1)] = p.eval()
else:
out[par] = params[par].eval()
return out | Get the parameters of the model.
:param params: dictionary of keys (str names) and values (tensors).
:return: evaluated tensors in params | Below is the instruction that describes the task:
### Input:
Get the parameters of the model.
:param params: dictionary of keys (str names) and values (tensors).
:return: evaluated tensors in params
### Response:
def get_parameters(self, params, graph=None):
"""Get the parameters of the model.
:param params: dictionary of keys (str names) and values (tensors).
:return: evaluated tensors in params
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
out = {}
for par in params:
if type(params[par]) == list:
for i, p in enumerate(params[par]):
out[par + '-' + str(i+1)] = p.eval()
else:
out[par] = params[par].eval()
return out |
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap | Initializes a map representation of this tag's attributes,
if not already initialized. | Below is the instruction that describes the task:
### Input:
Initializes a map representation of this tag's attributes,
if not already initialized.
### Response:
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap |
def get_conf_attr(self, attr, default=None):
"""
Get the value of a attribute in the configuration
:param attr: The attribute
:param default: If the attribute doesn't appear in the configuration
return this value
:return: The value of attribute in the configuration or the default
value
"""
if attr in self.conf:
return self.conf[attr]
else:
return default | Get the value of a attribute in the configuration
:param attr: The attribute
:param default: If the attribute doesn't appear in the configuration
return this value
:return: The value of attribute in the configuration or the default
value | Below is the instruction that describes the task:
### Input:
Get the value of a attribute in the configuration
:param attr: The attribute
:param default: If the attribute doesn't appear in the configuration
return this value
:return: The value of attribute in the configuration or the default
value
### Response:
def get_conf_attr(self, attr, default=None):
"""
Get the value of a attribute in the configuration
:param attr: The attribute
:param default: If the attribute doesn't appear in the configuration
return this value
:return: The value of attribute in the configuration or the default
value
"""
if attr in self.conf:
return self.conf[attr]
else:
return default |
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0,
disconnect=False):
'''
Close this connection.
'''
self._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
if disconnect:
self._closed = True
self.disconnect()
self._callback_close()
else:
        self._channels[0].close() | Close this connection. | Below is the instruction that describes the task:
### Input:
Close this connection.
### Response:
def close(self, reply_code=0, reply_text='', class_id=0, method_id=0,
disconnect=False):
'''
Close this connection.
'''
self._close_info = {
'reply_code': reply_code,
'reply_text': reply_text,
'class_id': class_id,
'method_id': method_id
}
if disconnect:
self._closed = True
self.disconnect()
self._callback_close()
else:
self._channels[0].close() |
def open_file(self, info):
""" Handles the open action. """
if not info.initialized: return # Escape.
# retval = self.edit_traits(parent=info.ui.control, view="file_view")
dlg = FileDialog( action = "open",
wildcard = "Graphviz Files (*.dot, *.xdot, *.txt)|"
"*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|"
"All Files (*.*)|*.*|")
if dlg.open() == OK:
parser = GodotDataParser()
model = parser.parse_dot_file(dlg.path)
if model is not None:
self.model = model
else:
print "error parsing: %s" % dlg.path
self.save_file = dlg.path
    del dlg | Handles the open action. | Below is the instruction that describes the task:
### Input:
Handles the open action.
### Response:
def open_file(self, info):
""" Handles the open action. """
if not info.initialized: return # Escape.
# retval = self.edit_traits(parent=info.ui.control, view="file_view")
dlg = FileDialog( action = "open",
wildcard = "Graphviz Files (*.dot, *.xdot, *.txt)|"
"*.dot;*.xdot;*.txt|Dot Files (*.dot)|*.dot|"
"All Files (*.*)|*.*|")
if dlg.open() == OK:
parser = GodotDataParser()
model = parser.parse_dot_file(dlg.path)
if model is not None:
self.model = model
else:
print "error parsing: %s" % dlg.path
self.save_file = dlg.path
del dlg |
def get_all_yep(self):
"""Cascading style sheet (CSS) extension points.
:returns: dict: {yep: [css...], ...}
"""
yeps = {}
for p in self.get_enabled_plugins:
for e, v in p["plugin_yep"].items():
yep = yeps.get(e, []) + v
yeps[e] = yep
return yeps | Cascading style sheet (CSS) extension points.
:returns: dict: {yep: [css...], ...} | Below is the instruction that describes the task:
### Input:
Cascading style sheet (CSS) extension points.
:returns: dict: {yep: [css...], ...}
### Response:
def get_all_yep(self):
"""Cascading style sheet (CSS) extension points.
:returns: dict: {yep: [css...], ...}
"""
yeps = {}
for p in self.get_enabled_plugins:
for e, v in p["plugin_yep"].items():
yep = yeps.get(e, []) + v
yeps[e] = yep
return yeps |
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s) | Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-) | Below is the instruction that describes the task:
### Input:
Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)
### Response:
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s) |
def delete(self, domain, delete_subdomains=False):
"""
Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'.
"""
uri = "/%s/%s" % (self.uri_base, utils.get_id(domain))
if delete_subdomains:
uri = "%s?deleteSubdomains=true" % uri
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False) | Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'. | Below is the instruction that describes the task:
### Input:
Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'.
### Response:
def delete(self, domain, delete_subdomains=False):
"""
Deletes the specified domain and all of its resource records. If the
domain has subdomains, each subdomain will now become a root domain. If
you wish to also delete any subdomains, pass True to 'delete_subdomains'.
"""
uri = "/%s/%s" % (self.uri_base, utils.get_id(domain))
if delete_subdomains:
uri = "%s?deleteSubdomains=true" % uri
resp, resp_body = self._async_call(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False) |
def isiterable_notstring(value):
"""
Returns True if the value is iterable but not a string. Otherwise returns False.
:param value: Value to check.
"""
if isinstance(value, str):
return False
return isinstance(value, Iterable) or isgeneratorfunction(value) or isgenerator(value) | Returns True if the value is iterable but not a string. Otherwise returns False.
:param value: Value to check. | Below is the instruction that describes the task:
### Input:
Returns True if the value is iterable but not a string. Otherwise returns False.
:param value: Value to check.
### Response:
def isiterable_notstring(value):
"""
Returns True if the value is iterable but not a string. Otherwise returns False.
:param value: Value to check.
"""
if isinstance(value, str):
return False
return isinstance(value, Iterable) or isgeneratorfunction(value) or isgenerator(value) |
def sav_to_pandas_savreader(input_file):
"""
SPSS .sav files to Pandas DataFrame through savreader module
:param input_file: string
:return:
"""
from savReaderWriter import SavReader
lines = []
with SavReader(input_file, returnHeader=True) as reader:
header = next(reader)
for line in reader:
lines.append(line)
return pd.DataFrame(data=lines, columns=header) | SPSS .sav files to Pandas DataFrame through savreader module
:param input_file: string
:return: | Below is the the instruction that describes the task:
### Input:
SPSS .sav files to Pandas DataFrame through savreader module
:param input_file: string
:return:
### Response:
def sav_to_pandas_savreader(input_file):
"""
SPSS .sav files to Pandas DataFrame through savreader module
:param input_file: string
:return:
"""
from savReaderWriter import SavReader
lines = []
with SavReader(input_file, returnHeader=True) as reader:
header = next(reader)
for line in reader:
lines.append(line)
return pd.DataFrame(data=lines, columns=header) |
def search_cloud_integration_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
return data | Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
### Response:
def search_cloud_integration_entities(self, **kwargs): # noqa: E501
"""Search over a customer's non-deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_entities(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SortableSearchRequest body:
:return: ResponseContainerPagedCloudIntegration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_entities_with_http_info(**kwargs) # noqa: E501
return data |
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id)) | Return activities for category. If category is not specified, will
return activities that have no category | Below is the the instruction that describes the task:
### Input:
Return activities for category. If category is not specified, will
return activities that have no category
### Response:
def get_category_activities(self, category_id = None):
"""Return activities for category. If category is not specified, will
return activities that have no category"""
category_id = category_id or -1
return self._to_dict(('id', 'name', 'category_id', 'category'), self.conn.GetCategoryActivities(category_id)) |
def download_manylinux_wheels(self, abi, packages, directory):
# type: (str, List[str], str) -> None
"""Download wheel files for manylinux for all the given packages."""
# If any one of these dependencies fails pip will bail out. Since we
# are only interested in all the ones we can download, we need to feed
# each package to pip individually. The return code of pip doesn't
# matter here since we will inspect the working directory to see which
# wheels were downloaded. We are only interested in wheel files
# compatible with lambda, which means manylinux1_x86_64 platform and
# cpython implementation. The compatible abi depends on the python
# version and is checked later.
for package in packages:
arguments = ['--only-binary=:all:', '--no-deps', '--platform',
'manylinux1_x86_64', '--implementation', 'cp',
'--abi', abi, '--dest', directory, package]
self._execute('download', arguments) | Download wheel files for manylinux for all the given packages. | Below is the the instruction that describes the task:
### Input:
Download wheel files for manylinux for all the given packages.
### Response:
def download_manylinux_wheels(self, abi, packages, directory):
# type: (str, List[str], str) -> None
"""Download wheel files for manylinux for all the given packages."""
# If any one of these dependencies fails pip will bail out. Since we
# are only interested in all the ones we can download, we need to feed
# each package to pip individually. The return code of pip doesn't
# matter here since we will inspect the working directory to see which
# wheels were downloaded. We are only interested in wheel files
# compatible with lambda, which means manylinux1_x86_64 platform and
# cpython implementation. The compatible abi depends on the python
# version and is checked later.
for package in packages:
arguments = ['--only-binary=:all:', '--no-deps', '--platform',
'manylinux1_x86_64', '--implementation', 'cp',
'--abi', abi, '--dest', directory, package]
self._execute('download', arguments) |
def to_timedelta(value):
"""Converts a string to a timedelta."""
if value is None:
return None
if isinstance(value, (six.integer_types, float)):
return timedelta(microseconds=(float(value) / 10))
match = _TIMESPAN_PATTERN.match(value)
if match:
if match.group(1) == "-":
factor = -1
else:
factor = 1
return factor * timedelta(
days=int(match.group("d") or 0),
hours=int(match.group("h")),
minutes=int(match.group("m")),
seconds=float(match.group("s")),
)
else:
raise ValueError("Timespan value '{}' cannot be decoded".format(value)) | Converts a string to a timedelta. | Below is the the instruction that describes the task:
### Input:
Converts a string to a timedelta.
### Response:
def to_timedelta(value):
"""Converts a string to a timedelta."""
if value is None:
return None
if isinstance(value, (six.integer_types, float)):
return timedelta(microseconds=(float(value) / 10))
match = _TIMESPAN_PATTERN.match(value)
if match:
if match.group(1) == "-":
factor = -1
else:
factor = 1
return factor * timedelta(
days=int(match.group("d") or 0),
hours=int(match.group("h")),
minutes=int(match.group("m")),
seconds=float(match.group("s")),
)
else:
raise ValueError("Timespan value '{}' cannot be decoded".format(value)) |
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, [])) | Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object. | Below is the the instruction that describes the task:
### Input:
Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
### Response:
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, [])) |
def find_globals(code):
"""walks the byte code to find the variables which are actually globals"""
cur_byte = 0
byte_code = code.co_code
names = set()
while cur_byte < len(byte_code):
op = ord(byte_code[cur_byte])
if op >= dis.HAVE_ARGUMENT:
if op == _LOAD_GLOBAL:
oparg = ord(byte_code[cur_byte + 1]) + (ord(byte_code[cur_byte + 2]) << 8)
name = code.co_names[oparg]
names.add(name)
cur_byte += 2
cur_byte += 1
return names | walks the byte code to find the variables which are actually globals | Below is the the instruction that describes the task:
### Input:
walks the byte code to find the variables which are actually globals
### Response:
def find_globals(code):
"""walks the byte code to find the variables which are actually globals"""
cur_byte = 0
byte_code = code.co_code
names = set()
while cur_byte < len(byte_code):
op = ord(byte_code[cur_byte])
if op >= dis.HAVE_ARGUMENT:
if op == _LOAD_GLOBAL:
oparg = ord(byte_code[cur_byte + 1]) + (ord(byte_code[cur_byte + 2]) << 8)
name = code.co_names[oparg]
names.add(name)
cur_byte += 2
cur_byte += 1
return names |
def PushBack(self, string='', **unused_kwargs):
"""Push the match back on the stream.
Args:
string: optional data.
"""
self.buffer = string + self.buffer
self.processed_buffer = self.processed_buffer[:-len(string)] | Push the match back on the stream.
Args:
string: optional data. | Below is the the instruction that describes the task:
### Input:
Push the match back on the stream.
Args:
string: optional data.
### Response:
def PushBack(self, string='', **unused_kwargs):
"""Push the match back on the stream.
Args:
string: optional data.
"""
self.buffer = string + self.buffer
self.processed_buffer = self.processed_buffer[:-len(string)] |
def to_dict(self):
""" Return a dict of the users. """
users = dict(users=list())
for user in self:
users['users'].append(user.to_dict())
return users | Return a dict of the users. | Below is the the instruction that describes the task:
### Input:
Return a dict of the users.
### Response:
def to_dict(self):
""" Return a dict of the users. """
users = dict(users=list())
for user in self:
users['users'].append(user.to_dict())
return users |
def timedelta_div(first: datetime.timedelta,
second: datetime.timedelta) -> Optional[float]:
"""Implement divison for timedelta instances.
:param first: First timedelta instance.
:param second: Second timedelta instance.
"""
first_seconds = timedelta_seconds(first)
second_seconds = timedelta_seconds(second)
if not second_seconds:
return None
return first_seconds / second_seconds | Implement divison for timedelta instances.
:param first: First timedelta instance.
:param second: Second timedelta instance. | Below is the the instruction that describes the task:
### Input:
Implement divison for timedelta instances.
:param first: First timedelta instance.
:param second: Second timedelta instance.
### Response:
def timedelta_div(first: datetime.timedelta,
second: datetime.timedelta) -> Optional[float]:
"""Implement divison for timedelta instances.
:param first: First timedelta instance.
:param second: Second timedelta instance.
"""
first_seconds = timedelta_seconds(first)
second_seconds = timedelta_seconds(second)
if not second_seconds:
return None
return first_seconds / second_seconds |
def MTF(self, px_per_mm):
'''
px_per_mm = cam_resolution / image_size
'''
res = 100 #numeric resolution
r = 4 #range +-r*std
#size of 1 px:
px_size = 1 / px_per_mm
#standard deviation of the point-spread-function (PSF) as normal distributed:
std = self.std*px_size #transform standard deviation from [px] to [mm]
x = np.linspace(-r*std,r*std, res)
#line spread function:
lsf = self.gaussian1d(x, 1, 0, std)
#MTF defined as Fourier transform of the line spread function:
#abs() because result is complex
y = abs(np.fft.fft(lsf))
#normalize fft so that max = 1
y /= np.max(y)
#step length between xn and xn+1
dstep = r*std/res
# Fourier frequencies - here: line pairs(cycles) per mm
freq = np.fft.fftfreq(lsf.size, dstep)
#limit mtf between [0-px_per_mm]:
i = np.argmax(freq>px_per_mm)
self.mtf_x = freq[:i]
self.mtf_y = y[:i]
return self.mtf_x, self.mtf_y | px_per_mm = cam_resolution / image_size | Below is the the instruction that describes the task:
### Input:
px_per_mm = cam_resolution / image_size
### Response:
def MTF(self, px_per_mm):
'''
px_per_mm = cam_resolution / image_size
'''
res = 100 #numeric resolution
r = 4 #range +-r*std
#size of 1 px:
px_size = 1 / px_per_mm
#standard deviation of the point-spread-function (PSF) as normal distributed:
std = self.std*px_size #transform standard deviation from [px] to [mm]
x = np.linspace(-r*std,r*std, res)
#line spread function:
lsf = self.gaussian1d(x, 1, 0, std)
#MTF defined as Fourier transform of the line spread function:
#abs() because result is complex
y = abs(np.fft.fft(lsf))
#normalize fft so that max = 1
y /= np.max(y)
#step length between xn and xn+1
dstep = r*std/res
# Fourier frequencies - here: line pairs(cycles) per mm
freq = np.fft.fftfreq(lsf.size, dstep)
#limit mtf between [0-px_per_mm]:
i = np.argmax(freq>px_per_mm)
self.mtf_x = freq[:i]
self.mtf_y = y[:i]
return self.mtf_x, self.mtf_y |
def _set_lsp_traffic_engineering(self, v, load=False):
"""
Setter method for lsp_traffic_engineering, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_traffic_engineering (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_traffic_engineering is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_traffic_engineering() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_traffic_engineering.lsp_traffic_engineering, is_container='container', presence=False, yang_name="lsp-traffic-engineering", rest_name="traffic-engineering", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LSP traffic engineering parameters', u'alt-name': u'traffic-engineering', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_traffic_engineering must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_traffic_engineering.lsp_traffic_engineering, is_container='container', presence=False, yang_name="lsp-traffic-engineering", rest_name="traffic-engineering", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LSP traffic engineering parameters', u'alt-name': u'traffic-engineering', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_traffic_engineering = t
if hasattr(self, '_set'):
self._set() | Setter method for lsp_traffic_engineering, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_traffic_engineering (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_traffic_engineering is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_traffic_engineering() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for lsp_traffic_engineering, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_traffic_engineering (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_traffic_engineering is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_traffic_engineering() directly.
### Response:
def _set_lsp_traffic_engineering(self, v, load=False):
"""
Setter method for lsp_traffic_engineering, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_traffic_engineering (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_traffic_engineering is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_traffic_engineering() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_traffic_engineering.lsp_traffic_engineering, is_container='container', presence=False, yang_name="lsp-traffic-engineering", rest_name="traffic-engineering", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LSP traffic engineering parameters', u'alt-name': u'traffic-engineering', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_traffic_engineering must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_traffic_engineering.lsp_traffic_engineering, is_container='container', presence=False, yang_name="lsp-traffic-engineering", rest_name="traffic-engineering", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'LSP traffic engineering parameters', u'alt-name': u'traffic-engineering', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_traffic_engineering = t
if hasattr(self, '_set'):
self._set() |
def all_optional_fields(self):
"""
Returns an iterator that traverses optional fields in all super types
first, and then for this type.
"""
def optional_check(f):
return is_nullable_type(f.data_type) or f.has_default
return self._filter_fields(optional_check) | Returns an iterator that traverses optional fields in all super types
first, and then for this type. | Below is the the instruction that describes the task:
### Input:
Returns an iterator that traverses optional fields in all super types
first, and then for this type.
### Response:
def all_optional_fields(self):
"""
Returns an iterator that traverses optional fields in all super types
first, and then for this type.
"""
def optional_check(f):
return is_nullable_type(f.data_type) or f.has_default
return self._filter_fields(optional_check) |
def fit_transform(self, col):
"""Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
if self.anonymize:
col = self.anonymize_column(col)
self._fit(col)
return self.transform(col) | Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
### Response:
def fit_transform(self, col):
"""Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
if self.anonymize:
col = self.anonymize_column(col)
self._fit(col)
return self.transform(col) |
def is_page_content_is_already_updated(self, page_id, body):
"""
Compare content and check is already updated or not
:param page_id: Content ID for retrieve storage value
:param body: Body for compare it
:return: True if the same
"""
confluence_content = (self.get_page_by_id(page_id, expand='body.storage').get('body') or {}).get('storage').get(
'value')
confluence_content = confluence_content.replace('ó', u'ó')
log.debug('Old Content: """{body}"""'.format(body=confluence_content))
log.debug('New Content: """{body}"""'.format(body=body))
if confluence_content == body:
log.warning('Content of {page_id} is exactly the same'.format(page_id=page_id))
return True
else:
log.info('Content of {page_id} differs'.format(page_id=page_id))
return False | Compare content and check is already updated or not
:param page_id: Content ID for retrieve storage value
:param body: Body for compare it
:return: True if the same | Below is the the instruction that describes the task:
### Input:
Compare content and check is already updated or not
:param page_id: Content ID for retrieve storage value
:param body: Body for compare it
:return: True if the same
### Response:
def is_page_content_is_already_updated(self, page_id, body):
"""
Compare content and check is already updated or not
:param page_id: Content ID for retrieve storage value
:param body: Body for compare it
:return: True if the same
"""
confluence_content = (self.get_page_by_id(page_id, expand='body.storage').get('body') or {}).get('storage').get(
'value')
confluence_content = confluence_content.replace('ó', u'ó')
log.debug('Old Content: """{body}"""'.format(body=confluence_content))
log.debug('New Content: """{body}"""'.format(body=body))
if confluence_content == body:
log.warning('Content of {page_id} is exactly the same'.format(page_id=page_id))
return True
else:
log.info('Content of {page_id} differs'.format(page_id=page_id))
return False |
def _thread(self):
""" Thread entry point: does the job once, stored results, and dies. """
# Get
args, kwargs = self._jobs.get()
# Stop thread when (None, None) comes in
if args is None and kwargs is None:
return None # Wrappers should exit as well
# Work
try:
self._results.append(self._worker(*args, **kwargs))
return True
except Exception as e:
self._errors.append(e)
return False
finally:
self._jobs.task_done()
with self._jobfinished:
self._jobfinished.notify() | Thread entry point: does the job once, stored results, and dies. | Below is the the instruction that describes the task:
### Input:
Thread entry point: does the job once, stored results, and dies.
### Response:
def _thread(self):
""" Thread entry point: does the job once, stored results, and dies. """
# Get
args, kwargs = self._jobs.get()
# Stop thread when (None, None) comes in
if args is None and kwargs is None:
return None # Wrappers should exit as well
# Work
try:
self._results.append(self._worker(*args, **kwargs))
return True
except Exception as e:
self._errors.append(e)
return False
finally:
self._jobs.task_done()
with self._jobfinished:
self._jobfinished.notify() |
def QA_fetch_future_min(
code,
start, end,
format='numpy',
frequence='1min',
collections=DATABASE.future_min):
'获取股票分钟线'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
__data = []
code = QA_util_code_tolist(code, auto_fill=False)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']),
item['datetime'], item['tradetime'], item['time_stamp'], item['date'], item['type']])
__data = DataFrame(__data, columns=[
'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'datetime', 'tradetime', 'time_stamp', 'date', 'type'])
__data['datetime'] = pd.to_datetime(__data['datetime'])
__data = __data.set_index('datetime', drop=False)
if format in ['numpy', 'np', 'n']:
return numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
return numpy.asarray(__data).tolist()
elif format in ['P', 'p', 'pandas', 'pd']:
return __data | 获取股票分钟线 | Below is the the instruction that describes the task:
### Input:
获取股票分钟线
### Response:
def QA_fetch_future_min(
code,
start, end,
format='numpy',
frequence='1min',
collections=DATABASE.future_min):
'获取股票分钟线'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
__data = []
code = QA_util_code_tolist(code, auto_fill=False)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, batch_size=10000)
if format in ['dict', 'json']:
return [data for data in cursor]
for item in cursor:
__data.append([str(item['code']), float(item['open']), float(item['high']), float(
item['low']), float(item['close']), float(item['position']), float(item['price']), float(item['trade']),
item['datetime'], item['tradetime'], item['time_stamp'], item['date'], item['type']])
__data = DataFrame(__data, columns=[
'code', 'open', 'high', 'low', 'close', 'position', 'price', 'trade', 'datetime', 'tradetime', 'time_stamp', 'date', 'type'])
__data['datetime'] = pd.to_datetime(__data['datetime'])
__data = __data.set_index('datetime', drop=False)
if format in ['numpy', 'np', 'n']:
return numpy.asarray(__data)
elif format in ['list', 'l', 'L']:
return numpy.asarray(__data).tolist()
elif format in ['P', 'p', 'pandas', 'pd']:
return __data |
def _pluralize(value, item_key):
""""Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` proceses metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items.
"""
v = value[item_key]
if not isinstance(v, list):
# Force a singular value to be a list
return [v]
else:
return v | Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` proceses metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items. | Below is the the instruction that describes the task:
### Input:
Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` proceses metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items.
### Response:
def _pluralize(value, item_key):
""""Force the value of a datacite3 key to be a list.
>>> _pluralize(xml_input['authors'], 'author')
['Sick, Jonathan', 'Economou, Frossie']
Background
----------
When `xmltodict` proceses metadata, it turns XML tags into new key-value
pairs whenever possible, even if the value should semantically be treated
as a `list`.
For example
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
</authors
Would be rendered by `xmltodict` as::
{'authors': {'author': 'Sick, Jonathan'}}
While
.. code-block:: xml
<authors>
<author>Sick, Jonathan</author>
<author>Economou, Frossie</author>
</authors
is rendered by `xmltodict` as::
{'authors': [{'author': ['Sick, Jonathan', 'Economou, Frossie']}}
This function ensures that values are *always* lists so that they can be
treated uniformly.
Parameters
----------
value : obj
The value of a key from datacite metadata extracted by `xmltodict`.
For example, `xmldict['authors']`.
item_key : str
Name of the tag for each item; for example, with the `'authors'` key
the item key is `'author'`.
Returns
-------
item_values : list
List of values of all items.
"""
v = value[item_key]
if not isinstance(v, list):
# Force a singular value to be a list
return [v]
else:
return v |
def disclaimer_title_header_element(feature, parent):
"""Retrieve disclaimer title header string from definitions."""
_ = feature, parent # NOQA
header = disclaimer_title_header['string_format']
return header.capitalize() | Retrieve disclaimer title header string from definitions. | Below is the instruction that describes the task:
### Input:
Retrieve disclaimer title header string from definitions.
### Response:
def disclaimer_title_header_element(feature, parent):
    """Return the capitalized disclaimer title header from the definitions."""
    # The expression-function signature requires feature/parent; unused here.
    _ = feature, parent  # NOQA
    return disclaimer_title_header['string_format'].capitalize()
def subscription_payment_required(
function=None, plan=None, pay_page=SUBSCRIPTION_REDIRECT
):
"""
Decorator for views that require subscription payment.
Redirects to `pay_page` if necessary.
"""
actual_decorator = subscriber_passes_pay_test(
subscriber_has_active_subscription, plan=plan, pay_page=pay_page
)
if function:
return actual_decorator(function)
return actual_decorator | Decorator for views that require subscription payment.
Redirects to `pay_page` if necessary. | Below is the the instruction that describes the task:
### Input:
Decorator for views that require subscription payment.
Redirects to `pay_page` if necessary.
### Response:
def subscription_payment_required(
    function=None, plan=None, pay_page=SUBSCRIPTION_REDIRECT
):
    """
    View decorator enforcing an active, paid subscription.

    Usable both bare (``@subscription_payment_required``) and with arguments
    (``@subscription_payment_required(plan=..., pay_page=...)``); users
    without an active subscription are redirected to ``pay_page``.
    """
    decorator = subscriber_passes_pay_test(
        subscriber_has_active_subscription, plan=plan, pay_page=pay_page
    )
    return decorator(function) if function else decorator
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s) | Write string s to the stream. | Below is the instruction that describes the task:
### Input:
Write string s to the stream.
### Response:
def write(self, s):
    """Write string s to the stream, tracking position and gzip CRC."""
    comptype = self.comptype
    if comptype == "gz":
        # Keep a running CRC of the uncompressed data for the gzip trailer.
        self.crc = self.zlib.crc32(s, self.crc)
    self.pos += len(s)
    # Everything except the raw "tar" stream goes through the compressor.
    data = s if comptype == "tar" else self.cmp.compress(s)
    self.__write(data)
def _process(self, segments):
"""sort segments in read order - left to right, up to down"""
# sort_f= lambda r: max_line_width*(r[1]/max_line_height)+r[0]
# segments= sorted(segments, key=sort_f)
# segments= segments_to_numpy( segments )
# return segments
mlh, mlw = self.max_line_height, self.max_line_width
s = segments.astype(numpy.uint32) # prevent overflows
order = mlw * (s[:, 1] // mlh) + s[:, 0]
sort_order = numpy.argsort(order)
return segments[sort_order] | sort segments in read order - left to right, up to down | Below is the instruction that describes the task:
### Input:
sort segments in read order - left to right, up to down
### Response:
def _process(self, segments):
"""sort segments in read order - left to right, up to down"""
# sort_f= lambda r: max_line_width*(r[1]/max_line_height)+r[0]
# segments= sorted(segments, key=sort_f)
# segments= segments_to_numpy( segments )
# return segments
mlh, mlw = self.max_line_height, self.max_line_width
s = segments.astype(numpy.uint32) # prevent overflows
order = mlw * (s[:, 1] // mlh) + s[:, 0]
sort_order = numpy.argsort(order)
return segments[sort_order] |
def _auto_scroll(self, *args):
""" Scroll to the end of the text view """
adj = self['scrollable'].get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size()) | Scroll to the end of the text view | Below is the instruction that describes the task:
### Input:
Scroll to the end of the text view
### Response:
def _auto_scroll(self, *args):
""" Scroll to the end of the text view """
adj = self['scrollable'].get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size()) |
def fast_sync_snapshot_compress( snapshot_dir, export_path ):
"""
Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure
"""
snapshot_dir = os.path.abspath(snapshot_dir)
export_path = os.path.abspath(export_path)
if os.path.exists(export_path):
return {'error': 'Snapshot path exists: {}'.format(export_path)}
old_dir = os.getcwd()
count_ref = [0]
def print_progress(tarinfo):
count_ref[0] += 1
if count_ref[0] % 100 == 0:
log.debug("{} files compressed...".format(count_ref[0]))
return tarinfo
try:
os.chdir(snapshot_dir)
with tarfile.TarFile.bz2open(export_path, "w") as f:
f.add(".", filter=print_progress)
except:
os.chdir(old_dir)
raise
finally:
os.chdir(old_dir)
return {'status': True} | Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure | Below is the the instruction that describes the task:
### Input:
Given the path to a directory, compress it and export it to the
given path.
Return {'status': True} on success
Return {'error': ...} on failure
### Response:
def fast_sync_snapshot_compress(snapshot_dir, export_path):
    """
    Given the path to a directory, compress it (tar.bz2) and export it to the
    given path.

    Return {'status': True} on success
    Return {'error': ...} on failure
    """
    snapshot_dir = os.path.abspath(snapshot_dir)
    export_path = os.path.abspath(export_path)
    if os.path.exists(export_path):
        return {'error': 'Snapshot path exists: {}'.format(export_path)}

    count_ref = [0]

    def print_progress(tarinfo):
        # tarfile `filter` hook: log every 100 files, pass entries through.
        count_ref[0] += 1
        if count_ref[0] % 100 == 0:
            log.debug("{} files compressed...".format(count_ref[0]))
        return tarinfo

    old_dir = os.getcwd()
    try:
        # chdir so archive members are stored relative to the snapshot root.
        os.chdir(snapshot_dir)
        with tarfile.open(export_path, "w:bz2") as f:
            f.add(".", filter=print_progress)
    finally:
        # Always restore the working directory, even if compression failed;
        # any exception propagates to the caller unchanged.
        os.chdir(old_dir)

    return {'status': True}
def validate_ip_address(ip_address):
"""Validate the ip_address
:param ip_address: (str) IP address
:return: (bool) True if the ip_address is valid
"""
# Validate the IP address
log = logging.getLogger(mod_logger + '.validate_ip_address')
if not isinstance(ip_address, basestring):
log.warn('ip_address argument is not a string')
return False
# Ensure there are 3 dots
num_dots = 0
for c in ip_address:
if c == '.':
num_dots += 1
if num_dots != 3:
log.info('Not a valid IP address: {i}'.format(i=ip_address))
return False
# Use the socket module to test
try:
socket.inet_aton(ip_address)
except socket.error as e:
log.info('Not a valid IP address: {i}\n{e}'.format(i=ip_address, e=e))
return False
else:
log.info('Validated IP address: %s', ip_address)
return True | Validate the ip_address
:param ip_address: (str) IP address
:return: (bool) True if the ip_address is valid | Below is the the instruction that describes the task:
### Input:
Validate the ip_address
:param ip_address: (str) IP address
:return: (bool) True if the ip_address is valid
### Response:
def validate_ip_address(ip_address):
    """Validate the ip_address

    :param ip_address: (str) IP address
    :return: (bool) True if the ip_address is valid
    """
    # Validate the IP address
    log = logging.getLogger(mod_logger + '.validate_ip_address')
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3: basestring no longer exists
    if not isinstance(ip_address, string_types):
        log.warning('ip_address argument is not a string')
        return False
    # Require exactly 3 dots: socket.inet_aton also accepts short forms
    # such as '127.1', which we do not want to treat as valid.
    if ip_address.count('.') != 3:
        log.info('Not a valid IP address: {i}'.format(i=ip_address))
        return False
    # Use the socket module to test
    try:
        socket.inet_aton(ip_address)
    except socket.error as e:
        log.info('Not a valid IP address: {i}\n{e}'.format(i=ip_address, e=e))
        return False
    log.info('Validated IP address: %s', ip_address)
    return True
def register_provider(cls, provider):
"""Register method to keep list of providers."""
def decorator(subclass):
"""Register as decorator function."""
cls._providers[provider] = subclass
subclass.name = provider
return subclass
return decorator | Register method to keep list of providers. | Below is the the instruction that describes the task:
### Input:
Register method to keep list of providers.
### Response:
def register_provider(cls, provider):
    """Class-decorator factory that records provider subclasses on ``cls``."""
    def decorator(subclass):
        """Tag *subclass* with its provider name and register it."""
        subclass.name = provider
        cls._providers[provider] = subclass
        return subclass
    return decorator
def sbo_list():
"""Return all SBo packages
"""
sbo_packages = []
for pkg in os.listdir(_meta_.pkg_path):
if pkg.endswith("_SBo"):
sbo_packages.append(pkg)
return sbo_packages | Return all SBo packages | Below is the the instruction that describes the task:
### Input:
Return all SBo packages
### Response:
def sbo_list():
    """Return the names of all installed SBo packages."""
    # SBo packages are identified by the "_SBo" suffix on the package name.
    return [name for name in os.listdir(_meta_.pkg_path)
            if name.endswith("_SBo")]
def _hexencode(bytestring, insert_spaces = False):
"""Convert a byte string to a hex encoded string.
For example 'J' will return '4A', and ``'\\x04'`` will return '04'.
Args:
bytestring (str): Can be for example ``'A\\x01B\\x45'``.
insert_spaces (bool): Insert space characters between pair of characters to increase readability.
Returns:
A string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'.
The string will be longer if spaces are inserted.
Raises:
TypeError, ValueError
"""
_checkString(bytestring, description='byte string')
separator = '' if not insert_spaces else ' '
# Use plain string formatting instead of binhex.hexlify,
# in order to have it Python 2.x and 3.x compatible
byte_representions = []
for c in bytestring:
byte_representions.append( '{0:02X}'.format(ord(c)) )
return separator.join(byte_representions).strip() | Convert a byte string to a hex encoded string.
For example 'J' will return '4A', and ``'\\x04'`` will return '04'.
Args:
bytestring (str): Can be for example ``'A\\x01B\\x45'``.
insert_spaces (bool): Insert space characters between pair of characters to increase readability.
Returns:
A string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'.
The string will be longer if spaces are inserted.
Raises:
TypeError, ValueError | Below is the the instruction that describes the task:
### Input:
Convert a byte string to a hex encoded string.
For example 'J' will return '4A', and ``'\\x04'`` will return '04'.
Args:
bytestring (str): Can be for example ``'A\\x01B\\x45'``.
insert_spaces (bool): Insert space characters between pair of characters to increase readability.
Returns:
A string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'.
The string will be longer if spaces are inserted.
Raises:
TypeError, ValueError
### Response:
def _hexencode(bytestring, insert_spaces = False):
    """Convert a byte string to a hex encoded string.

    For example 'J' becomes '4A', and ``'\\x04'`` becomes '04'.

    Args:
        bytestring (str): Can be for example ``'A\\x01B\\x45'``.
        insert_spaces (bool): Insert space characters between pairs of
            characters to increase readability.

    Returns:
        A string of twice the length, using only the characters '0'-'9'
        and 'A'-'F' (longer if spaces are inserted).

    Raises:
        TypeError, ValueError
    """
    _checkString(bytestring, description='byte string')
    joiner = ' ' if insert_spaces else ''
    # Plain string formatting keeps this compatible with Python 2.x and 3.x.
    hex_pairs = ['{0:02X}'.format(ord(ch)) for ch in bytestring]
    return joiner.join(hex_pairs).strip()
def Flemmer_Banks(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \frac{24}{Re}10^E
E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5
Examples
--------
>>> Flemmer_Banks(200.)
0.7849169609270039
References
----------
.. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
doi:10.1016/0032-5910(86)80044-4.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
E = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1 + (log10(Re))**2)
Cd = 24./Re*10**E
return Cd | r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \frac{24}{Re}10^E
E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5
Examples
--------
>>> Flemmer_Banks(200.)
0.7849169609270039
References
----------
.. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
doi:10.1016/0032-5910(86)80044-4.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045. | Below is the the instruction that describes the task:
### Input:
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \frac{24}{Re}10^E
E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5
Examples
--------
>>> Flemmer_Banks(200.)
0.7849169609270039
References
----------
.. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
doi:10.1016/0032-5910(86)80044-4.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
### Response:
def Flemmer_Banks(Re):
    r'''Calculates the drag coefficient of a smooth sphere with the
    Flemmer-Banks correlation [1]_, as described in [2]_.

    .. math::
        C_D = \frac{24}{Re}10^E

        E = 0.383Re^{0.356}-0.207Re^{0.396} - \frac{0.143}{1+(\log_{10} Re)^2}

    Parameters
    ----------
    Re : float
        Reynolds number of the sphere, [-]

    Returns
    -------
    Cd : float
        Drag coefficient [-]

    Notes
    -----
    Applicable for Re <= 2E5.

    Examples
    --------
    >>> Flemmer_Banks(200.)
    0.7849169609270039

    References
    ----------
    .. [1] Flemmer, R. L. C., and C. L. Banks. "On the Drag Coefficient of a
       Sphere." Powder Technology 48, no. 3 (November 1986): 217-21.
       doi:10.1016/0032-5910(86)80044-4.
    .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
       Ahmadi. "Development of Empirical Models with High Accuracy for
       Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
       Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
       doi:10.1016/j.powtec.2014.02.045.
    '''
    log_Re = log10(Re)
    exponent = 0.383*Re**0.356 - 0.207*Re**0.396 - 0.143/(1. + log_Re**2)
    return 24./Re*10**exponent
def iter_content(self, chunk_size=1024, decode_unicode=False):
"""Get request body as iterator content (stream).
:param int chunk_size:
:param bool decode_unicode:
"""
return self.response.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode) | Get request body as iterator content (stream).
:param int chunk_size:
:param bool decode_unicode: | Below is the the instruction that describes the task:
### Input:
Get request body as iterator content (stream).
:param int chunk_size:
:param bool decode_unicode:
### Response:
def iter_content(self, chunk_size=1024, decode_unicode=False):
    """Stream the response body as an iterator of chunks.

    :param int chunk_size: number of bytes per chunk
    :param bool decode_unicode: decode chunks instead of returning raw bytes
    """
    response = self.response
    return response.iter_content(
        chunk_size=chunk_size, decode_unicode=decode_unicode)
def mult(self, matrix):
"""
Multiply this frame, viewed as a matrix, by another matrix.
:param matrix: another frame that you want to multiply the current frame by; must be compatible with the
current frame (i.e. its number of rows must be the same as number of columns in the current frame).
:returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.
"""
if self.ncols != matrix.nrows:
raise H2OValueError("Matrix is not compatible for multiplication with the current frame")
return H2OFrame._expr(expr=ExprNode("x", self, matrix)) | Multiply this frame, viewed as a matrix, by another matrix.
:param matrix: another frame that you want to multiply the current frame by; must be compatible with the
current frame (i.e. its number of rows must be the same as number of columns in the current frame).
:returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``. | Below is the the instruction that describes the task:
### Input:
Multiply this frame, viewed as a matrix, by another matrix.
:param matrix: another frame that you want to multiply the current frame by; must be compatible with the
current frame (i.e. its number of rows must be the same as number of columns in the current frame).
:returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.
### Response:
def mult(self, matrix):
    """
    Matrix-multiply this frame by another frame.

    :param matrix: the right-hand frame; its number of rows must equal this
        frame's number of columns.
    :returns: a new H2OFrame holding the product of the two frames.
    """
    inner_dims_match = self.ncols == matrix.nrows
    if not inner_dims_match:
        raise H2OValueError("Matrix is not compatible for multiplication with the current frame")
    product = ExprNode("x", self, matrix)
    return H2OFrame._expr(expr=product)
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
profilers = cls._ParseStringOption(options, 'profilers')
if not profilers:
profilers = set()
elif profilers.lower() != 'list':
profilers = set(profilers.split(','))
supported_profilers = set(cls.PROFILERS_INFORMATION.keys())
unsupported_profilers = profilers.difference(supported_profilers)
if unsupported_profilers:
unsupported_profilers = ', '.join(unsupported_profilers)
raise errors.BadConfigOption(
'Unsupported profilers: {0:s}'.format(unsupported_profilers))
profiling_directory = getattr(options, 'profiling_directory', None)
if profiling_directory and not os.path.isdir(profiling_directory):
raise errors.BadConfigOption(
'No such profiling directory: {0:s}'.format(profiling_directory))
profiling_sample_rate = getattr(options, 'profiling_sample_rate', None)
if not profiling_sample_rate:
profiling_sample_rate = cls.DEFAULT_PROFILING_SAMPLE_RATE
else:
try:
profiling_sample_rate = int(profiling_sample_rate, 10)
except (TypeError, ValueError):
raise errors.BadConfigOption(
'Invalid profile sample rate: {0!s}.'.format(profiling_sample_rate))
setattr(configuration_object, '_profilers', profilers)
setattr(configuration_object, '_profiling_directory', profiling_directory)
setattr(
configuration_object, '_profiling_sample_rate', profiling_sample_rate) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. | Below is the the instruction that describes the task:
### Input:
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
### Response:
def ParseOptions(cls, options, configuration_object):
  """Parses and validates options.

  Args:
    options (argparse.Namespace): parser options.
    configuration_object (CLITool): object to be configured by the argument
        helper.

  Raises:
    BadConfigObject: when the configuration object is of the wrong type.
    BadConfigOption: when an unsupported profiler, a non-existent profiling
        directory or an invalid sample rate is specified.
  """
  if not isinstance(configuration_object, tools.CLITool):
    raise errors.BadConfigObject(
        'Configuration object is not an instance of CLITool')

  profilers = cls._ParseStringOption(options, 'profilers')
  if not profilers:
    profilers = set()
  elif profilers.lower() != 'list':
    # A comma separated list of profiler names; validate each name against
    # the supported profilers. The literal value 'list' is handled elsewhere
    # (it requests a listing instead of enabling profilers).
    profilers = set(profilers.split(','))

    supported_profilers = set(cls.PROFILERS_INFORMATION.keys())
    unsupported_profilers = profilers.difference(supported_profilers)
    if unsupported_profilers:
      unsupported_profilers = ', '.join(unsupported_profilers)
      raise errors.BadConfigOption(
          'Unsupported profilers: {0:s}'.format(unsupported_profilers))

  profiling_directory = getattr(options, 'profiling_directory', None)
  if profiling_directory and not os.path.isdir(profiling_directory):
    raise errors.BadConfigOption(
        'No such profiling directory: {0:s}'.format(profiling_directory))

  profiling_sample_rate = getattr(options, 'profiling_sample_rate', None)
  if not profiling_sample_rate:
    profiling_sample_rate = cls.DEFAULT_PROFILING_SAMPLE_RATE
  else:
    try:
      # The sample rate arrives as a string; parse it as a base-10 integer.
      profiling_sample_rate = int(profiling_sample_rate, 10)
    except (TypeError, ValueError):
      raise errors.BadConfigOption(
          'Invalid profile sample rate: {0!s}.'.format(profiling_sample_rate))

  # Stash the parsed values on the configuration object as private
  # attributes for the tool to pick up.
  setattr(configuration_object, '_profilers', profilers)
  setattr(configuration_object, '_profiling_directory', profiling_directory)
  setattr(
      configuration_object, '_profiling_sample_rate', profiling_sample_rate)
def get(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
**kwargs):
"""
:param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default
"""
if self.scope:
variable_path = '{0.scope}{0.path_separator}{1}'.format(self, variable_path)
if self.key_prefix:
variable_path = '{0.key_prefix}:{1}'.format(self, variable_path)
val = self.client.get(variable_path)
if val is None:
return default
if val.startswith(self.object_serialize_prefix):
# since complex data types are yaml-serialized there's no need to coerce anything
_val = val[len(self.object_serialize_prefix):]
bundle = self.object_deserialize(_val)
if bundle == '': # check for reinforced empty flag
return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer)
return bundle
if isinstance(val, bytes):
val = val.decode()
return self.coerce(val, coerce_type=coerce_type, coercer=coercer) | :param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default | Below is the the instruction that describes the task:
### Input:
:param variable_path: a delimiter-separated path to a nested value
:param default: default value if there's no object by specified path
:param coerce_type: cast a type of a value to a specified one
:param coercer: perform a type casting with specified callback
:param kwargs: additional arguments inherited parser may need
:return: value or default
### Response:
def get(self,
        variable_path: str,
        default: t.Optional[t.Any] = None,
        coerce_type: t.Optional[t.Type] = None,
        coercer: t.Optional[t.Callable] = None,
        **kwargs) -> t.Any:
    """Read a value from the backing store by its delimited path.

    :param variable_path: a delimiter-separated path to a nested value
    :param default: default value if there's no object by specified path
    :param coerce_type: cast a type of a value to a specified one
    :param coercer: perform a type casting with specified callback
    :param kwargs: additional arguments inherited parser may need
    :return: value or default
    """
    # Namespace the lookup key: prepend the parser scope and key prefix
    # when they are configured.
    if self.scope:
        variable_path = '{0.scope}{0.path_separator}{1}'.format(self, variable_path)
    if self.key_prefix:
        variable_path = '{0.key_prefix}:{1}'.format(self, variable_path)
    val = self.client.get(variable_path)
    if val is None:
        # Key absent in the store; fall back to the caller's default.
        return default
    if val.startswith(self.object_serialize_prefix):
        # since complex data types are yaml-serialized there's no need to coerce anything
        _val = val[len(self.object_serialize_prefix):]
        bundle = self.object_deserialize(_val)
        if bundle == '':  # check for reinforced empty flag
            return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer)
        return bundle
    # Plain (non-serialized) values: decode raw bytes, then apply coercion.
    if isinstance(val, bytes):
        val = val.decode()
    return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
def get_converted_relative_path(path, relative_to=None):
"""Convert `path` to be relative.
Given a vague relative path, return the path relative to the given
location.
:param str path: The location of a target path
:param str relative_to: The starting path to build against, optional
:returns: A relative posix-style path with a leading `./`
This performs additional conversion to ensure the result is of POSIX form,
and starts with `./`, or is precisely `.`.
>>> os.chdir('/home/user/code/myrepo/myfolder')
>>> vistir.path.get_converted_relative_path('/home/user/code/file.zip')
'./../../file.zip'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder/mysubfolder')
'./mysubfolder'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder')
'.'
"""
from .misc import to_text, to_bytes # noqa
if not relative_to:
relative_to = os.getcwdu() if six.PY2 else os.getcwd()
if six.PY2:
path = to_bytes(path, encoding="utf-8")
else:
path = to_text(path, encoding="utf-8")
relative_to = to_text(relative_to, encoding="utf-8")
start_path = Path(relative_to)
try:
start = start_path.resolve()
except OSError:
start = start_path.absolute()
# check if there is a drive letter or mount point
# if it is a mountpoint use the original absolute path
# instead of the unc path
if check_for_unc_path(start):
start = start_path.absolute()
path = start.joinpath(path).relative_to(start)
# check and see if the path that was passed into the function is a UNC path
# and raise value error if it is not.
if check_for_unc_path(path):
raise ValueError("The path argument does not currently accept UNC paths")
relpath_s = to_text(posixpath.normpath(path.as_posix()))
if not (relpath_s == "." or relpath_s.startswith("./")):
relpath_s = posixpath.join(".", relpath_s)
return relpath_s | Convert `path` to be relative.
Given a vague relative path, return the path relative to the given
location.
:param str path: The location of a target path
:param str relative_to: The starting path to build against, optional
:returns: A relative posix-style path with a leading `./`
This performs additional conversion to ensure the result is of POSIX form,
and starts with `./`, or is precisely `.`.
>>> os.chdir('/home/user/code/myrepo/myfolder')
>>> vistir.path.get_converted_relative_path('/home/user/code/file.zip')
'./../../file.zip'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder/mysubfolder')
'./mysubfolder'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder')
'.' | Below is the the instruction that describes the task:
### Input:
Convert `path` to be relative.
Given a vague relative path, return the path relative to the given
location.
:param str path: The location of a target path
:param str relative_to: The starting path to build against, optional
:returns: A relative posix-style path with a leading `./`
This performs additional conversion to ensure the result is of POSIX form,
and starts with `./`, or is precisely `.`.
>>> os.chdir('/home/user/code/myrepo/myfolder')
>>> vistir.path.get_converted_relative_path('/home/user/code/file.zip')
'./../../file.zip'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder/mysubfolder')
'./mysubfolder'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder')
'.'
### Response:
def get_converted_relative_path(path, relative_to=None):
    """Convert `path` to be relative.

    Given a vague relative path, return the path relative to the given
    location.

    :param str path: The location of a target path
    :param str relative_to: The starting path to build against, optional
    :returns: A relative posix-style path with a leading `./`

    This performs additional conversion to ensure the result is of POSIX form,
    and starts with `./`, or is precisely `.`.

    >>> os.chdir('/home/user/code/myrepo/myfolder')
    >>> vistir.path.get_converted_relative_path('/home/user/code/file.zip')
    './../../file.zip'
    >>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder/mysubfolder')
    './mysubfolder'
    >>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder')
    '.'
    """
    from .misc import to_text, to_bytes  # noqa
    if not relative_to:
        # getcwdu returns text on Python 2; getcwd already does on Python 3.
        relative_to = os.getcwdu() if six.PY2 else os.getcwd()
    if six.PY2:
        path = to_bytes(path, encoding="utf-8")
    else:
        path = to_text(path, encoding="utf-8")
        relative_to = to_text(relative_to, encoding="utf-8")
    start_path = Path(relative_to)
    try:
        start = start_path.resolve()
    except OSError:
        start = start_path.absolute()
    # check if there is a drive letter or mount point
    # if it is a mountpoint use the original absolute path
    # instead of the unc path
    if check_for_unc_path(start):
        start = start_path.absolute()
    path = start.joinpath(path).relative_to(start)
    # check and see if the path that was passed into the function is a UNC path
    # and raise value error if it is not.
    if check_for_unc_path(path):
        raise ValueError("The path argument does not currently accept UNC paths")
    # Normalize to POSIX separators and force the './' (or '.') prefix form.
    relpath_s = to_text(posixpath.normpath(path.as_posix()))
    if not (relpath_s == "." or relpath_s.startswith("./")):
        relpath_s = posixpath.join(".", relpath_s)
    return relpath_s
def derivative_factory(name):
"""Create derivative function for some ufuncs."""
if name == 'sin':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(cos(self.domain)(point))
elif name == 'cos':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(-sin(self.domain)(point))
elif name == 'tan':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(1 + self(point) ** 2)
elif name == 'sqrt':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(0.5 / self(point))
elif name == 'square':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(2.0 * point)
elif name == 'log':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(1.0 / point)
elif name == 'exp':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(self(point))
elif name == 'reciprocal':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(-self(point) ** 2)
elif name == 'sinh':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(cosh(self.domain)(point))
elif name == 'cosh':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(sinh(self.domain)(point))
else:
# Fallback to default
derivative = Operator.derivative
return derivative | Create derivative function for some ufuncs. | Below is the the instruction that describes the task:
### Input:
Create derivative function for some ufuncs.
### Response:
def derivative_factory(name):
"""Create derivative function for some ufuncs."""
if name == 'sin':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(cos(self.domain)(point))
elif name == 'cos':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(-sin(self.domain)(point))
elif name == 'tan':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(1 + self(point) ** 2)
elif name == 'sqrt':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(0.5 / self(point))
elif name == 'square':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(2.0 * point)
elif name == 'log':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(1.0 / point)
elif name == 'exp':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(self(point))
elif name == 'reciprocal':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(-self(point) ** 2)
elif name == 'sinh':
def derivative(self, point):
"""Return the derivative operator."""
point = self.domain.element(point)
return MultiplyOperator(cosh(self.domain)(point))
elif name == 'cosh':
def derivative(self, point):
"""Return the derivative operator."""
return MultiplyOperator(sinh(self.domain)(point))
else:
# Fallback to default
derivative = Operator.derivative
return derivative |
def decode(self, encoded):
""" Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``.
"""
encoded = super().decode(encoded)
return self.tokenizer.decode([self.itos[index] for index in encoded]) | Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``. | Below is the the instruction that describes the task:
### Input:
Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``.
### Response:
def decode(self, encoded):
""" Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``.
"""
encoded = super().decode(encoded)
return self.tokenizer.decode([self.itos[index] for index in encoded]) |
def push_file(self, local_source, remote_dir):
''' Transport a local file to a directory on a remote machine
Args:
- local_source (string): Path
- remote_dir (string): Remote path
Returns:
- str: Path to copied file on remote machine
Raises:
- BadScriptPath : if script path on the remote side is bad
- BadPermsScriptPath : You do not have perms to make the channel script dir
- FileCopyException : FileCopy failed.
'''
remote_dest = remote_dir + '/' + os.path.basename(local_source)
try:
self.makedirs(remote_dir, exist_ok=True)
except IOError as e:
logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir))
if e.errno == 2:
raise BadScriptPath(e, self.hostname)
elif e.errno == 13:
raise BadPermsScriptPath(e, self.hostname)
else:
logger.exception("File push failed due to SFTP client failure")
raise FileCopyException(e, self.hostname)
try:
self.sftp_client.put(local_source, remote_dest, confirm=True)
# Set perm because some systems require the script to be executable
self.sftp_client.chmod(remote_dest, 0o777)
except Exception as e:
logger.exception("File push from local source {} to remote destination {} failed".format(
local_source, remote_dest))
raise FileCopyException(e, self.hostname)
return remote_dest | Transport a local file to a directory on a remote machine
Args:
- local_source (string): Path
- remote_dir (string): Remote path
Returns:
- str: Path to copied file on remote machine
Raises:
- BadScriptPath : if script path on the remote side is bad
- BadPermsScriptPath : You do not have perms to make the channel script dir
- FileCopyException : FileCopy failed. | Below is the the instruction that describes the task:
### Input:
Transport a local file to a directory on a remote machine
Args:
- local_source (string): Path
- remote_dir (string): Remote path
Returns:
- str: Path to copied file on remote machine
Raises:
- BadScriptPath : if script path on the remote side is bad
- BadPermsScriptPath : You do not have perms to make the channel script dir
- FileCopyException : FileCopy failed.
### Response:
def push_file(self, local_source, remote_dir):
''' Transport a local file to a directory on a remote machine
Args:
- local_source (string): Path
- remote_dir (string): Remote path
Returns:
- str: Path to copied file on remote machine
Raises:
- BadScriptPath : if script path on the remote side is bad
- BadPermsScriptPath : You do not have perms to make the channel script dir
- FileCopyException : FileCopy failed.
'''
remote_dest = remote_dir + '/' + os.path.basename(local_source)
try:
self.makedirs(remote_dir, exist_ok=True)
except IOError as e:
logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir))
if e.errno == 2:
raise BadScriptPath(e, self.hostname)
elif e.errno == 13:
raise BadPermsScriptPath(e, self.hostname)
else:
logger.exception("File push failed due to SFTP client failure")
raise FileCopyException(e, self.hostname)
try:
self.sftp_client.put(local_source, remote_dest, confirm=True)
# Set perm because some systems require the script to be executable
self.sftp_client.chmod(remote_dest, 0o777)
except Exception as e:
logger.exception("File push from local source {} to remote destination {} failed".format(
local_source, remote_dest))
raise FileCopyException(e, self.hostname)
return remote_dest |
def primary_transcript(entrystream, parenttype='gene', logstream=stderr):
"""
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue
for parent in tag.select.features(entry, parenttype, traverse=True):
if parent.num_children == 0:
continue
transcripts = defaultdict(list)
for child in parent.children:
if child.type in type_terms:
transcripts[child.type].append(child)
if len(transcripts) == 0:
continue
ttypes = list(transcripts.keys())
ttype = _get_primary_type(ttypes, parent)
transcript_list = transcripts[ttype]
if ttype == 'mRNA':
_emplace_pmrna(transcript_list, parent, strict=True)
else:
_emplace_transcript(transcript_list, parent)
yield entry | Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare. | Below is the the instruction that describes the task:
### Input:
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
### Response:
def primary_transcript(entrystream, parenttype='gene', logstream=stderr):
"""
Select a single transcript as a representative for each gene.
This function is a generalization of the `primary_mrna` function that
attempts, under certain conditions, to select a single transcript as a
representative for each gene. If a gene encodes multiple transcript types,
one of those types must be **mRNA** or the function will complain loudly
and fail.
For mRNAs, the primary transcript is selected according to translated
length. For all other transcript types, the length of the transcript
feature itself is used. I'd be eager to hear suggestions for alternative
selection criteria.
Like the `primary_mrna` function, this function **does not** return only
transcript features. It **does** modify gene features to ensure that each
has at most one transcript feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz'))
>>> gene_filter = tag.select.features(reader, type='gene')
>>> trans_filter = tag.transcript.primary_transcript(gene_filter)
>>> for gene in trans_filter:
... assert gene.num_children == 1
In cases where the direct children of a gene feature have heterogenous
types, the `primary_mrna` function will only discard mRNA features. This
function, however, will discard all direct children of the gene that are
not the primary transcript, including non-transcript children. This is a
retty subtle distinction, and anecdotal experience suggests that cases in
which the distinction actually matters are extremely rare.
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue
for parent in tag.select.features(entry, parenttype, traverse=True):
if parent.num_children == 0:
continue
transcripts = defaultdict(list)
for child in parent.children:
if child.type in type_terms:
transcripts[child.type].append(child)
if len(transcripts) == 0:
continue
ttypes = list(transcripts.keys())
ttype = _get_primary_type(ttypes, parent)
transcript_list = transcripts[ttype]
if ttype == 'mRNA':
_emplace_pmrna(transcript_list, parent, strict=True)
else:
_emplace_transcript(transcript_list, parent)
yield entry |
def get_all_supplier_properties(self, params=None):
"""
Get all supplier properties
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {}
return self._iterate_through_pages(
get_function=self.get_supplier_properties_per_page,
resource=SUPPLIER_PROPERTIES,
**{'params': params}
) | Get all supplier properties
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list | Below is the the instruction that describes the task:
### Input:
Get all supplier properties
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
### Response:
def get_all_supplier_properties(self, params=None):
"""
Get all supplier properties
This will iterate over all pages until it gets all elements.
So if the rate limit exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
if not params:
params = {}
return self._iterate_through_pages(
get_function=self.get_supplier_properties_per_page,
resource=SUPPLIER_PROPERTIES,
**{'params': params}
) |
def get_object_data(obj, fields, safe):
"""
Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary.
"""
temp_dict = dict()
for field in fields:
try:
attribute = getattr(obj, str(field))
if isinstance(attribute, list) and all([isinstance(item, models.Model) for item in attribute]):
temp_dict[field] = []
for item in attribute:
temp_dict[field].append(get_object_data(item, get_fields(item), safe)) # Recur
elif isinstance(attribute, models.Model):
attribute_fields = get_fields(attribute)
object_data = get_object_data(attribute, attribute_fields, safe) # Recur
temp_dict[field] = object_data
else:
if not safe:
if isinstance(attribute, basestring):
attribute = cgi.escape(attribute)
temp_dict[field] = attribute
except Exception as e:
logger.info("Unable to get attribute.")
logger.error(e)
continue
return temp_dict | Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary. | Below is the the instruction that describes the task:
### Input:
Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary.
### Response:
def get_object_data(obj, fields, safe):
"""
Given an object and a list of fields, recursively build an object for serialization.
Returns a dictionary.
"""
temp_dict = dict()
for field in fields:
try:
attribute = getattr(obj, str(field))
if isinstance(attribute, list) and all([isinstance(item, models.Model) for item in attribute]):
temp_dict[field] = []
for item in attribute:
temp_dict[field].append(get_object_data(item, get_fields(item), safe)) # Recur
elif isinstance(attribute, models.Model):
attribute_fields = get_fields(attribute)
object_data = get_object_data(attribute, attribute_fields, safe) # Recur
temp_dict[field] = object_data
else:
if not safe:
if isinstance(attribute, basestring):
attribute = cgi.escape(attribute)
temp_dict[field] = attribute
except Exception as e:
logger.info("Unable to get attribute.")
logger.error(e)
continue
return temp_dict |
def WritePathInfos(self, client_id, path_infos):
"""Writes a collection of path_info records for a client."""
try:
self._MultiWritePathInfos({client_id: path_infos})
except MySQLdb.IntegrityError as error:
raise db.UnknownClientError(client_id=client_id, cause=error) | Writes a collection of path_info records for a client. | Below is the the instruction that describes the task:
### Input:
Writes a collection of path_info records for a client.
### Response:
def WritePathInfos(self, client_id, path_infos):
"""Writes a collection of path_info records for a client."""
try:
self._MultiWritePathInfos({client_id: path_infos})
except MySQLdb.IntegrityError as error:
raise db.UnknownClientError(client_id=client_id, cause=error) |
def handle_group(self, text, capture=None, is_format=False):
"""Handle groups."""
if capture is None:
capture = tuple() if self.is_bytes else ''
if len(self.result) > 1:
self.literal_slots.append("".join(self.result))
if is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
del self.result[:]
self.result.append("")
self.slot += 1
elif is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
self.group_slots.append(
(
self.slot,
(
(self.span_stack[-1] if self.span_stack else None),
self.get_single_stack(),
capture
)
)
)
self.slot += 1 | Handle groups. | Below is the the instruction that describes the task:
### Input:
Handle groups.
### Response:
def handle_group(self, text, capture=None, is_format=False):
"""Handle groups."""
if capture is None:
capture = tuple() if self.is_bytes else ''
if len(self.result) > 1:
self.literal_slots.append("".join(self.result))
if is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
del self.result[:]
self.result.append("")
self.slot += 1
elif is_format:
self.literal_slots.extend(["\\g<", text, ">"])
else:
self.literal_slots.append(text)
self.group_slots.append(
(
self.slot,
(
(self.span_stack[-1] if self.span_stack else None),
self.get_single_stack(),
capture
)
)
)
self.slot += 1 |
def preferred_encodings(self):
"""
The list of user defined encodings, for display in the encodings
menu/combobox.
"""
default_encodings = [
locale.getpreferredencoding().lower().replace('-', '_')]
if 'utf_8' not in default_encodings:
default_encodings.append('utf_8')
default_encodings = list(set(default_encodings))
return json.loads(self._settings.value(
'userDefinedEncodings', json.dumps(default_encodings))) | The list of user defined encodings, for display in the encodings
menu/combobox. | Below is the the instruction that describes the task:
### Input:
The list of user defined encodings, for display in the encodings
menu/combobox.
### Response:
def preferred_encodings(self):
"""
The list of user defined encodings, for display in the encodings
menu/combobox.
"""
default_encodings = [
locale.getpreferredencoding().lower().replace('-', '_')]
if 'utf_8' not in default_encodings:
default_encodings.append('utf_8')
default_encodings = list(set(default_encodings))
return json.loads(self._settings.value(
'userDefinedEncodings', json.dumps(default_encodings))) |
def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret | Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2% | Below is the the instruction that describes the task:
### Input:
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
### Response:
def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret |
def c(self):
"""Caching client for not repeapting checks"""
if self._client is None:
self._parse_settings()
self._client = Rumetr(**self.settings)
return self._client | Caching client for not repeapting checks | Below is the the instruction that describes the task:
### Input:
Caching client for not repeapting checks
### Response:
def c(self):
"""Caching client for not repeapting checks"""
if self._client is None:
self._parse_settings()
self._client = Rumetr(**self.settings)
return self._client |
def _slug_strip(self, value):
"""
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value) | Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator. | Below is the the instruction that describes the task:
### Input:
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
### Response:
def _slug_strip(self, value):
"""
Clean up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value) |
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, absolute path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
addon_path = os.path.join(self.extensionsDir, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(self.extensionsDir):
os.makedirs(self.extensionsDir)
os.chmod(self.extensionsDir, 0o755)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir) | Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, absolute path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf | Below is the the instruction that describes the task:
### Input:
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, absolute path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
### Response:
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, absolute path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
addon_path = os.path.join(self.extensionsDir, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(self.extensionsDir):
os.makedirs(self.extensionsDir)
os.chmod(self.extensionsDir, 0o755)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir) |
def __get_uri(self, server):
"""Return the URI for the given server dict."""
# Select the connection mode (with or without password)
if server['password'] != "":
if server['status'] == 'PROTECTED':
# Try with the preconfigure password (only if status is PROTECTED)
clear_password = self.password.get_password(server['name'])
if clear_password is not None:
server['password'] = self.password.sha256_hash(clear_password)
return 'http://{}:{}@{}:{}'.format(server['username'], server['password'],
server['ip'], server['port'])
else:
return 'http://{}:{}'.format(server['ip'], server['port']) | Return the URI for the given server dict. | Below is the the instruction that describes the task:
### Input:
Return the URI for the given server dict.
### Response:
def __get_uri(self, server):
"""Return the URI for the given server dict."""
# Select the connection mode (with or without password)
if server['password'] != "":
if server['status'] == 'PROTECTED':
# Try with the preconfigure password (only if status is PROTECTED)
clear_password = self.password.get_password(server['name'])
if clear_password is not None:
server['password'] = self.password.sha256_hash(clear_password)
return 'http://{}:{}@{}:{}'.format(server['username'], server['password'],
server['ip'], server['port'])
else:
return 'http://{}:{}'.format(server['ip'], server['port']) |
def p_command(p):
'''command : simple_command
| shell_command
| shell_command redirection_list
| function_def
| coproc'''
if isinstance(p[1], ast.node):
p[0] = p[1]
if len(p) == 3:
assert p[0].kind == 'compound'
p[0].redirects.extend(p[2])
assert p[0].pos[0] < p[0].redirects[-1].pos[1]
p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1])
else:
p[0] = ast.node(kind='command', parts=p[1], pos=_partsspan(p[1])) | command : simple_command
| shell_command
| shell_command redirection_list
| function_def
| coproc | Below is the the instruction that describes the task:
### Input:
command : simple_command
| shell_command
| shell_command redirection_list
| function_def
| coproc
### Response:
def p_command(p):
'''command : simple_command
| shell_command
| shell_command redirection_list
| function_def
| coproc'''
if isinstance(p[1], ast.node):
p[0] = p[1]
if len(p) == 3:
assert p[0].kind == 'compound'
p[0].redirects.extend(p[2])
assert p[0].pos[0] < p[0].redirects[-1].pos[1]
p[0].pos = (p[0].pos[0], p[0].redirects[-1].pos[1])
else:
p[0] = ast.node(kind='command', parts=p[1], pos=_partsspan(p[1])) |
def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict | update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with | Below is the the instruction that describes the task:
### Input:
update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
### Response:
def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict |
def setup_multiifo_interval_coinc_inj(workflow, hdfbank, full_data_trig_files, inj_trig_files,
stat_files, background_file, veto_file, veto_name,
out_dir, pivot_ifo, fixed_ifo, tags=None):
"""
This function sets up exact match multiifo coincidence for injections
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up coincidence for injections')
if len(hdfbank) != 1:
raise ValueError('Must use exactly 1 bank file for this coincidence '
'method, I got %i !' % len(hdfbank))
hdfbank = hdfbank[0]
# Wall time knob and memory knob
factor = int(workflow.cp.get_opt_tags('workflow-coincidence', 'parallelization-factor', tags))
ffiles = {}
ifiles = {}
for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('ifo')):
ffiles[ifo] = ffi[0]
for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('ifo')):
ifiles[ifo] = ifi[0]
injinj_files = FileList()
injfull_files = FileList()
fullinj_files = FileList()
# For the injfull and fullinj separation we take the pivot_ifo on one side,
# and the rest that are attached to the fixed_ifo on the other side
for ifo in ifiles: # ifiles is keyed on ifo
if ifo == pivot_ifo:
injinj_files.append(ifiles[ifo])
injfull_files.append(ifiles[ifo])
fullinj_files.append(ffiles[ifo])
else:
injinj_files.append(ifiles[ifo])
injfull_files.append(ffiles[ifo])
fullinj_files.append(ifiles[ifo])
combo = [(injinj_files, "injinj"),
(injfull_files, "injfull"),
(fullinj_files, "fullinj"),
]
bg_files = {'injinj':[], 'injfull':[], 'fullinj':[]}
for trig_files, ctag in combo:
findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp,
'multiifo_coinc',
ifos=ifiles.keys(),
tags=tags + [ctag],
out_dir=out_dir)
for i in range(factor):
group_str = '%s/%s' % (i, factor)
coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
pivot_ifo,
fixed_ifo,
tags=[veto_name, str(i)])
bg_files[ctag] += coinc_node.output_files
workflow.add_node(coinc_node)
logging.info('...leaving coincidence for injections')
return setup_multiifo_statmap_inj(workflow, ifiles.keys(), bg_files, background_file, out_dir, tags=tags + [veto_name]) | This function sets up exact match multiifo coincidence for injections | Below is the the instruction that describes the task:
### Input:
This function sets up exact match multiifo coincidence for injections
### Response:
def setup_multiifo_interval_coinc_inj(workflow, hdfbank, full_data_trig_files, inj_trig_files,
stat_files, background_file, veto_file, veto_name,
out_dir, pivot_ifo, fixed_ifo, tags=None):
"""
This function sets up exact match multiifo coincidence for injections
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up coincidence for injections')
if len(hdfbank) != 1:
raise ValueError('Must use exactly 1 bank file for this coincidence '
'method, I got %i !' % len(hdfbank))
hdfbank = hdfbank[0]
# Wall time knob and memory knob
factor = int(workflow.cp.get_opt_tags('workflow-coincidence', 'parallelization-factor', tags))
ffiles = {}
ifiles = {}
for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('ifo')):
ffiles[ifo] = ffi[0]
for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('ifo')):
ifiles[ifo] = ifi[0]
injinj_files = FileList()
injfull_files = FileList()
fullinj_files = FileList()
# For the injfull and fullinj separation we take the pivot_ifo on one side,
# and the rest that are attached to the fixed_ifo on the other side
for ifo in ifiles: # ifiles is keyed on ifo
if ifo == pivot_ifo:
injinj_files.append(ifiles[ifo])
injfull_files.append(ifiles[ifo])
fullinj_files.append(ffiles[ifo])
else:
injinj_files.append(ifiles[ifo])
injfull_files.append(ffiles[ifo])
fullinj_files.append(ifiles[ifo])
combo = [(injinj_files, "injinj"),
(injfull_files, "injfull"),
(fullinj_files, "fullinj"),
]
bg_files = {'injinj':[], 'injfull':[], 'fullinj':[]}
for trig_files, ctag in combo:
findcoinc_exe = PyCBCFindMultiifoCoincExecutable(workflow.cp,
'multiifo_coinc',
ifos=ifiles.keys(),
tags=tags + [ctag],
out_dir=out_dir)
for i in range(factor):
group_str = '%s/%s' % (i, factor)
coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
pivot_ifo,
fixed_ifo,
tags=[veto_name, str(i)])
bg_files[ctag] += coinc_node.output_files
workflow.add_node(coinc_node)
logging.info('...leaving coincidence for injections')
return setup_multiifo_statmap_inj(workflow, ifiles.keys(), bg_files, background_file, out_dir, tags=tags + [veto_name]) |
def _get_format_from_filename(file, mode):
"""Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object).
"""
format = ''
file = getattr(file, 'name', file)
try:
# This raises an exception if file is not a (Unicode/byte) string:
format = _os.path.splitext(file)[-1][1:]
# Convert bytes to unicode (raises AttributeError on Python 3 str):
format = format.decode('utf-8', 'replace')
except Exception:
pass
if format.upper() not in _formats and 'r' not in mode:
raise TypeError("No format specified and unable to get format from "
"file extension: {0!r}".format(file))
return format | Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object). | Below is the the instruction that describes the task:
### Input:
Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object).
### Response:
def _get_format_from_filename(file, mode):
"""Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object).
"""
format = ''
file = getattr(file, 'name', file)
try:
# This raises an exception if file is not a (Unicode/byte) string:
format = _os.path.splitext(file)[-1][1:]
# Convert bytes to unicode (raises AttributeError on Python 3 str):
format = format.decode('utf-8', 'replace')
except Exception:
pass
if format.upper() not in _formats and 'r' not in mode:
raise TypeError("No format specified and unable to get format from "
"file extension: {0!r}".format(file))
return format |
def create(cls, url, databases):
"""
If databases is an empty list, all databases present in the server will become accessible via the endpoint,
with the _system database being the default database.
If databases is non-empty, only the specified databases will become available via the endpoint.
The first database name in the databases list will also become the default database for the endpoint.
The default database will always be used if a request coming in on the endpoint does not specify
the database name explicitly.
*Note*: adding or reconfiguring endpoints is allowed in the system database only.
Calling this action in any other database will make the server return an error.
Adding SSL endpoints at runtime is only supported if the server was started with SSL
properly configured (e.g. --server.keyfile must have been set).
:param url the endpoint specification, e.g. tcp://127.0.0.1:8530
:param databases a list of database names the endpoint is responsible for.
"""
api = Client.instance().api
result = api.endpoint.post(data={
'endpoint': url,
'databases': databases,
})
return result | If databases is an empty list, all databases present in the server will become accessible via the endpoint,
with the _system database being the default database.
If databases is non-empty, only the specified databases will become available via the endpoint.
The first database name in the databases list will also become the default database for the endpoint.
The default database will always be used if a request coming in on the endpoint does not specify
the database name explicitly.
*Note*: adding or reconfiguring endpoints is allowed in the system database only.
Calling this action in any other database will make the server return an error.
Adding SSL endpoints at runtime is only supported if the server was started with SSL
properly configured (e.g. --server.keyfile must have been set).
:param url the endpoint specification, e.g. tcp://127.0.0.1:8530
:param databases a list of database names the endpoint is responsible for. | Below is the the instruction that describes the task:
### Input:
If databases is an empty list, all databases present in the server will become accessible via the endpoint,
with the _system database being the default database.
If databases is non-empty, only the specified databases will become available via the endpoint.
The first database name in the databases list will also become the default database for the endpoint.
The default database will always be used if a request coming in on the endpoint does not specify
the database name explicitly.
*Note*: adding or reconfiguring endpoints is allowed in the system database only.
Calling this action in any other database will make the server return an error.
Adding SSL endpoints at runtime is only supported if the server was started with SSL
properly configured (e.g. --server.keyfile must have been set).
:param url the endpoint specification, e.g. tcp://127.0.0.1:8530
:param databases a list of database names the endpoint is responsible for.
### Response:
def create(cls, url, databases):
"""
If databases is an empty list, all databases present in the server will become accessible via the endpoint,
with the _system database being the default database.
If databases is non-empty, only the specified databases will become available via the endpoint.
The first database name in the databases list will also become the default database for the endpoint.
The default database will always be used if a request coming in on the endpoint does not specify
the database name explicitly.
*Note*: adding or reconfiguring endpoints is allowed in the system database only.
Calling this action in any other database will make the server return an error.
Adding SSL endpoints at runtime is only supported if the server was started with SSL
properly configured (e.g. --server.keyfile must have been set).
:param url the endpoint specification, e.g. tcp://127.0.0.1:8530
:param databases a list of database names the endpoint is responsible for.
"""
api = Client.instance().api
result = api.endpoint.post(data={
'endpoint': url,
'databases': databases,
})
return result |
def _post(self, *args, **kwargs):
"""
A wrapper for posting things. It will also json encode your 'data' parameter
:returns: The response of your post
:rtype: dict
"""
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
response = requests.post(*args, **kwargs)
response.raise_for_status() | A wrapper for posting things. It will also json encode your 'data' parameter
:returns: The response of your post
:rtype: dict | Below is the the instruction that describes the task:
### Input:
A wrapper for posting things. It will also json encode your 'data' parameter
:returns: The response of your post
:rtype: dict
### Response:
def _post(self, *args, **kwargs):
"""
A wrapper for posting things. It will also json encode your 'data' parameter
:returns: The response of your post
:rtype: dict
"""
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
response = requests.post(*args, **kwargs)
response.raise_for_status() |
def is_dir(self, remote_path):
"""Checks is the remote resource directory.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:return: True in case the remote resource is directory and False otherwise.
"""
urn = Urn(remote_path)
parent_urn = Urn(urn.parent())
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = self.execute_request(action='info', path=parent_urn.quote())
path = self.get_full_path(urn)
return WebDavXmlUtils.parse_is_dir_response(content=response.content, path=path, hostname=self.webdav.hostname) | Checks is the remote resource directory.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:return: True in case the remote resource is directory and False otherwise. | Below is the the instruction that describes the task:
### Input:
Checks is the remote resource directory.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:return: True in case the remote resource is directory and False otherwise.
### Response:
def is_dir(self, remote_path):
"""Checks is the remote resource directory.
More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to remote resource.
:return: True in case the remote resource is directory and False otherwise.
"""
urn = Urn(remote_path)
parent_urn = Urn(urn.parent())
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = self.execute_request(action='info', path=parent_urn.quote())
path = self.get_full_path(urn)
return WebDavXmlUtils.parse_is_dir_response(content=response.content, path=path, hostname=self.webdav.hostname) |
def fcoe_networks(self):
"""
Gets the FcoeNetworks API client.
Returns:
FcoeNetworks:
"""
if not self.__fcoe_networks:
self.__fcoe_networks = FcoeNetworks(self.__connection)
return self.__fcoe_networks | Gets the FcoeNetworks API client.
Returns:
FcoeNetworks: | Below is the the instruction that describes the task:
### Input:
Gets the FcoeNetworks API client.
Returns:
FcoeNetworks:
### Response:
def fcoe_networks(self):
"""
Gets the FcoeNetworks API client.
Returns:
FcoeNetworks:
"""
if not self.__fcoe_networks:
self.__fcoe_networks = FcoeNetworks(self.__connection)
return self.__fcoe_networks |
def register_instance(self, instance: Any, allow_dotted_names: bool=False):
"""注册一个实例用于执行,注意只能注册一个.
Parameters:
instance (Any): - 将一个类的实例注册到rpc
allow_dotted_names (bool): 是否允许带`.`号的名字
"""
if self.instance:
raise RuntimeError("can only register one instance")
self.instance = instance
self.allow_dotted_names = allow_dotted_names
return True | 注册一个实例用于执行,注意只能注册一个.
Parameters:
instance (Any): - 将一个类的实例注册到rpc
allow_dotted_names (bool): 是否允许带`.`号的名字 | Below is the the instruction that describes the task:
### Input:
注册一个实例用于执行,注意只能注册一个.
Parameters:
instance (Any): - 将一个类的实例注册到rpc
allow_dotted_names (bool): 是否允许带`.`号的名字
### Response:
def register_instance(self, instance: Any, allow_dotted_names: bool=False):
"""注册一个实例用于执行,注意只能注册一个.
Parameters:
instance (Any): - 将一个类的实例注册到rpc
allow_dotted_names (bool): 是否允许带`.`号的名字
"""
if self.instance:
raise RuntimeError("can only register one instance")
self.instance = instance
self.allow_dotted_names = allow_dotted_names
return True |
def tag(self, tag):
'''
.. seealso:: :attr:`tag`
'''
t = self.tag
if t:
if not tag:
tag = t[-1]
if tag in t:
self._checkout(treeish=tag) | .. seealso:: :attr:`tag` | Below is the the instruction that describes the task:
### Input:
.. seealso:: :attr:`tag`
### Response:
def tag(self, tag):
'''
.. seealso:: :attr:`tag`
'''
t = self.tag
if t:
if not tag:
tag = t[-1]
if tag in t:
self._checkout(treeish=tag) |
def copy(self):
"""Returns a new :class:`~pyinter.Interval` object with the same bounds and values."""
return Interval(self._lower, self._lower_value, self._upper_value, self._upper) | Returns a new :class:`~pyinter.Interval` object with the same bounds and values. | Below is the the instruction that describes the task:
### Input:
Returns a new :class:`~pyinter.Interval` object with the same bounds and values.
### Response:
def copy(self):
"""Returns a new :class:`~pyinter.Interval` object with the same bounds and values."""
return Interval(self._lower, self._lower_value, self._upper_value, self._upper) |
def create_relay(self, orgid, data):
"""Create relay settings"""
return self.api_call(
ENDPOINTS['relays']['new'],
dict(orgid=orgid), body=data) | Create relay settings | Below is the the instruction that describes the task:
### Input:
Create relay settings
### Response:
def create_relay(self, orgid, data):
"""Create relay settings"""
return self.api_call(
ENDPOINTS['relays']['new'],
dict(orgid=orgid), body=data) |
def _set_label(label, mark, dim, **kwargs):
"""Helper function to set labels for an axis
"""
if mark is None:
mark = _context['last_mark']
if mark is None:
return {}
fig = kwargs.get('figure', current_figure())
scales = mark.scales
scale_metadata = mark.scales_metadata.get(dim, {})
scale = scales.get(dim, None)
if scale is None:
return
dimension = scale_metadata.get('dimension', scales[dim])
axis = _fetch_axis(fig, dimension, scales[dim])
if axis is not None:
_apply_properties(axis, {'label': label}) | Helper function to set labels for an axis | Below is the the instruction that describes the task:
### Input:
Helper function to set labels for an axis
### Response:
def _set_label(label, mark, dim, **kwargs):
"""Helper function to set labels for an axis
"""
if mark is None:
mark = _context['last_mark']
if mark is None:
return {}
fig = kwargs.get('figure', current_figure())
scales = mark.scales
scale_metadata = mark.scales_metadata.get(dim, {})
scale = scales.get(dim, None)
if scale is None:
return
dimension = scale_metadata.get('dimension', scales[dim])
axis = _fetch_axis(fig, dimension, scales[dim])
if axis is not None:
_apply_properties(axis, {'label': label}) |
def addNewTopology(self, state_manager, topologyName):
"""
Adds a topology in the local cache, and sets a watch
on any changes on the topology.
"""
topology = Topology(topologyName, state_manager.name)
Log.info("Adding new topology: %s, state_manager: %s",
topologyName, state_manager.name)
self.topologies.append(topology)
# Register a watch on topology and change
# the topologyInfo on any new change.
topology.register_watch(self.setTopologyInfo)
def on_topology_pplan(data):
"""watch physical plan"""
Log.info("Watch triggered for topology pplan: " + topologyName)
topology.set_physical_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_packing_plan(data):
"""watch packing plan"""
Log.info("Watch triggered for topology packing plan: " + topologyName)
topology.set_packing_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_execution_state(data):
"""watch execution state"""
Log.info("Watch triggered for topology execution state: " + topologyName)
topology.set_execution_state(data)
if not data:
Log.debug("No data to be set")
def on_topology_tmaster(data):
"""set tmaster"""
Log.info("Watch triggered for topology tmaster: " + topologyName)
topology.set_tmaster(data)
if not data:
Log.debug("No data to be set")
def on_topology_scheduler_location(data):
"""set scheduler location"""
Log.info("Watch triggered for topology scheduler location: " + topologyName)
topology.set_scheduler_location(data)
if not data:
Log.debug("No data to be set")
# Set watches on the pplan, execution_state, tmaster and scheduler_location.
state_manager.get_pplan(topologyName, on_topology_pplan)
state_manager.get_packing_plan(topologyName, on_topology_packing_plan)
state_manager.get_execution_state(topologyName, on_topology_execution_state)
state_manager.get_tmaster(topologyName, on_topology_tmaster)
state_manager.get_scheduler_location(topologyName, on_topology_scheduler_location) | Adds a topology in the local cache, and sets a watch
on any changes on the topology. | Below is the the instruction that describes the task:
### Input:
Adds a topology in the local cache, and sets a watch
on any changes on the topology.
### Response:
def addNewTopology(self, state_manager, topologyName):
"""
Adds a topology in the local cache, and sets a watch
on any changes on the topology.
"""
topology = Topology(topologyName, state_manager.name)
Log.info("Adding new topology: %s, state_manager: %s",
topologyName, state_manager.name)
self.topologies.append(topology)
# Register a watch on topology and change
# the topologyInfo on any new change.
topology.register_watch(self.setTopologyInfo)
def on_topology_pplan(data):
"""watch physical plan"""
Log.info("Watch triggered for topology pplan: " + topologyName)
topology.set_physical_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_packing_plan(data):
"""watch packing plan"""
Log.info("Watch triggered for topology packing plan: " + topologyName)
topology.set_packing_plan(data)
if not data:
Log.debug("No data to be set")
def on_topology_execution_state(data):
"""watch execution state"""
Log.info("Watch triggered for topology execution state: " + topologyName)
topology.set_execution_state(data)
if not data:
Log.debug("No data to be set")
def on_topology_tmaster(data):
"""set tmaster"""
Log.info("Watch triggered for topology tmaster: " + topologyName)
topology.set_tmaster(data)
if not data:
Log.debug("No data to be set")
def on_topology_scheduler_location(data):
"""set scheduler location"""
Log.info("Watch triggered for topology scheduler location: " + topologyName)
topology.set_scheduler_location(data)
if not data:
Log.debug("No data to be set")
# Set watches on the pplan, execution_state, tmaster and scheduler_location.
state_manager.get_pplan(topologyName, on_topology_pplan)
state_manager.get_packing_plan(topologyName, on_topology_packing_plan)
state_manager.get_execution_state(topologyName, on_topology_execution_state)
state_manager.get_tmaster(topologyName, on_topology_tmaster)
state_manager.get_scheduler_location(topologyName, on_topology_scheduler_location) |
def star(args):
"""
%prog star folder reference
Run star on a folder with reads.
"""
p = OptionParser(star.__doc__)
p.add_option("--single", default=False, action="store_true",
help="Single end mapping")
p.set_fastq_names()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
folder, reference = args
cpus = opts.cpus
mm = MakeManager()
num = 1 if opts.single else 2
folder, reference = args
gd = "GenomeDir"
mkdir(gd)
STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)
# Step 0: build genome index
genomeidx = op.join(gd, "Genome")
if need_update(reference, genomeidx):
cmd = STAR + " --runMode genomeGenerate"
cmd += " --genomeFastaFiles {0}".format(reference)
mm.add(reference, genomeidx, cmd)
# Step 1: align
for p, prefix in iter_project(folder, opts.names, num):
pf = "{0}_star".format(prefix)
bamfile = pf + "Aligned.sortedByCoord.out.bam"
cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
if p[0].endswith(".gz"):
cmd += " --readFilesCommand zcat"
cmd += " --outSAMtype BAM SortedByCoordinate"
cmd += " --outFileNamePrefix {0}".format(pf)
cmd += " --twopassMode Basic"
# Compatibility for cufflinks
cmd += " --outSAMstrandField intronMotif"
cmd += " --outFilterIntronMotifs RemoveNoncanonical"
mm.add(p, bamfile, cmd)
mm.write() | %prog star folder reference
Run star on a folder with reads. | Below is the the instruction that describes the task:
### Input:
%prog star folder reference
Run star on a folder with reads.
### Response:
def star(args):
"""
%prog star folder reference
Run star on a folder with reads.
"""
p = OptionParser(star.__doc__)
p.add_option("--single", default=False, action="store_true",
help="Single end mapping")
p.set_fastq_names()
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
folder, reference = args
cpus = opts.cpus
mm = MakeManager()
num = 1 if opts.single else 2
folder, reference = args
gd = "GenomeDir"
mkdir(gd)
STAR = "STAR --runThreadN {0} --genomeDir {1}".format(cpus, gd)
# Step 0: build genome index
genomeidx = op.join(gd, "Genome")
if need_update(reference, genomeidx):
cmd = STAR + " --runMode genomeGenerate"
cmd += " --genomeFastaFiles {0}".format(reference)
mm.add(reference, genomeidx, cmd)
# Step 1: align
for p, prefix in iter_project(folder, opts.names, num):
pf = "{0}_star".format(prefix)
bamfile = pf + "Aligned.sortedByCoord.out.bam"
cmd = STAR + " --readFilesIn {0}".format(" ".join(p))
if p[0].endswith(".gz"):
cmd += " --readFilesCommand zcat"
cmd += " --outSAMtype BAM SortedByCoordinate"
cmd += " --outFileNamePrefix {0}".format(pf)
cmd += " --twopassMode Basic"
# Compatibility for cufflinks
cmd += " --outSAMstrandField intronMotif"
cmd += " --outFilterIntronMotifs RemoveNoncanonical"
mm.add(p, bamfile, cmd)
mm.write() |
def make_filled_array(shp, dtype, order, r, g, b, a):
"""Return a filled array with a color value. order defines the color
planes in the array. (r, g, b, a) are expected to be in the range
0..1 and are scaled to the appropriate values.
shp can define a 2D or 3D array.
"""
# TODO: can we make this more efficient?
maxv = np.iinfo(dtype).max
bgval = dict(A=int(maxv * a), R=int(maxv * r), G=int(maxv * g),
B=int(maxv * b))
bgtup = tuple([bgval[order[i]] for i in range(len(order))])
if dtype is np.uint8 and len(bgtup) == 4:
# optimiztion when dealing with 32-bit RGBA arrays
fill_val = np.array(bgtup, dtype=dtype).view(np.uint32)
rgba = np.zeros(shp, dtype=dtype)
rgba_i = rgba.view(np.uint32)
rgba_i[:] = fill_val
return rgba
return np.full(shp, bgtup, dtype=dtype) | Return a filled array with a color value. order defines the color
planes in the array. (r, g, b, a) are expected to be in the range
0..1 and are scaled to the appropriate values.
shp can define a 2D or 3D array. | Below is the the instruction that describes the task:
### Input:
Return a filled array with a color value. order defines the color
planes in the array. (r, g, b, a) are expected to be in the range
0..1 and are scaled to the appropriate values.
shp can define a 2D or 3D array.
### Response:
def make_filled_array(shp, dtype, order, r, g, b, a):
"""Return a filled array with a color value. order defines the color
planes in the array. (r, g, b, a) are expected to be in the range
0..1 and are scaled to the appropriate values.
shp can define a 2D or 3D array.
"""
# TODO: can we make this more efficient?
maxv = np.iinfo(dtype).max
bgval = dict(A=int(maxv * a), R=int(maxv * r), G=int(maxv * g),
B=int(maxv * b))
bgtup = tuple([bgval[order[i]] for i in range(len(order))])
if dtype is np.uint8 and len(bgtup) == 4:
# optimiztion when dealing with 32-bit RGBA arrays
fill_val = np.array(bgtup, dtype=dtype).view(np.uint32)
rgba = np.zeros(shp, dtype=dtype)
rgba_i = rgba.view(np.uint32)
rgba_i[:] = fill_val
return rgba
return np.full(shp, bgtup, dtype=dtype) |
def get_timefactor(cls) -> float:
"""Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is prefered:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
"""
try:
parfactor = hydpy.pub.timegrids.parfactor
except RuntimeError:
if not (cls.parameterstep and cls.simulationstep):
raise RuntimeError(
f'To calculate the conversion factor for adapting '
f'the values of the time-dependent parameters, '
f'you need to define both a parameter and a simulation '
f'time step size first.')
else:
date1 = timetools.Date('2000.01.01')
date2 = date1 + cls.simulationstep
parfactor = timetools.Timegrids(timetools.Timegrid(
date1, date2, cls.simulationstep)).parfactor
return parfactor(cls.parameterstep) | Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is prefered:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5 | Below is the the instruction that describes the task:
### Input:
Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is prefered:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
### Response:
def get_timefactor(cls) -> float:
"""Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is prefered:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
"""
try:
parfactor = hydpy.pub.timegrids.parfactor
except RuntimeError:
if not (cls.parameterstep and cls.simulationstep):
raise RuntimeError(
f'To calculate the conversion factor for adapting '
f'the values of the time-dependent parameters, '
f'you need to define both a parameter and a simulation '
f'time step size first.')
else:
date1 = timetools.Date('2000.01.01')
date2 = date1 + cls.simulationstep
parfactor = timetools.Timegrids(timetools.Timegrid(
date1, date2, cls.simulationstep)).parfactor
return parfactor(cls.parameterstep) |
def _build_app_dict(site, request, label=None):
"""
Builds the app dictionary. Takes an optional label parameters to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in site._registry.items()
if m._meta.app_label == label
}
else:
models = site._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=site.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=site.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=site.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict | Builds the app dictionary. Takes an optional label parameters to filter
models of a specific app. | Below is the the instruction that describes the task:
### Input:
Builds the app dictionary. Takes an optional label parameters to filter
models of a specific app.
### Response:
def _build_app_dict(site, request, label=None):
"""
Builds the app dictionary. Takes an optional label parameters to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in site._registry.items()
if m._meta.app_label == label
}
else:
models = site._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=site.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=site.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=site.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict |
def reformat(found_sequences):
'''Truncate the FASTA headers so that the first field is a 4-character ID.'''
for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
header = sequence[0]
assert(header[0] == '>')
tokens = header.split('|')
tokens[0] = tokens[0][:5]
assert(len(tokens[0]) == 5)
sequence[0] = "|".join(tokens) | Truncate the FASTA headers so that the first field is a 4-character ID. | Below is the the instruction that describes the task:
### Input:
Truncate the FASTA headers so that the first field is a 4-character ID.
### Response:
def reformat(found_sequences):
'''Truncate the FASTA headers so that the first field is a 4-character ID.'''
for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
header = sequence[0]
assert(header[0] == '>')
tokens = header.split('|')
tokens[0] = tokens[0][:5]
assert(len(tokens[0]) == 5)
sequence[0] = "|".join(tokens) |
def _verify(leniency, numobj, candidate, matcher):
"""Returns True if number is a verified number according to the
leniency."""
if leniency == Leniency.POSSIBLE:
return is_possible_number(numobj)
elif leniency == Leniency.VALID:
if (not is_valid_number(numobj) or
not _contains_only_valid_x_chars(numobj, candidate)):
return False
return _is_national_prefix_present_if_required(numobj)
elif leniency == Leniency.STRICT_GROUPING:
return _verify_strict_grouping(numobj, candidate, matcher)
elif leniency == Leniency.EXACT_GROUPING:
return _verify_exact_grouping(numobj, candidate, matcher)
else:
raise Exception("Error: unsupported Leniency value %s" % leniency) | Returns True if number is a verified number according to the
leniency. | Below is the the instruction that describes the task:
### Input:
Returns True if number is a verified number according to the
leniency.
### Response:
def _verify(leniency, numobj, candidate, matcher):
"""Returns True if number is a verified number according to the
leniency."""
if leniency == Leniency.POSSIBLE:
return is_possible_number(numobj)
elif leniency == Leniency.VALID:
if (not is_valid_number(numobj) or
not _contains_only_valid_x_chars(numobj, candidate)):
return False
return _is_national_prefix_present_if_required(numobj)
elif leniency == Leniency.STRICT_GROUPING:
return _verify_strict_grouping(numobj, candidate, matcher)
elif leniency == Leniency.EXACT_GROUPING:
return _verify_exact_grouping(numobj, candidate, matcher)
else:
raise Exception("Error: unsupported Leniency value %s" % leniency) |
def set_inasafe_default_value_qsetting(
qsetting, category, inasafe_field_key, value):
"""Helper method to set inasafe default value to qsetting.
:param qsetting: QSettings.
:type qsetting: QSettings
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:param value: Value of the inasafe_default_value.
:type value: float, int
"""
key = 'inasafe/default_value/%s/%s' % (category, inasafe_field_key)
qsetting.setValue(key, value) | Helper method to set inasafe default value to qsetting.
:param qsetting: QSettings.
:type qsetting: QSettings
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:param value: Value of the inasafe_default_value.
:type value: float, int | Below is the the instruction that describes the task:
### Input:
Helper method to set inasafe default value to qsetting.
:param qsetting: QSettings.
:type qsetting: QSettings
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:param value: Value of the inasafe_default_value.
:type value: float, int
### Response:
def set_inasafe_default_value_qsetting(
qsetting, category, inasafe_field_key, value):
"""Helper method to set inasafe default value to qsetting.
:param qsetting: QSettings.
:type qsetting: QSettings
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:param value: Value of the inasafe_default_value.
:type value: float, int
"""
key = 'inasafe/default_value/%s/%s' % (category, inasafe_field_key)
qsetting.setValue(key, value) |
def show_as(**mappings):
"""
Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4())
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
g.show_request_fields = mappings
g.show_response_fields = mappings
return func(*args, **kwargs)
return wrapper
return decorator | Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4()) | Below is the the instruction that describes the task:
### Input:
Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4())
### Response:
def show_as(**mappings):
"""
Show a set of request and/or response fields in logs using a different key.
Example:
@show_as(id="foo_id")
def create_foo():
return Foo(id=uuid4())
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
g.show_request_fields = mappings
g.show_response_fields = mappings
return func(*args, **kwargs)
return wrapper
return decorator |
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
self.trigger_id = trigger_id
trigger = Wallabag.objects.get(trigger_id=trigger_id)
title = self.set_title(data)
if title is not None:
# convert htmlentities
title = HtmlEntities(title).html_entity_decode
return self._create_entry(title, data, trigger.tag)
else:
# we ignore data without title so return True to let
# the process continue without
# raising exception
return True | let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean | Below is the the instruction that describes the task:
### Input:
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
### Response:
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
self.trigger_id = trigger_id
trigger = Wallabag.objects.get(trigger_id=trigger_id)
title = self.set_title(data)
if title is not None:
# convert htmlentities
title = HtmlEntities(title).html_entity_decode
return self._create_entry(title, data, trigger.tag)
else:
# we ignore data without title so return True to let
# the process continue without
# raising exception
return True |
def orderrun_detail(self, kitchen, pdict, return_all_data=False):
"""
api.add_resource(OrderDetailsV2, '/v2/order/details/<string:kitchenname>', methods=['POST'])
:param self: DKCloudAPI
:param kitchen: string
:param pdict: dict
:param return_all_data: boolean
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with kitchen')
return rc
url = '%s/v2/order/details/%s' % (self.get_url_for_direct_rest_call(),
kitchen)
try:
response = requests.post(url, data=json.dumps(pdict), headers=self._get_common_headers())
rdict = self._get_json(response)
if False:
import pickle
pickle.dump(rdict, open("files/orderrun_detail.p", "wb"))
pass
except (RequestException, ValueError), c:
s = "orderrun_detail: exception: %s" % str(c)
rc.set(rc.DK_FAIL, s)
return rc
if DKCloudAPI._valid_response(response):
if return_all_data is False:
rc.set(rc.DK_SUCCESS, None, rdict['servings'])
else:
rc.set(rc.DK_SUCCESS, None, rdict)
return rc
else:
arc = DKAPIReturnCode(rdict, response)
rc.set(rc.DK_FAIL, arc.get_message())
return rc | api.add_resource(OrderDetailsV2, '/v2/order/details/<string:kitchenname>', methods=['POST'])
:param self: DKCloudAPI
:param kitchen: string
:param pdict: dict
:param return_all_data: boolean
:rtype: DKReturnCode | Below is the the instruction that describes the task:
### Input:
api.add_resource(OrderDetailsV2, '/v2/order/details/<string:kitchenname>', methods=['POST'])
:param self: DKCloudAPI
:param kitchen: string
:param pdict: dict
:param return_all_data: boolean
:rtype: DKReturnCode
### Response:
def orderrun_detail(self, kitchen, pdict, return_all_data=False):
"""
api.add_resource(OrderDetailsV2, '/v2/order/details/<string:kitchenname>', methods=['POST'])
:param self: DKCloudAPI
:param kitchen: string
:param pdict: dict
:param return_all_data: boolean
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with kitchen')
return rc
url = '%s/v2/order/details/%s' % (self.get_url_for_direct_rest_call(),
kitchen)
try:
response = requests.post(url, data=json.dumps(pdict), headers=self._get_common_headers())
rdict = self._get_json(response)
if False:
import pickle
pickle.dump(rdict, open("files/orderrun_detail.p", "wb"))
pass
except (RequestException, ValueError), c:
s = "orderrun_detail: exception: %s" % str(c)
rc.set(rc.DK_FAIL, s)
return rc
if DKCloudAPI._valid_response(response):
if return_all_data is False:
rc.set(rc.DK_SUCCESS, None, rdict['servings'])
else:
rc.set(rc.DK_SUCCESS, None, rdict)
return rc
else:
arc = DKAPIReturnCode(rdict, response)
rc.set(rc.DK_FAIL, arc.get_message())
return rc |
def fill_missing(self, value=np.nan):
r"""Replace missing value to "value".
Parameters:
value: value that missing value is replaced
Returns:
Result
"""
return self.__class__(
self.mol,
[(value if is_missing(v) else v) for v in self.values()],
self.keys(),
) | r"""Replace missing value to "value".
Parameters:
value: value that missing value is replaced
Returns:
Result | Below is the the instruction that describes the task:
### Input:
r"""Replace missing value to "value".
Parameters:
value: value that missing value is replaced
Returns:
Result
### Response:
def fill_missing(self, value=np.nan):
r"""Replace missing value to "value".
Parameters:
value: value that missing value is replaced
Returns:
Result
"""
return self.__class__(
self.mol,
[(value if is_missing(v) else v) for v in self.values()],
self.keys(),
) |
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets | Subsets that make up each split of the dataset for the language pair. | Below is the the instruction that describes the task:
### Input:
Subsets that make up each split of the dataset for the language pair.
### Response:
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subsets[split] = []
for ss_name in ss_names:
ds = DATASET_MAP[ss_name]
if ds.target != target or source not in ds.sources:
logging.info(
"Skipping sub-dataset that does not include language pair: %s",
ss_name)
else:
filtered_subsets[split].append(ss_name)
logging.info("Using sub-datasets: %s", filtered_subsets)
return filtered_subsets |
def competence(s):
"""
The competence function for MatrixMetropolis
"""
# MatrixMetropolis handles the Wishart family, which are valued as
# _symmetric_ matrices.
if any([isinstance(s, cls)
for cls in [distributions.Wishart, distributions.WishartCov]]):
return 2
else:
return 0 | The competence function for MatrixMetropolis | Below is the the instruction that describes the task:
### Input:
The competence function for MatrixMetropolis
### Response:
def competence(s):
"""
The competence function for MatrixMetropolis
"""
# MatrixMetropolis handles the Wishart family, which are valued as
# _symmetric_ matrices.
if any([isinstance(s, cls)
for cls in [distributions.Wishart, distributions.WishartCov]]):
return 2
else:
return 0 |
def crust_type_at(lat=None, lon=None):
"""
lat, lon (degrees)
"""
# Get lon into appropriate format
lats = np.array(lat)
lons = np.array(lon%360)
iVals = ((90.0-lats)%180).astype(np.int)
jVals = (lons%360.0).astype(int)
# i = int((-lat+90.0)%180)
# j = int(lon)
t = _c1_crust_type_lat_lon[iVals,jVals]
# t = _c1_crust_type_lat_lon[i,j]
# des = litho.c1_region_descriptor[t]
return t
return t | lat, lon (degrees) | Below is the the instruction that describes the task:
### Input:
lat, lon (degrees)
### Response:
def crust_type_at(lat=None, lon=None):
"""
lat, lon (degrees)
"""
# Get lon into appropriate format
lats = np.array(lat)
lons = np.array(lon%360)
iVals = ((90.0-lats)%180).astype(np.int)
jVals = (lons%360.0).astype(int)
# i = int((-lat+90.0)%180)
# j = int(lon)
t = _c1_crust_type_lat_lon[iVals,jVals]
# t = _c1_crust_type_lat_lon[i,j]
# des = litho.c1_region_descriptor[t]
return t
return t |
def process_runner(quantity=1, queue=None, backend=None):
'''
Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5
'''
queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
data = process_queue(quantity=quantity, is_runner=True, **queue_kwargs)
for job in data['items']:
__salt__[job['fun']](*job['args'], **job['kwargs']) | Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5 | Below is the the instruction that describes the task:
### Input:
Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5
### Response:
def process_runner(quantity=1, queue=None, backend=None):
'''
Process queued runners
quantity
number of runners to process
queue
queue to insert the runner reference into
backend
backend that to use for the queue
CLI Example:
.. code-block:: bash
salt-run queue.process_runner
salt-run queue.process_runner 5
'''
queue_kwargs = __get_queue_opts(queue=queue, backend=backend)
data = process_queue(quantity=quantity, is_runner=True, **queue_kwargs)
for job in data['items']:
__salt__[job['fun']](*job['args'], **job['kwargs']) |
def draw_MM0(self):
"""
Draws the M/M0 plot in the GUI on canvas3
"""
self.fig3.clf()
self.fig3.text(0.02, 0.96, 'M/M0', {'family': self.font_type, 'fontsize': 10 *
self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
self.mplot = self.fig3.add_axes(
[0.2, 0.15, 0.7, 0.7], frameon=True, facecolor='None')
thermal_x, thermal_y = [], []
thermal_x_bad, thermal_y_bad = [], []
af_x, af_y = [], []
af_x_bad, af_y_bad = [], []
for i in range(len(self.Data[self.s]['zijdblock'])):
step = self.Data[self.s]['zijdblock_steps'][i]
# bad point
if self.Data[self.s]['measurement_flag'][i] == 'b':
if step == "0":
thermal_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
af_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
af_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "C" in step:
thermal_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "T" in step:
af_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
af_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
else:
continue
else:
if step == "0":
thermal_x.append(self.Data[self.s]['zijdblock'][i][0])
af_x.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
af_y.append(self.Data[self.s]['zijdblock'][i]
[3]/self.Data[self.s]['zijdblock'][0][3])
elif "C" in step:
thermal_x.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "T" in step:
af_x.append(self.Data[self.s]['zijdblock'][i][0])
af_y.append(self.Data[self.s]['zijdblock'][i]
[3]/self.Data[self.s]['zijdblock'][0][3])
else:
continue
if len(thermal_x)+len(thermal_x_bad) > self.Data[self.s]['zijdblock_steps'].count('0'):
self.mplot.plot(thermal_x, thermal_y, 'ro-',
markersize=self.MS, lw=1, clip_on=False, zorder=1)
for i in range(len(thermal_x_bad)):
self.mplot.plot([thermal_x_bad[i]], [thermal_y_bad[i]], 'o',
mfc='None', mec='k', markersize=self.MS, clip_on=False, zorder=1)
self.mplot.set_xlabel('Thermal (C)', color='r')
for tl in self.mplot.get_xticklabels():
tl.set_color('r')
self.mplot_af = self.mplot.twiny()
if len(af_x)+len(af_x_bad) > self.Data[self.s]['zijdblock_steps'].count('0'):
self.mplot_af.plot(
af_x, af_y, 'bo-', markersize=self.MS, lw=1, clip_on=False, zorder=1)
for i in range(len(af_x_bad)):
self.mplot_af.plot([af_x_bad[i]], [
af_y_bad[i]], 'o', mfc='None', mec='k', markersize=self.MS, clip_on=False, zorder=1)
self.mplot_af.set_xlabel('AF (mT)', color='b')
for tl in self.mplot_af.get_xticklabels():
tl.set_color('b')
self.mplot.tick_params(axis='both', which='major', labelsize=7)
self.mplot_af.tick_params(axis='both', which='major', labelsize=7)
self.mplot.spines["right"].set_visible(False)
self.mplot_af.spines["right"].set_visible(False)
self.mplot.get_xaxis().tick_bottom()
self.mplot.get_yaxis().tick_left()
self.mplot.set_ylabel("M / NRM0", fontsize=8*self.GUI_RESOLUTION)
self.canvas3.draw() | Draws the M/M0 plot in the GUI on canvas3 | Below is the the instruction that describes the task:
### Input:
Draws the M/M0 plot in the GUI on canvas3
### Response:
def draw_MM0(self):
"""
Draws the M/M0 plot in the GUI on canvas3
"""
self.fig3.clf()
self.fig3.text(0.02, 0.96, 'M/M0', {'family': self.font_type, 'fontsize': 10 *
self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
self.mplot = self.fig3.add_axes(
[0.2, 0.15, 0.7, 0.7], frameon=True, facecolor='None')
thermal_x, thermal_y = [], []
thermal_x_bad, thermal_y_bad = [], []
af_x, af_y = [], []
af_x_bad, af_y_bad = [], []
for i in range(len(self.Data[self.s]['zijdblock'])):
step = self.Data[self.s]['zijdblock_steps'][i]
# bad point
if self.Data[self.s]['measurement_flag'][i] == 'b':
if step == "0":
thermal_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
af_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
af_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "C" in step:
thermal_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "T" in step:
af_x_bad.append(self.Data[self.s]['zijdblock'][i][0])
af_y_bad.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
else:
continue
else:
if step == "0":
thermal_x.append(self.Data[self.s]['zijdblock'][i][0])
af_x.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
af_y.append(self.Data[self.s]['zijdblock'][i]
[3]/self.Data[self.s]['zijdblock'][0][3])
elif "C" in step:
thermal_x.append(self.Data[self.s]['zijdblock'][i][0])
thermal_y.append(
self.Data[self.s]['zijdblock'][i][3]/self.Data[self.s]['zijdblock'][0][3])
elif "T" in step:
af_x.append(self.Data[self.s]['zijdblock'][i][0])
af_y.append(self.Data[self.s]['zijdblock'][i]
[3]/self.Data[self.s]['zijdblock'][0][3])
else:
continue
if len(thermal_x)+len(thermal_x_bad) > self.Data[self.s]['zijdblock_steps'].count('0'):
self.mplot.plot(thermal_x, thermal_y, 'ro-',
markersize=self.MS, lw=1, clip_on=False, zorder=1)
for i in range(len(thermal_x_bad)):
self.mplot.plot([thermal_x_bad[i]], [thermal_y_bad[i]], 'o',
mfc='None', mec='k', markersize=self.MS, clip_on=False, zorder=1)
self.mplot.set_xlabel('Thermal (C)', color='r')
for tl in self.mplot.get_xticklabels():
tl.set_color('r')
self.mplot_af = self.mplot.twiny()
if len(af_x)+len(af_x_bad) > self.Data[self.s]['zijdblock_steps'].count('0'):
self.mplot_af.plot(
af_x, af_y, 'bo-', markersize=self.MS, lw=1, clip_on=False, zorder=1)
for i in range(len(af_x_bad)):
self.mplot_af.plot([af_x_bad[i]], [
af_y_bad[i]], 'o', mfc='None', mec='k', markersize=self.MS, clip_on=False, zorder=1)
self.mplot_af.set_xlabel('AF (mT)', color='b')
for tl in self.mplot_af.get_xticklabels():
tl.set_color('b')
self.mplot.tick_params(axis='both', which='major', labelsize=7)
self.mplot_af.tick_params(axis='both', which='major', labelsize=7)
self.mplot.spines["right"].set_visible(False)
self.mplot_af.spines["right"].set_visible(False)
self.mplot.get_xaxis().tick_bottom()
self.mplot.get_yaxis().tick_left()
self.mplot.set_ylabel("M / NRM0", fontsize=8*self.GUI_RESOLUTION)
self.canvas3.draw() |
def CMPXCHG8B(cpu, dest):
"""
Compares and exchanges bytes.
Compares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if
operand size is 128 bits) with the operand (destination operand). If
the values are equal, the 64-bit value in ECX:EBX (or 128-bit value in
RCX:RBX) is stored in the destination operand. Otherwise, the value in
the destination operand is loaded into EDX:EAX (or RDX:RAX)::
IF (64-Bit Mode and OperandSize = 64)
THEN
IF (RDX:RAX = DEST)
THEN
ZF = 1;
DEST = RCX:RBX;
ELSE
ZF = 0;
RDX:RAX = DEST;
FI
ELSE
IF (EDX:EAX = DEST)
THEN
ZF = 1;
DEST = ECX:EBX;
ELSE
ZF = 0;
EDX:EAX = DEST;
FI;
FI;
:param cpu: current CPU.
:param dest: destination operand.
"""
size = dest.size
cmp_reg_name_l = {64: 'EAX', 128: 'RAX'}[size]
cmp_reg_name_h = {64: 'EDX', 128: 'RDX'}[size]
src_reg_name_l = {64: 'EBX', 128: 'RBX'}[size]
src_reg_name_h = {64: 'ECX', 128: 'RCX'}[size]
# EDX:EAX or RDX:RAX
cmph = cpu.read_register(cmp_reg_name_h)
cmpl = cpu.read_register(cmp_reg_name_l)
srch = cpu.read_register(src_reg_name_h)
srcl = cpu.read_register(src_reg_name_l)
cmp0 = Operators.CONCAT(size, cmph, cmpl)
src0 = Operators.CONCAT(size, srch, srcl)
arg_dest = dest.read()
cpu.ZF = arg_dest == cmp0
dest.write(
Operators.ITEBV(size, cpu.ZF,
Operators.CONCAT(size, srch, srcl),
arg_dest)
)
cpu.write_register(cmp_reg_name_l, Operators.ITEBV(size // 2, cpu.ZF, cmpl,
Operators.EXTRACT(arg_dest, 0, size // 2)))
cpu.write_register(cmp_reg_name_h, Operators.ITEBV(size // 2, cpu.ZF, cmph,
Operators.EXTRACT(arg_dest, size // 2, size // 2))) | Compares and exchanges bytes.
Compares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if
operand size is 128 bits) with the operand (destination operand). If
the values are equal, the 64-bit value in ECX:EBX (or 128-bit value in
RCX:RBX) is stored in the destination operand. Otherwise, the value in
the destination operand is loaded into EDX:EAX (or RDX:RAX)::
IF (64-Bit Mode and OperandSize = 64)
THEN
IF (RDX:RAX = DEST)
THEN
ZF = 1;
DEST = RCX:RBX;
ELSE
ZF = 0;
RDX:RAX = DEST;
FI
ELSE
IF (EDX:EAX = DEST)
THEN
ZF = 1;
DEST = ECX:EBX;
ELSE
ZF = 0;
EDX:EAX = DEST;
FI;
FI;
:param cpu: current CPU.
:param dest: destination operand. | Below is the the instruction that describes the task:
### Input:
Compares and exchanges bytes.
Compares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if
operand size is 128 bits) with the operand (destination operand). If
the values are equal, the 64-bit value in ECX:EBX (or 128-bit value in
RCX:RBX) is stored in the destination operand. Otherwise, the value in
the destination operand is loaded into EDX:EAX (or RDX:RAX)::
IF (64-Bit Mode and OperandSize = 64)
THEN
IF (RDX:RAX = DEST)
THEN
ZF = 1;
DEST = RCX:RBX;
ELSE
ZF = 0;
RDX:RAX = DEST;
FI
ELSE
IF (EDX:EAX = DEST)
THEN
ZF = 1;
DEST = ECX:EBX;
ELSE
ZF = 0;
EDX:EAX = DEST;
FI;
FI;
:param cpu: current CPU.
:param dest: destination operand.
### Response:
def CMPXCHG8B(cpu, dest):
"""
Compares and exchanges bytes.
Compares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if
operand size is 128 bits) with the operand (destination operand). If
the values are equal, the 64-bit value in ECX:EBX (or 128-bit value in
RCX:RBX) is stored in the destination operand. Otherwise, the value in
the destination operand is loaded into EDX:EAX (or RDX:RAX)::
IF (64-Bit Mode and OperandSize = 64)
THEN
IF (RDX:RAX = DEST)
THEN
ZF = 1;
DEST = RCX:RBX;
ELSE
ZF = 0;
RDX:RAX = DEST;
FI
ELSE
IF (EDX:EAX = DEST)
THEN
ZF = 1;
DEST = ECX:EBX;
ELSE
ZF = 0;
EDX:EAX = DEST;
FI;
FI;
:param cpu: current CPU.
:param dest: destination operand.
"""
size = dest.size
cmp_reg_name_l = {64: 'EAX', 128: 'RAX'}[size]
cmp_reg_name_h = {64: 'EDX', 128: 'RDX'}[size]
src_reg_name_l = {64: 'EBX', 128: 'RBX'}[size]
src_reg_name_h = {64: 'ECX', 128: 'RCX'}[size]
# EDX:EAX or RDX:RAX
cmph = cpu.read_register(cmp_reg_name_h)
cmpl = cpu.read_register(cmp_reg_name_l)
srch = cpu.read_register(src_reg_name_h)
srcl = cpu.read_register(src_reg_name_l)
cmp0 = Operators.CONCAT(size, cmph, cmpl)
src0 = Operators.CONCAT(size, srch, srcl)
arg_dest = dest.read()
cpu.ZF = arg_dest == cmp0
dest.write(
Operators.ITEBV(size, cpu.ZF,
Operators.CONCAT(size, srch, srcl),
arg_dest)
)
cpu.write_register(cmp_reg_name_l, Operators.ITEBV(size // 2, cpu.ZF, cmpl,
Operators.EXTRACT(arg_dest, 0, size // 2)))
cpu.write_register(cmp_reg_name_h, Operators.ITEBV(size // 2, cpu.ZF, cmph,
Operators.EXTRACT(arg_dest, size // 2, size // 2))) |
def filter_direct(stmts_in, **kwargs):
"""Filter to statements that are direct interactions
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
def get_is_direct(stmt):
"""Returns true if there is evidence that the statement is a direct
interaction.
If any of the evidences associated with the statement
indicates a direct interatcion then we assume the interaction
is direct. If there is no evidence for the interaction being indirect
then we default to direct.
"""
any_indirect = False
for ev in stmt.evidence:
if ev.epistemics.get('direct') is True:
return True
elif ev.epistemics.get('direct') is False:
# This guarantees that we have seen at least
# some evidence that the statement is indirect
any_indirect = True
if any_indirect:
return False
return True
logger.info('Filtering %d statements to direct ones...' % len(stmts_in))
stmts_out = []
for st in stmts_in:
if get_is_direct(st):
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | Filter to statements that are direct interactions
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. | Below is the the instruction that describes the task:
### Input:
Filter to statements that are direct interactions
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
### Response:
def filter_direct(stmts_in, **kwargs):
"""Filter to statements that are direct interactions
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
def get_is_direct(stmt):
"""Returns true if there is evidence that the statement is a direct
interaction.
If any of the evidences associated with the statement
indicates a direct interatcion then we assume the interaction
is direct. If there is no evidence for the interaction being indirect
then we default to direct.
"""
any_indirect = False
for ev in stmt.evidence:
if ev.epistemics.get('direct') is True:
return True
elif ev.epistemics.get('direct') is False:
# This guarantees that we have seen at least
# some evidence that the statement is indirect
any_indirect = True
if any_indirect:
return False
return True
logger.info('Filtering %d statements to direct ones...' % len(stmts_in))
stmts_out = []
for st in stmts_in:
if get_is_direct(st):
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
dump_pkl = kwargs.get('save')
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.