code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def send_response(self, msgid, response):
"""Send a response."""
msg = dumps([2, msgid, response])
self.send(msg) | Send a response. | Below is the the instruction that describes the task:
### Input:
Send a response.
### Response:
def send_response(self, msgid, response):
"""Send a response."""
msg = dumps([2, msgid, response])
self.send(msg) |
def open(self, input_id, **kwargs):
"""
Open input data.
Parameters
----------
input_id : string
input identifier from configuration file or file path
kwargs : driver specific parameters (e.g. resampling)
Returns
-------
tiled input data : InputTile
reprojected input data within tile
"""
if not isinstance(input_id, str):
return input_id.open(self.tile, **kwargs)
if input_id not in self.params["input"]:
raise ValueError("%s not found in config as input file" % input_id)
return self.params["input"][input_id].open(self.tile, **kwargs) | Open input data.
Parameters
----------
input_id : string
input identifier from configuration file or file path
kwargs : driver specific parameters (e.g. resampling)
Returns
-------
tiled input data : InputTile
reprojected input data within tile | Below is the the instruction that describes the task:
### Input:
Open input data.
Parameters
----------
input_id : string
input identifier from configuration file or file path
kwargs : driver specific parameters (e.g. resampling)
Returns
-------
tiled input data : InputTile
reprojected input data within tile
### Response:
def open(self, input_id, **kwargs):
"""
Open input data.
Parameters
----------
input_id : string
input identifier from configuration file or file path
kwargs : driver specific parameters (e.g. resampling)
Returns
-------
tiled input data : InputTile
reprojected input data within tile
"""
if not isinstance(input_id, str):
return input_id.open(self.tile, **kwargs)
if input_id not in self.params["input"]:
raise ValueError("%s not found in config as input file" % input_id)
return self.params["input"][input_id].open(self.tile, **kwargs) |
def splits(cls, fields, root=".data", train="train.txt",
test="test.txt", validation_frac=0.1, **kwargs):
"""Downloads and loads the CoNLL 2000 Chunking dataset.
NOTE: There is only a train and test dataset so we use
10% of the train set as validation
"""
train, test = super(CoNLL2000Chunking, cls).splits(
fields=fields, root=root, train=train,
test=test, separator=' ', **kwargs)
# HACK: Saving the sort key function as the split() call removes it
sort_key = train.sort_key
# Now split the train set
# Force a random seed to make the split deterministic
random.seed(0)
train, val = train.split(1 - validation_frac, random_state=random.getstate())
# Reset the seed
random.seed()
# HACK: Set the sort key
train.sort_key = sort_key
val.sort_key = sort_key
return train, val, test | Downloads and loads the CoNLL 2000 Chunking dataset.
NOTE: There is only a train and test dataset so we use
10% of the train set as validation | Below is the the instruction that describes the task:
### Input:
Downloads and loads the CoNLL 2000 Chunking dataset.
NOTE: There is only a train and test dataset so we use
10% of the train set as validation
### Response:
def splits(cls, fields, root=".data", train="train.txt",
test="test.txt", validation_frac=0.1, **kwargs):
"""Downloads and loads the CoNLL 2000 Chunking dataset.
NOTE: There is only a train and test dataset so we use
10% of the train set as validation
"""
train, test = super(CoNLL2000Chunking, cls).splits(
fields=fields, root=root, train=train,
test=test, separator=' ', **kwargs)
# HACK: Saving the sort key function as the split() call removes it
sort_key = train.sort_key
# Now split the train set
# Force a random seed to make the split deterministic
random.seed(0)
train, val = train.split(1 - validation_frac, random_state=random.getstate())
# Reset the seed
random.seed()
# HACK: Set the sort key
train.sort_key = sort_key
val.sort_key = sort_key
return train, val, test |
def clean_part_ethn(body):
"""
Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese")
"""
# patterns that can create false positive situations
patterns_to_remove = [r'black ?or ?african', r'african ?or ?black', r'no ?black', r'no ?african', r'no ?aa', r'white ?men',
r'white ?gentlemen', r'no ?spanish', r'speak ?spanish', r'black ?(guys|men|hair|client)', r'dark ?hair',
r'(dark ?)?brown ?hair', r'white ?tie']
# indian states to convert the term 'indian'
indian_states = ['awadhi', 'badhi', 'bhutia', 'garhwali', 'halbi', 'kamboj', 'bhattarai', 'bhotiya', 'pardeshi',
'bengali', 'madra', 'tamil', 'rajasthani', 'adivasi']
for p in patterns_to_remove:
body = re.sub(p, '', body)
for i in indian_states:
body = body.replace(i, 'indian')
# regex substitutions
body = re.sub(r'hong ?kong', 'chinese', body)
body = re.sub(r'snow ?bunn(y|ies)', 'white', body)
body = re.sub(r'a\ss\si\sa\sn', 'asian', body)
body = re.sub(r'l\sa\st\si\sn\sa', 'latina', body)
# convert many ethnicity variations into standardized ones (e.g. china -> chinese)
for sub in eth_subs:
body = body.replace(sub, eth_subs[sub])
body = re.sub(r' +', ' ', body)
return body | Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese") | Below is the the instruction that describes the task:
### Input:
Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese")
### Response:
def clean_part_ethn(body):
"""
Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese")
"""
# patterns that can create false positive situations
patterns_to_remove = [r'black ?or ?african', r'african ?or ?black', r'no ?black', r'no ?african', r'no ?aa', r'white ?men',
r'white ?gentlemen', r'no ?spanish', r'speak ?spanish', r'black ?(guys|men|hair|client)', r'dark ?hair',
r'(dark ?)?brown ?hair', r'white ?tie']
# indian states to convert the term 'indian'
indian_states = ['awadhi', 'badhi', 'bhutia', 'garhwali', 'halbi', 'kamboj', 'bhattarai', 'bhotiya', 'pardeshi',
'bengali', 'madra', 'tamil', 'rajasthani', 'adivasi']
for p in patterns_to_remove:
body = re.sub(p, '', body)
for i in indian_states:
body = body.replace(i, 'indian')
# regex substitutions
body = re.sub(r'hong ?kong', 'chinese', body)
body = re.sub(r'snow ?bunn(y|ies)', 'white', body)
body = re.sub(r'a\ss\si\sa\sn', 'asian', body)
body = re.sub(r'l\sa\st\si\sn\sa', 'latina', body)
# convert many ethnicity variations into standardized ones (e.g. china -> chinese)
for sub in eth_subs:
body = body.replace(sub, eth_subs[sub])
body = re.sub(r' +', ' ', body)
return body |
def derivativeZ(self,x,y,z):
'''
Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdz_out = self.func.derivativeZ(x-xShift,y,z)
return dfdz_out | Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs. | Below is the the instruction that describes the task:
### Input:
Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs.
### Response:
def derivativeZ(self,x,y,z):
'''
Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdz_out = self.func.derivativeZ(x-xShift,y,z)
return dfdz_out |
def get_terms_complete(self) -> pd.DataFrame:
''' Gets complete entity data like term/view '''
if not self.terms_complete.empty:
return self.terms_complete
if self.from_backup:
self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH)
return self.terms_complete
ilx2synonyms = self.get_ilx2synonyms()
ilx2existing_ids = self.get_ilx2existing_ids()
ilx2annotations = self.get_ilx2annotations()
ilx2superclass = self.get_ilx2superclass()
ilx_complete = []
header = ['Index'] + list(self.fetch_terms().columns)
for row in self.fetch_terms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
row['synonyms'] = ilx2synonyms.get(row['ilx'])
row['existing_ids'] = ilx2existing_ids[row['ilx']] # if breaks we have worse problems
row['annotations'] = ilx2annotations.get(row['ilx'])
row['superclass'] = ilx2superclass.get(row['ilx'])
ilx_complete.append(row)
terms_complete = pd.DataFrame(ilx_complete)
create_pickle(terms_complete, TERMS_COMPLETE_BACKUP_PATH)
return terms_complete | Gets complete entity data like term/view | Below is the the instruction that describes the task:
### Input:
Gets complete entity data like term/view
### Response:
def get_terms_complete(self) -> pd.DataFrame:
''' Gets complete entity data like term/view '''
if not self.terms_complete.empty:
return self.terms_complete
if self.from_backup:
self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH)
return self.terms_complete
ilx2synonyms = self.get_ilx2synonyms()
ilx2existing_ids = self.get_ilx2existing_ids()
ilx2annotations = self.get_ilx2annotations()
ilx2superclass = self.get_ilx2superclass()
ilx_complete = []
header = ['Index'] + list(self.fetch_terms().columns)
for row in self.fetch_terms().itertuples():
row = {header[i]:val for i, val in enumerate(row)}
row['synonyms'] = ilx2synonyms.get(row['ilx'])
row['existing_ids'] = ilx2existing_ids[row['ilx']] # if breaks we have worse problems
row['annotations'] = ilx2annotations.get(row['ilx'])
row['superclass'] = ilx2superclass.get(row['ilx'])
ilx_complete.append(row)
terms_complete = pd.DataFrame(ilx_complete)
create_pickle(terms_complete, TERMS_COMPLETE_BACKUP_PATH)
return terms_complete |
def normalize_table_name(name):
"""Check if the table name is obviously invalid."""
if not isinstance(name, six.string_types):
raise ValueError("Invalid table name: %r" % name)
name = name.strip()[:63]
if not len(name):
raise ValueError("Invalid table name: %r" % name)
return name | Check if the table name is obviously invalid. | Below is the the instruction that describes the task:
### Input:
Check if the table name is obviously invalid.
### Response:
def normalize_table_name(name):
"""Check if the table name is obviously invalid."""
if not isinstance(name, six.string_types):
raise ValueError("Invalid table name: %r" % name)
name = name.strip()[:63]
if not len(name):
raise ValueError("Invalid table name: %r" % name)
return name |
def load(filename, default=None):
'''
Try to load @filename. If there is no loader for @filename's filetype,
return @default.
'''
ext = get_ext(filename)
if ext in ldict:
return ldict[ext](filename)
else:
return default | Try to load @filename. If there is no loader for @filename's filetype,
return @default. | Below is the the instruction that describes the task:
### Input:
Try to load @filename. If there is no loader for @filename's filetype,
return @default.
### Response:
def load(filename, default=None):
'''
Try to load @filename. If there is no loader for @filename's filetype,
return @default.
'''
ext = get_ext(filename)
if ext in ldict:
return ldict[ext](filename)
else:
return default |
def obfn_reg(self):
"""Compute regularisation term and contribution to objective
function.
"""
rl1 = np.linalg.norm((self.Wl1 * self.obfn_g1var()).ravel(), 1)
rtv = np.sum(np.sqrt(np.sum(self.obfn_g0var()**2, axis=-1)))
return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv) | Compute regularisation term and contribution to objective
function. | Below is the the instruction that describes the task:
### Input:
Compute regularisation term and contribution to objective
function.
### Response:
def obfn_reg(self):
"""Compute regularisation term and contribution to objective
function.
"""
rl1 = np.linalg.norm((self.Wl1 * self.obfn_g1var()).ravel(), 1)
rtv = np.sum(np.sqrt(np.sum(self.obfn_g0var()**2, axis=-1)))
return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv) |
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big')) | Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation. | Below is the the instruction that describes the task:
### Input:
Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
### Response:
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big')) |
def rename_file(self, old_path, new_path):
"""Rename the current notebook, as well as its alternative representations"""
if old_path not in self.paired_notebooks:
try:
# we do not know yet if this is a paired notebook (#190)
# -> to get this information we open the notebook
self.get(old_path, content=True)
except Exception:
pass
if old_path not in self.paired_notebooks:
super(TextFileContentsManager, self).rename_file(old_path, new_path)
return
fmt, formats = self.paired_notebooks.get(old_path)
old_alt_paths = paired_paths(old_path, fmt, formats)
# Is the new file name consistent with suffix?
try:
new_base = base_path(new_path, fmt)
except Exception as err:
raise HTTPError(400, str(err))
for old_alt_path, alt_fmt in old_alt_paths:
new_alt_path = full_path(new_base, alt_fmt)
if self.exists(old_alt_path):
super(TextFileContentsManager, self).rename_file(old_alt_path, new_alt_path)
self.drop_paired_notebook(old_path)
self.update_paired_notebooks(new_path, fmt, formats) | Rename the current notebook, as well as its alternative representations | Below is the the instruction that describes the task:
### Input:
Rename the current notebook, as well as its alternative representations
### Response:
def rename_file(self, old_path, new_path):
"""Rename the current notebook, as well as its alternative representations"""
if old_path not in self.paired_notebooks:
try:
# we do not know yet if this is a paired notebook (#190)
# -> to get this information we open the notebook
self.get(old_path, content=True)
except Exception:
pass
if old_path not in self.paired_notebooks:
super(TextFileContentsManager, self).rename_file(old_path, new_path)
return
fmt, formats = self.paired_notebooks.get(old_path)
old_alt_paths = paired_paths(old_path, fmt, formats)
# Is the new file name consistent with suffix?
try:
new_base = base_path(new_path, fmt)
except Exception as err:
raise HTTPError(400, str(err))
for old_alt_path, alt_fmt in old_alt_paths:
new_alt_path = full_path(new_base, alt_fmt)
if self.exists(old_alt_path):
super(TextFileContentsManager, self).rename_file(old_alt_path, new_alt_path)
self.drop_paired_notebook(old_path)
self.update_paired_notebooks(new_path, fmt, formats) |
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object.
Monkey-patch from
'''
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if (not os.path.isabs(main_path) and process.ORIGINAL_DIR
is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
if not main_path.endswith('.exe'):
d['main_path'] = os.path.normpath(main_path)
return d | Return info about parent needed by child to unpickle process object.
Monkey-patch from | Below is the the instruction that describes the task:
### Input:
Return info about parent needed by child to unpickle process object.
Monkey-patch from
### Response:
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object.
Monkey-patch from
'''
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if (not os.path.isabs(main_path) and process.ORIGINAL_DIR
is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
if not main_path.endswith('.exe'):
d['main_path'] = os.path.normpath(main_path)
return d |
def matches_query(self, key, query):
"""
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._add_condition(key, '$inQuery', dumped)
return self | 增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query | Below is the the instruction that describes the task:
### Input:
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query
### Response:
def matches_query(self, key, query):
"""
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._add_condition(key, '$inQuery', dumped)
return self |
async def text(self,
encoding: Optional[str]=None, errors: str='strict') -> str:
"""Read response payload and decode."""
if self._body is None:
await self.read()
if encoding is None:
encoding = self.get_encoding()
return self._body.decode(encoding, errors=errors) | Read response payload and decode. | Below is the the instruction that describes the task:
### Input:
Read response payload and decode.
### Response:
async def text(self,
encoding: Optional[str]=None, errors: str='strict') -> str:
"""Read response payload and decode."""
if self._body is None:
await self.read()
if encoding is None:
encoding = self.get_encoding()
return self._body.decode(encoding, errors=errors) |
def get_pipeline(self, *args, **kwargs):
'''
Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission.
'''
return getattr(missions, self.mission).pipelines.get(self.ID, *args,
**kwargs) | Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission. | Below is the the instruction that describes the task:
### Input:
Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission.
### Response:
def get_pipeline(self, *args, **kwargs):
'''
Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission.
'''
return getattr(missions, self.mission).pipelines.get(self.ID, *args,
**kwargs) |
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica:
"""
Create a new replica with the specified parameters.
"""
return self._replica_class(self._node, instance_id, self._config, is_master, bls_bft, self._metrics) | Create a new replica with the specified parameters. | Below is the the instruction that describes the task:
### Input:
Create a new replica with the specified parameters.
### Response:
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica:
"""
Create a new replica with the specified parameters.
"""
return self._replica_class(self._node, instance_id, self._config, is_master, bls_bft, self._metrics) |
async def fire(self, *args, **kwargs):
"""Fire this event, calling all observers with the same arguments."""
logger.debug('Fired {}'.format(self))
for observer in self._observers:
gen = observer(*args, **kwargs)
if asyncio.iscoroutinefunction(observer):
await gen | Fire this event, calling all observers with the same arguments. | Below is the the instruction that describes the task:
### Input:
Fire this event, calling all observers with the same arguments.
### Response:
async def fire(self, *args, **kwargs):
"""Fire this event, calling all observers with the same arguments."""
logger.debug('Fired {}'.format(self))
for observer in self._observers:
gen = observer(*args, **kwargs)
if asyncio.iscoroutinefunction(observer):
await gen |
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted | Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone. | Below is the the instruction that describes the task:
### Input:
Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
### Response:
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted |
def _set_mac_group(self, v, load=False):
"""
Setter method for mac_group, mapped from YANG variable /mac_group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_group() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("mac_group_id",mac_group.mac_group, yang_name="mac-group", rest_name="mac-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-group-id', extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}), is_container='list', yang_name="mac-group", rest_name="mac-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac_group must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("mac_group_id",mac_group.mac_group, yang_name="mac-group", rest_name="mac-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-group-id', extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}), is_container='list', yang_name="mac-group", rest_name="mac-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)""",
})
self.__mac_group = t
if hasattr(self, '_set'):
self._set() | Setter method for mac_group, mapped from YANG variable /mac_group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_group() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for mac_group, mapped from YANG variable /mac_group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_group() directly.
### Response:
def _set_mac_group(self, v, load=False):
"""
Setter method for mac_group, mapped from YANG variable /mac_group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_group() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("mac_group_id",mac_group.mac_group, yang_name="mac-group", rest_name="mac-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-group-id', extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}), is_container='list', yang_name="mac-group", rest_name="mac-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac_group must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("mac_group_id",mac_group.mac_group, yang_name="mac-group", rest_name="mac-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-group-id', extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}), is_container='list', yang_name="mac-group", rest_name="mac-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Group Configuration', u'cli-no-key-completion': None, u'sort-priority': u'54', u'cli-suppress-list-no': None, u'hidden': u'full', u'callpoint': u'mac-group-config'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)""",
})
self.__mac_group = t
if hasattr(self, '_set'):
self._set() |
def check_web_config_consistency(config):
'''
Check the web application config file for consistency.
'''
login_conf_deps = {
'LOGIN_TWITTER_OAUTH_KEY': ['LOGIN_TWITTER_OAUTH_SECRET'],
'LOGIN_GOOGLE_OAUTH_KEY': ['LOGIN_GOOGLE_OAUTH_SECRET'],
'LOGIN_GITHUB_OAUTH_KEY': ['LOGIN_GITHUB_OAUTH_SECRET'],
'LOGIN_GITLAB_OAUTH_KEY': ['LOGIN_GITLAB_OAUTH_SECRET'],
'LOGIN_TWITTER_OAUTH_SECRET': ['LOGIN_TWITTER_OAUTH_KEY'],
'LOGIN_GOOGLE_OAUTH_SECRET': ['LOGIN_GOOGLE_OAUTH_KEY'],
'LOGIN_GITHUB_OAUTH_SECRET': ['LOGIN_GITHUB_OAUTH_KEY'],
'LOGIN_GITLAB_OAUTH_SECRET': ['LOGIN_GITLAB_OAUTH_KEY'],
'LOGIN_OIDC_ENDPOINT': ['LOGIN_OIDC_CLIENT_ID', 'LOGIN_OIDC_CLIENT_SECRET', 'LOGIN_OIDC_DESCRIPTION'],
'LOGIN_OIDC_CLIENT_ID': ['LOGIN_OIDC_ENDPOINT', 'LOGIN_OIDC_CLIENT_SECRET', 'LOGIN_OIDC_DESCRIPTION'],
'LOGIN_OIDC_CLIENT_SECRET': ['LOGIN_OIDC_ENDPOINT', 'LOGIN_OIDC_CLIENT_ID', 'LOGIN_OIDC_DESCRIPTION'],
}
print("Checking configuration of the OpenSubmit web application...")
# Let Django's manage.py load the settings file, to see if this works in general
django_admin(["check"])
# Check configuration dependencies
for k, v in list(login_conf_deps.items()):
if config.get('login', k):
for needed in v:
if len(config.get('login', needed)) < 1:
print(
"ERROR: You have enabled %s in settings.ini, but %s is not set." % (k, needed))
return False
# Check media path
check_path(config.get('server', 'MEDIA_ROOT'))
# Prepare empty log file, in case the web server has no creation rights
log_file = config.get('server', 'LOG_FILE')
print("Preparing log file at " + log_file)
check_file(log_file)
# If SQLite database, adjust file system permissions for the web server
if config.get('database', 'DATABASE_ENGINE') == 'sqlite3':
name = config.get('database', 'DATABASE_NAME')
if not os.path.isabs(name):
print("ERROR: Your SQLite database name must be an absolute path. The web server must have directory access permissions for this path.")
return False
check_file(config.get('database', 'DATABASE_NAME'))
# everything ok
return True | Check the web application config file for consistency. | Below is the the instruction that describes the task:
### Input:
Check the web application config file for consistency.
### Response:
def check_web_config_consistency(config):
'''
Check the web application config file for consistency.
'''
login_conf_deps = {
'LOGIN_TWITTER_OAUTH_KEY': ['LOGIN_TWITTER_OAUTH_SECRET'],
'LOGIN_GOOGLE_OAUTH_KEY': ['LOGIN_GOOGLE_OAUTH_SECRET'],
'LOGIN_GITHUB_OAUTH_KEY': ['LOGIN_GITHUB_OAUTH_SECRET'],
'LOGIN_GITLAB_OAUTH_KEY': ['LOGIN_GITLAB_OAUTH_SECRET'],
'LOGIN_TWITTER_OAUTH_SECRET': ['LOGIN_TWITTER_OAUTH_KEY'],
'LOGIN_GOOGLE_OAUTH_SECRET': ['LOGIN_GOOGLE_OAUTH_KEY'],
'LOGIN_GITHUB_OAUTH_SECRET': ['LOGIN_GITHUB_OAUTH_KEY'],
'LOGIN_GITLAB_OAUTH_SECRET': ['LOGIN_GITLAB_OAUTH_KEY'],
'LOGIN_OIDC_ENDPOINT': ['LOGIN_OIDC_CLIENT_ID', 'LOGIN_OIDC_CLIENT_SECRET', 'LOGIN_OIDC_DESCRIPTION'],
'LOGIN_OIDC_CLIENT_ID': ['LOGIN_OIDC_ENDPOINT', 'LOGIN_OIDC_CLIENT_SECRET', 'LOGIN_OIDC_DESCRIPTION'],
'LOGIN_OIDC_CLIENT_SECRET': ['LOGIN_OIDC_ENDPOINT', 'LOGIN_OIDC_CLIENT_ID', 'LOGIN_OIDC_DESCRIPTION'],
}
print("Checking configuration of the OpenSubmit web application...")
# Let Django's manage.py load the settings file, to see if this works in general
django_admin(["check"])
# Check configuration dependencies
for k, v in list(login_conf_deps.items()):
if config.get('login', k):
for needed in v:
if len(config.get('login', needed)) < 1:
print(
"ERROR: You have enabled %s in settings.ini, but %s is not set." % (k, needed))
return False
# Check media path
check_path(config.get('server', 'MEDIA_ROOT'))
# Prepare empty log file, in case the web server has no creation rights
log_file = config.get('server', 'LOG_FILE')
print("Preparing log file at " + log_file)
check_file(log_file)
# If SQLite database, adjust file system permissions for the web server
if config.get('database', 'DATABASE_ENGINE') == 'sqlite3':
name = config.get('database', 'DATABASE_NAME')
if not os.path.isabs(name):
print("ERROR: Your SQLite database name must be an absolute path. The web server must have directory access permissions for this path.")
return False
check_file(config.get('database', 'DATABASE_NAME'))
# everything ok
return True |
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag | Internal helper to construct opening and closing tag expressions, given a tag name | Below is the the instruction that describes the task:
### Input:
Internal helper to construct opening and closing tag expressions, given a tag name
### Response:
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag |
def summary(self, raw):
"""
Return one taxonomy summarizing the reported tags
If there is only one tag, use it as the predicate
If there are multiple tags, use "entries" as the predicate
Use the total count as the value
Use the most malicious level found
Examples:
Input
{
"name": SCANNER1,
"intention": ""
}
Output
GreyNoise:SCANNER1 = 1 (info)
Input
{
"name": SCANNER1,
"intention": "malicious"
},
{
"name": SCANNER1,
"intention": "benign"
}
Output
GreyNoise:SCANNER1 = 2 (malicious)
Input
{
"name": SCANNER1,
"intention": ""
},
{
"name": SCANNER1,
"intention": "safe"
},
{
"name": SCANNER2,
"intention": ""
}
Output
GreyNoise:entries = 3 (safe)
"""
try:
taxonomies = []
if raw.get('records'):
final_level = None
taxonomy_data = defaultdict(int)
for record in raw.get('records', []):
name = record.get('name', 'unknown')
intention = record.get('intention', 'unknown')
taxonomy_data[name] += 1
final_level = self._get_level(final_level, intention)
if len(taxonomy_data) > 1: # Multiple tags have been found
taxonomies.append(self.build_taxonomy(final_level, 'GreyNoise', 'entries', len(taxonomy_data)))
else: # There is only one tag found, possibly multiple times
for name, count in taxonomy_data.iteritems():
taxonomies.append(self.build_taxonomy(final_level, 'GreyNoise', name, count))
else:
taxonomies.append(self.build_taxonomy('info', 'GreyNoise', 'Records', 'None'))
return {"taxonomies": taxonomies}
except Exception as e:
self.error('Summary failed\n{}'.format(e.message)) | Return one taxonomy summarizing the reported tags
If there is only one tag, use it as the predicate
If there are multiple tags, use "entries" as the predicate
Use the total count as the value
Use the most malicious level found
Examples:
Input
{
"name": SCANNER1,
"intention": ""
}
Output
GreyNoise:SCANNER1 = 1 (info)
Input
{
"name": SCANNER1,
"intention": "malicious"
},
{
"name": SCANNER1,
"intention": "benign"
}
Output
GreyNoise:SCANNER1 = 2 (malicious)
Input
{
"name": SCANNER1,
"intention": ""
},
{
"name": SCANNER1,
"intention": "safe"
},
{
"name": SCANNER2,
"intention": ""
}
Output
GreyNoise:entries = 3 (safe) | Below is the the instruction that describes the task:
### Input:
Return one taxonomy summarizing the reported tags
If there is only one tag, use it as the predicate
If there are multiple tags, use "entries" as the predicate
Use the total count as the value
Use the most malicious level found
Examples:
Input
{
"name": SCANNER1,
"intention": ""
}
Output
GreyNoise:SCANNER1 = 1 (info)
Input
{
"name": SCANNER1,
"intention": "malicious"
},
{
"name": SCANNER1,
"intention": "benign"
}
Output
GreyNoise:SCANNER1 = 2 (malicious)
Input
{
"name": SCANNER1,
"intention": ""
},
{
"name": SCANNER1,
"intention": "safe"
},
{
"name": SCANNER2,
"intention": ""
}
Output
GreyNoise:entries = 3 (safe)
### Response:
def summary(self, raw):
"""
Return one taxonomy summarizing the reported tags
If there is only one tag, use it as the predicate
If there are multiple tags, use "entries" as the predicate
Use the total count as the value
Use the most malicious level found
Examples:
Input
{
"name": SCANNER1,
"intention": ""
}
Output
GreyNoise:SCANNER1 = 1 (info)
Input
{
"name": SCANNER1,
"intention": "malicious"
},
{
"name": SCANNER1,
"intention": "benign"
}
Output
GreyNoise:SCANNER1 = 2 (malicious)
Input
{
"name": SCANNER1,
"intention": ""
},
{
"name": SCANNER1,
"intention": "safe"
},
{
"name": SCANNER2,
"intention": ""
}
Output
GreyNoise:entries = 3 (safe)
"""
try:
taxonomies = []
if raw.get('records'):
final_level = None
taxonomy_data = defaultdict(int)
for record in raw.get('records', []):
name = record.get('name', 'unknown')
intention = record.get('intention', 'unknown')
taxonomy_data[name] += 1
final_level = self._get_level(final_level, intention)
if len(taxonomy_data) > 1: # Multiple tags have been found
taxonomies.append(self.build_taxonomy(final_level, 'GreyNoise', 'entries', len(taxonomy_data)))
else: # There is only one tag found, possibly multiple times
for name, count in taxonomy_data.iteritems():
taxonomies.append(self.build_taxonomy(final_level, 'GreyNoise', name, count))
else:
taxonomies.append(self.build_taxonomy('info', 'GreyNoise', 'Records', 'None'))
return {"taxonomies": taxonomies}
except Exception as e:
self.error('Summary failed\n{}'.format(e.message)) |
def rowlengths(table):
"""
Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files.
"""
counter = Counter()
for row in data(table):
counter[len(row)] += 1
output = [('length', 'count')]
output.extend(counter.most_common())
return wrap(output) | Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files. | Below is the the instruction that describes the task:
### Input:
Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files.
### Response:
def rowlengths(table):
"""
Report on row lengths found in the table. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... [u'B', u'3', u'7.8', True],
... ['D', 'xyz', 9.0],
... ['E', None],
... ['F', 9]]
>>> etl.rowlengths(table)
+--------+-------+
| length | count |
+========+=======+
| 3 | 3 |
+--------+-------+
| 2 | 2 |
+--------+-------+
| 4 | 1 |
+--------+-------+
Useful for finding potential problems in data files.
"""
counter = Counter()
for row in data(table):
counter[len(row)] += 1
output = [('length', 'count')]
output.extend(counter.most_common())
return wrap(output) |
def read_header_at( cls, f):
"""
Given an open file-like object, read a block header
from it and return it as a dict containing:
* version (int)
* prev_block_hash (hex str)
* merkle_root (hex str)
* timestamp (int)
* bits (int)
* nonce (ini)
* hash (hex str)
"""
header_parser = BlockHeaderSerializer()
hdr = header_parser.deserialize( f )
h = {}
h['version'] = hdr.version
h['prev_block_hash'] = "%064x" % hdr.prev_block
h['merkle_root'] = "%064x" % hdr.merkle_root
h['timestamp'] = hdr.timestamp
h['bits'] = hdr.bits
h['nonce'] = hdr.nonce
h['hash'] = hdr.calculate_hash()
return h | Given an open file-like object, read a block header
from it and return it as a dict containing:
* version (int)
* prev_block_hash (hex str)
* merkle_root (hex str)
* timestamp (int)
* bits (int)
* nonce (ini)
* hash (hex str) | Below is the the instruction that describes the task:
### Input:
Given an open file-like object, read a block header
from it and return it as a dict containing:
* version (int)
* prev_block_hash (hex str)
* merkle_root (hex str)
* timestamp (int)
* bits (int)
* nonce (ini)
* hash (hex str)
### Response:
def read_header_at( cls, f):
"""
Given an open file-like object, read a block header
from it and return it as a dict containing:
* version (int)
* prev_block_hash (hex str)
* merkle_root (hex str)
* timestamp (int)
* bits (int)
* nonce (ini)
* hash (hex str)
"""
header_parser = BlockHeaderSerializer()
hdr = header_parser.deserialize( f )
h = {}
h['version'] = hdr.version
h['prev_block_hash'] = "%064x" % hdr.prev_block
h['merkle_root'] = "%064x" % hdr.merkle_root
h['timestamp'] = hdr.timestamp
h['bits'] = hdr.bits
h['nonce'] = hdr.nonce
h['hash'] = hdr.calculate_hash()
return h |
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern) | show status | Below is the the instruction that describes the task:
### Input:
show status
### Response:
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern) |
def type_map_directive_reducer(
map_: TypeMap, directive: GraphQLDirective = None
) -> TypeMap:
"""Reducer function for creating the type map from given directives."""
# Directives are not validated until validate_schema() is called.
if not is_directive(directive):
return map_
directive = cast(GraphQLDirective, directive)
return reduce(
lambda prev_map, arg: type_map_reducer(prev_map, arg.type), # type: ignore
directive.args.values(),
map_,
) | Reducer function for creating the type map from given directives. | Below is the the instruction that describes the task:
### Input:
Reducer function for creating the type map from given directives.
### Response:
def type_map_directive_reducer(
map_: TypeMap, directive: GraphQLDirective = None
) -> TypeMap:
"""Reducer function for creating the type map from given directives."""
# Directives are not validated until validate_schema() is called.
if not is_directive(directive):
return map_
directive = cast(GraphQLDirective, directive)
return reduce(
lambda prev_map, arg: type_map_reducer(prev_map, arg.type), # type: ignore
directive.args.values(),
map_,
) |
def _package(self, task, *args, **kw):
"""
Used internally. Simply wraps the arguments up in a list and encodes
the list.
"""
# Implementation note: it is faster to use a tuple than a list here,
# because json does the list-like check like so (json/encoder.py:424):
# isinstance(o, (list, tuple))
# Because of this order, it is actually faster to create a list:
# >>> timeit.timeit('L = [1,2,3]\nisinstance(L, (list, tuple))')
# 0.41077208518981934
# >>> timeit.timeit('L = (1,2,3)\nisinstance(L, (list, tuple))')
# 0.49509215354919434
# Whereas if it were the other way around, using a tuple would be
# faster:
# >>> timeit.timeit('L = (1,2,3)\nisinstance(L, (tuple, list))')
# 0.3031749725341797
# >>> timeit.timeit('L = [1,2,3]\nisinstance(L, (tuple, list))')
# 0.6147568225860596
return self.codec.encode([task, args, kw]) | Used internally. Simply wraps the arguments up in a list and encodes
the list. | Below is the the instruction that describes the task:
### Input:
Used internally. Simply wraps the arguments up in a list and encodes
the list.
### Response:
def _package(self, task, *args, **kw):
"""
Used internally. Simply wraps the arguments up in a list and encodes
the list.
"""
# Implementation note: it is faster to use a tuple than a list here,
# because json does the list-like check like so (json/encoder.py:424):
# isinstance(o, (list, tuple))
# Because of this order, it is actually faster to create a list:
# >>> timeit.timeit('L = [1,2,3]\nisinstance(L, (list, tuple))')
# 0.41077208518981934
# >>> timeit.timeit('L = (1,2,3)\nisinstance(L, (list, tuple))')
# 0.49509215354919434
# Whereas if it were the other way around, using a tuple would be
# faster:
# >>> timeit.timeit('L = (1,2,3)\nisinstance(L, (tuple, list))')
# 0.3031749725341797
# >>> timeit.timeit('L = [1,2,3]\nisinstance(L, (tuple, list))')
# 0.6147568225860596
return self.codec.encode([task, args, kw]) |
def write_timestamp(self, v):
"""
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch.
"""
self.out.write(pack('>q', long(mktime(v.timetuple())))) | Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch. | Below is the the instruction that describes the task:
### Input:
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch.
### Response:
def write_timestamp(self, v):
"""
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix epoch.
"""
self.out.write(pack('>q', long(mktime(v.timetuple())))) |
def check(self):
"""
Returns #True if the time interval has passed.
"""
if self.value is None:
return True
return (time.clock() - self.start) >= self.value | Returns #True if the time interval has passed. | Below is the the instruction that describes the task:
### Input:
Returns #True if the time interval has passed.
### Response:
def check(self):
"""
Returns #True if the time interval has passed.
"""
if self.value is None:
return True
return (time.clock() - self.start) >= self.value |
def to_bytes(data: Union[str, bytes]) -> bytes:
"""
:param data: Data to convert to bytes.
:type data: Union[str, bytes]
:return: `data` encoded to UTF8.
:rtype: bytes
"""
if isinstance(data, bytes):
return data
return data.encode('utf-8') | :param data: Data to convert to bytes.
:type data: Union[str, bytes]
:return: `data` encoded to UTF8.
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
:param data: Data to convert to bytes.
:type data: Union[str, bytes]
:return: `data` encoded to UTF8.
:rtype: bytes
### Response:
def to_bytes(data: Union[str, bytes]) -> bytes:
"""
:param data: Data to convert to bytes.
:type data: Union[str, bytes]
:return: `data` encoded to UTF8.
:rtype: bytes
"""
if isinstance(data, bytes):
return data
return data.encode('utf-8') |
def add(self, record):
"""Add a reference to the provided record"""
self._field.validate_value(record)
self._elements[record.id] = record
self._sync_field() | Add a reference to the provided record | Below is the the instruction that describes the task:
### Input:
Add a reference to the provided record
### Response:
def add(self, record):
"""Add a reference to the provided record"""
self._field.validate_value(record)
self._elements[record.id] = record
self._sync_field() |
def n_feature_hash(feature, dims, seeds):
"""N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `s`.
"""
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec | N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `s`. | Below is the the instruction that describes the task:
### Input:
N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `s`.
### Response:
def n_feature_hash(feature, dims, seeds):
"""N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `s`.
"""
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec |
def data_and_labels(self):
"""
Dataset features and labels in a matrix form for learning.
Also returns sample_ids in the same order.
Returns
-------
data_matrix : ndarray
2D array of shape [num_samples, num_features]
with features corresponding row-wise to sample_ids
labels : ndarray
Array of numeric labels for each sample corresponding row-wise to sample_ids
sample_ids : list
List of sample ids
"""
sample_ids = np.array(self.keys)
label_dict = self.labels
matrix = np.full([self.num_samples, self.num_features], np.nan)
labels = np.full([self.num_samples, 1], np.nan)
for ix, sample in enumerate(sample_ids):
matrix[ix, :] = self.__data[sample]
labels[ix] = label_dict[sample]
return matrix, np.ravel(labels), sample_ids | Dataset features and labels in a matrix form for learning.
Also returns sample_ids in the same order.
Returns
-------
data_matrix : ndarray
2D array of shape [num_samples, num_features]
with features corresponding row-wise to sample_ids
labels : ndarray
Array of numeric labels for each sample corresponding row-wise to sample_ids
sample_ids : list
List of sample ids | Below is the the instruction that describes the task:
### Input:
Dataset features and labels in a matrix form for learning.
Also returns sample_ids in the same order.
Returns
-------
data_matrix : ndarray
2D array of shape [num_samples, num_features]
with features corresponding row-wise to sample_ids
labels : ndarray
Array of numeric labels for each sample corresponding row-wise to sample_ids
sample_ids : list
List of sample ids
### Response:
def data_and_labels(self):
"""
Dataset features and labels in a matrix form for learning.
Also returns sample_ids in the same order.
Returns
-------
data_matrix : ndarray
2D array of shape [num_samples, num_features]
with features corresponding row-wise to sample_ids
labels : ndarray
Array of numeric labels for each sample corresponding row-wise to sample_ids
sample_ids : list
List of sample ids
"""
sample_ids = np.array(self.keys)
label_dict = self.labels
matrix = np.full([self.num_samples, self.num_features], np.nan)
labels = np.full([self.num_samples, 1], np.nan)
for ix, sample in enumerate(sample_ids):
matrix[ix, :] = self.__data[sample]
labels[ix] = label_dict[sample]
return matrix, np.ravel(labels), sample_ids |
def write_addons_items(xml_tree, records, app_id, api_ver=3, app_ver=None):
"""Generate the addons blocklists.
<emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com">
<versionRange minVersion="0" maxVersion="*" severity="3">
<targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
<versionRange minVersion="39.0a1" maxVersion="*"/>
</targetApplication>
</versionRange>
<prefs>
<pref>browser.startup.homepage</pref>
<pref>browser.search.defaultenginename</pref>
</prefs>
</emItem>
"""
if not records:
return
emItems = etree.SubElement(xml_tree, 'emItems')
groupby = {}
for item in records:
if is_related_to(item, app_id, app_ver):
if item['guid'] in groupby:
emItem = groupby[item['guid']]
# When creating new records from the Kinto Admin we don't have proper blockID.
if 'blockID' in item:
# Remove the first caracter which is the letter i to
# compare the numeric value i45 < i356.
current_blockID = int(item['blockID'][1:])
previous_blockID = int(emItem.attrib['blockID'][1:])
# Group by and keep the biggest blockID in the XML file.
if current_blockID > previous_blockID:
emItem.attrib['blockID'] = item['blockID']
else:
# If the latest entry does not have any blockID attribute, its
# ID should be used. (the list of records is sorted by ascending
# last_modified).
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1473194
emItem.attrib['blockID'] = item['id']
else:
emItem = etree.SubElement(emItems, 'emItem',
blockID=item.get('blockID', item['id']))
groupby[item['guid']] = emItem
prefs = etree.SubElement(emItem, 'prefs')
for p in item['prefs']:
pref = etree.SubElement(prefs, 'pref')
pref.text = p
# Set the add-on ID
emItem.set('id', item['guid'])
for field in ['name', 'os']:
if field in item:
emItem.set(field, item[field])
build_version_range(emItem, item, app_id) | Generate the addons blocklists.
<emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com">
<versionRange minVersion="0" maxVersion="*" severity="3">
<targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
<versionRange minVersion="39.0a1" maxVersion="*"/>
</targetApplication>
</versionRange>
<prefs>
<pref>browser.startup.homepage</pref>
<pref>browser.search.defaultenginename</pref>
</prefs>
</emItem> | Below is the the instruction that describes the task:
### Input:
Generate the addons blocklists.
<emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com">
<versionRange minVersion="0" maxVersion="*" severity="3">
<targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
<versionRange minVersion="39.0a1" maxVersion="*"/>
</targetApplication>
</versionRange>
<prefs>
<pref>browser.startup.homepage</pref>
<pref>browser.search.defaultenginename</pref>
</prefs>
</emItem>
### Response:
def write_addons_items(xml_tree, records, app_id, api_ver=3, app_ver=None):
"""Generate the addons blocklists.
<emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com">
<versionRange minVersion="0" maxVersion="*" severity="3">
<targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
<versionRange minVersion="39.0a1" maxVersion="*"/>
</targetApplication>
</versionRange>
<prefs>
<pref>browser.startup.homepage</pref>
<pref>browser.search.defaultenginename</pref>
</prefs>
</emItem>
"""
if not records:
return
emItems = etree.SubElement(xml_tree, 'emItems')
groupby = {}
for item in records:
if is_related_to(item, app_id, app_ver):
if item['guid'] in groupby:
emItem = groupby[item['guid']]
# When creating new records from the Kinto Admin we don't have proper blockID.
if 'blockID' in item:
# Remove the first caracter which is the letter i to
# compare the numeric value i45 < i356.
current_blockID = int(item['blockID'][1:])
previous_blockID = int(emItem.attrib['blockID'][1:])
# Group by and keep the biggest blockID in the XML file.
if current_blockID > previous_blockID:
emItem.attrib['blockID'] = item['blockID']
else:
# If the latest entry does not have any blockID attribute, its
# ID should be used. (the list of records is sorted by ascending
# last_modified).
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1473194
emItem.attrib['blockID'] = item['id']
else:
emItem = etree.SubElement(emItems, 'emItem',
blockID=item.get('blockID', item['id']))
groupby[item['guid']] = emItem
prefs = etree.SubElement(emItem, 'prefs')
for p in item['prefs']:
pref = etree.SubElement(prefs, 'pref')
pref.text = p
# Set the add-on ID
emItem.set('id', item['guid'])
for field in ['name', 'os']:
if field in item:
emItem.set(field, item[field])
build_version_range(emItem, item, app_id) |
def create(self, group_view=False):
""" Update with current tools """
self.add_handlers({'^T': self.quit, '^Q': self.quit})
self.add(npyscreen.TitleText,
name='Select which tools to ' + self.action['action'] + ':',
editable=False)
togglable = ['remove']
if self.action['action_name'] in togglable:
self.cur_view = self.add(npyscreen.TitleText,
name='Group view:',
value='all groups', editable=False,
rely=3)
self.add_handlers({'^V': self.toggle_view})
i = 5
else:
i = 4
if self.action['action_name'] == 'start':
response = self.tools_inst.inventory(choices=['repos',
'tools',
'built',
'running'])
else:
response = self.tools_inst.inventory(choices=['repos', 'tools'])
if response[0]:
inventory = response[1]
repos = inventory['repos']
# dict has repo as key and list of core/non-core tools as values
has_core = {}
has_non_core = {}
# find all tools that are in this repo
# and list them if they are core
for repo in repos:
core_list = []
ncore_list = []
# splice the repo names for processing
if (repo.startswith('http')):
repo_name = repo.rsplit('/', 2)[1:]
else:
repo_name = repo.split('/')
for tool in inventory['tools']:
tool_repo_name = tool.split(':')
# cross reference repo names
if (repo_name[0] == tool_repo_name[0] and
repo_name[1] == tool_repo_name[1]):
# check to ensure tool not set to locally active = no
# in vent.cfg
externally_active = False
vent_cfg_file = self.api_action.vent_config
vent_cfg = Template(vent_cfg_file)
tool_pairs = vent_cfg.section('external-services')[1]
for ext_tool in tool_pairs:
if ext_tool[0].lower() == inventory['tools'][tool]:
try:
ext_tool_options = json.loads(ext_tool[1])
loc = 'locally_active'
if (loc in ext_tool_options and
ext_tool_options[loc] == 'no'):
externally_active = True
except Exception as e:
self.logger.error("Couldn't check ext"
' because: ' + str(e))
externally_active = False
manifest = Template(self.api_action.manifest)
if not externally_active:
instance_num = re.search(r'\d+$',
manifest.option(
tool, 'name')[1])
if not instance_num:
ncore_list.append(tool)
# multiple instances share same image
elif self.action['action_name'] not in self.no_instance:
ncore_list.append(tool)
has_core[repo] = core_list
has_non_core[repo] = ncore_list
for repo in repos:
self.tools_tc[repo] = {}
if self.action['cores']:
# make sure only repos with core tools are displayed
if has_core.get(repo):
self.repo_widgets[repo] = self.add(npyscreen.TitleText,
name='Plugin: '+repo,
editable=False,
rely=i, relx=5)
for tool in has_core[repo]:
tool_name = tool.split(':', 2)[2].split('/')[-1]
if tool_name == '':
tool_name = '/'
self.tools_tc[repo][tool] = self.add(
npyscreen.CheckBox, name=tool_name,
value=True, relx=10)
i += 1
i += 3
else:
# make sure only repos with non-core tools are displayed
if has_non_core.get(repo):
self.repo_widgets[repo] = self.add(npyscreen.TitleText,
name='Plugin: '+repo,
editable=False,
rely=i, relx=5)
for tool in has_non_core[repo]:
tool_name = tool.split(':', 2)[2].split('/')[-1]
if tool_name == '':
tool_name = '/'
self.tools_tc[repo][tool] = self.add(
npyscreen.CheckBox, name=tool_name,
value=True, relx=10)
i += 1
i += 3
return | Update with current tools | Below is the the instruction that describes the task:
### Input:
Update with current tools
### Response:
def create(self, group_view=False):
    """ Update with current tools.

    Builds the form contents: a prompt line, an optional group-view
    toggle (only for the 'remove' action), and one checkbox per tool,
    grouped under a TitleText header per plugin repository.  Tools whose
    vent.cfg [external-services] entry sets locally_active to 'no' are
    excluded.  ``group_view`` is not read in this method.
    """
    self.add_handlers({'^T': self.quit, '^Q': self.quit})
    self.add(npyscreen.TitleText,
             name='Select which tools to ' + self.action['action'] + ':',
             editable=False)
    togglable = ['remove']
    if self.action['action_name'] in togglable:
        self.cur_view = self.add(npyscreen.TitleText,
                                 name='Group view:',
                                 value='all groups', editable=False,
                                 rely=3)
        self.add_handlers({'^V': self.toggle_view})
        i = 5
    else:
        i = 4
    # 'i' is the row (rely) where the next repo header widget goes.
    if self.action['action_name'] == 'start':
        response = self.tools_inst.inventory(choices=['repos',
                                                      'tools',
                                                      'built',
                                                      'running'])
    else:
        response = self.tools_inst.inventory(choices=['repos', 'tools'])
    if response[0]:
        inventory = response[1]
        repos = inventory['repos']
        # dict has repo as key and list of core/non-core tools as values
        has_core = {}
        has_non_core = {}
        # find all tools that are in this repo
        # and list them if they are core
        for repo in repos:
            # NOTE(review): nothing in this loop ever appends to
            # core_list, so has_core stays empty in the code visible
            # here — confirm against the full source file.
            core_list = []
            ncore_list = []
            # splice the repo names for processing
            if (repo.startswith('http')):
                repo_name = repo.rsplit('/', 2)[1:]
            else:
                repo_name = repo.split('/')
            for tool in inventory['tools']:
                tool_repo_name = tool.split(':')
                # cross reference repo names
                if (repo_name[0] == tool_repo_name[0] and
                        repo_name[1] == tool_repo_name[1]):
                    # check to ensure tool not set to locally active = no
                    # in vent.cfg
                    externally_active = False
                    vent_cfg_file = self.api_action.vent_config
                    vent_cfg = Template(vent_cfg_file)
                    tool_pairs = vent_cfg.section('external-services')[1]
                    for ext_tool in tool_pairs:
                        if ext_tool[0].lower() == inventory['tools'][tool]:
                            try:
                                ext_tool_options = json.loads(ext_tool[1])
                                loc = 'locally_active'
                                if (loc in ext_tool_options and
                                        ext_tool_options[loc] == 'no'):
                                    externally_active = True
                            except Exception as e:
                                self.logger.error("Couldn't check ext"
                                                  ' because: ' + str(e))
                                externally_active = False
                    manifest = Template(self.api_action.manifest)
                    if not externally_active:
                        # A trailing digit in the manifest name marks an
                        # extra instance of an already-listed tool.
                        instance_num = re.search(r'\d+$',
                                                 manifest.option(
                                                     tool, 'name')[1])
                        if not instance_num:
                            ncore_list.append(tool)
                        # multiple instances share same image
                        elif self.action['action_name'] not in self.no_instance:
                            ncore_list.append(tool)
            has_core[repo] = core_list
            has_non_core[repo] = ncore_list
        for repo in repos:
            self.tools_tc[repo] = {}
            if self.action['cores']:
                # make sure only repos with core tools are displayed
                if has_core.get(repo):
                    self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                       name='Plugin: '+repo,
                                                       editable=False,
                                                       rely=i, relx=5)
                    for tool in has_core[repo]:
                        # Checkbox label: last path component of the
                        # tool identifier; bare repo root becomes '/'.
                        tool_name = tool.split(':', 2)[2].split('/')[-1]
                        if tool_name == '':
                            tool_name = '/'
                        self.tools_tc[repo][tool] = self.add(
                            npyscreen.CheckBox, name=tool_name,
                            value=True, relx=10)
                        i += 1
                    i += 3
            else:
                # make sure only repos with non-core tools are displayed
                if has_non_core.get(repo):
                    self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                       name='Plugin: '+repo,
                                                       editable=False,
                                                       rely=i, relx=5)
                    for tool in has_non_core[repo]:
                        tool_name = tool.split(':', 2)[2].split('/')[-1]
                        if tool_name == '':
                            tool_name = '/'
                        self.tools_tc[repo][tool] = self.add(
                            npyscreen.CheckBox, name=tool_name,
                            value=True, relx=10)
                        i += 1
                    i += 3
    return
async def jsk_show(self, ctx: commands.Context):
"""
Shows Jishaku in the help command.
"""
if not self.jsk.hidden:
return await ctx.send("Jishaku is already visible.")
self.jsk.hidden = False
await ctx.send("Jishaku is now visible.") | Shows Jishaku in the help command. | Below is the the instruction that describes the task:
### Input:
Shows Jishaku in the help command.
### Response:
async def jsk_show(self, ctx: commands.Context):
    """
    Shows Jishaku in the help command.

    Unhides the Jishaku cog when it is currently hidden; otherwise just
    tells the invoker that it is already visible.
    """
    if self.jsk.hidden:
        # Flip the cog back to visible and confirm to the invoker.
        self.jsk.hidden = False
        await ctx.send("Jishaku is now visible.")
        return
    return await ctx.send("Jishaku is already visible.")
def weibull(target, seeds, shape, scale, loc):
r"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
This controls the width of the distribution with most of values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
be used to find suitable values of 'shape', 'scale'` and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
"""
seeds = target[seeds]
value = spts.weibull_min.ppf(q=seeds, c=shape, scale=scale, loc=loc)
return value | r"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
This controls the width of the distribution with most of values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
be used to find suitable values of 'shape', 'scale'` and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50) | Below is the the instruction that describes the task:
### Input:
r"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
shape : float
This controls the skewness of the distribution, with 'shape' < 1 giving
values clustered on the low end of the range with a long tail, and
'shape' > 1 giving a more symmetrical distribution.
scale : float
This controls the width of the distribution with most of values falling
below this number.
loc : float
Applies an offset to the distribution such that the smallest values are
above this number.
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'weibull_min' method of the scipy.stats module. This can
be used to find suitable values of 'shape', 'scale'` and 'loc'. Note that
'shape' is represented by 'c' in the actual function call.
>>> import scipy
>>> func = scipy.stats.weibull_min(c=1.5, scale=0.0001, loc=0)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
### Response:
def weibull(target, seeds, shape, scale, loc):
    r"""
    Map pre-generated random seeds through a Weibull distribution.

    Parameters
    ----------
    target : OpenPNM Object
        The object this model is associated with; it supplies the seed
        values and controls the length of the returned array.
    seeds : string
        Dictionary key on the target object holding seed values in the
        open interval (0, 1).
    shape : float
        Skewness control ('c' in scipy.stats.weibull_min): values < 1
        cluster results at the low end with a long tail, values > 1 give
        a more symmetrical distribution.
    scale : float
        Width control; most generated values fall below this number.
    loc : float
        Offset applied so the smallest values lie above this number.

    Notes
    -----
    This is the percent-point (inverse CDF) transform of
    ``scipy.stats.weibull_min``; experiment with that object directly to
    choose suitable ``shape``, ``scale`` and ``loc`` values.
    """
    seed_values = target[seeds]
    return spts.weibull_min.ppf(q=seed_values, c=shape, scale=scale, loc=loc)
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
NWSRFS Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse
with open(path, 'r') as nwsrfsFile:
for line in nwsrfsFile:
sline = line.strip().split()
# Cases
if sline[0].lower() == 'number_bands:':
self.numBands = sline[1]
elif sline[0].lower() == 'lower_elevation':
"""DO NOTHING"""
else:
# Create GSSHAPY NwsrfsRecord object
record = NwsrfsRecord(lowerElev=sline[0],
upperElev=sline[1],
mfMin=sline[2],
mfMax=sline[3],
scf=sline[4],
frUse=sline[5],
tipm=sline[6],
nmf=sline[7],
fua=sline[8],
plwhc=sline[9])
# Associate NwsrfsRecord with NwsrfsFile
record.nwsrfsFile = self | NWSRFS Read from File Method | Below is the the instruction that describes the task:
### Input:
NWSRFS Read from File Method
### Response:
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
    """
    NWSRFS Read from File Method

    Parse the NWSRFS file at ``path`` into this object:

    * a ``NUMBER_BANDS:`` card sets ``self.numBands``;
    * the ``LOWER_ELEVATION`` column-header card is skipped;
    * every other line's whitespace-separated fields become a
      ``NwsrfsRecord`` linked back to this file object.

    Only ``path`` and ``extension`` are read here; the other parameters
    are unused in this method.
    """
    # Set file extension property
    self.fileExtension = extension
    # Open file and parse
    with open(path, 'r') as nwsrfsFile:
        for line in nwsrfsFile:
            sline = line.strip().split()
            # Cases
            if sline[0].lower() == 'number_bands:':
                self.numBands = sline[1]
            elif sline[0].lower() == 'lower_elevation':
                # Column-header line: nothing to extract.  (Replaces the
                # original bare '"""DO NOTHING"""' string expression,
                # which was a no-op statement, with an explicit skip.)
                continue
            else:
                # Create GSSHAPY NwsrfsRecord object
                record = NwsrfsRecord(lowerElev=sline[0],
                                      upperElev=sline[1],
                                      mfMin=sline[2],
                                      mfMax=sline[3],
                                      scf=sline[4],
                                      frUse=sline[5],
                                      tipm=sline[6],
                                      nmf=sline[7],
                                      fua=sline[8],
                                      plwhc=sline[9])
                # Associate NwsrfsRecord with NwsrfsFile
                record.nwsrfsFile = self
def compute_n_digit_freqs(filename, n):
"""
Read digits of pi from a file and compute the n digit frequencies.
"""
d = txt_file_to_digits(filename)
freqs = n_digit_freqs(d, n)
return freqs | Read digits of pi from a file and compute the n digit frequencies. | Below is the the instruction that describes the task:
### Input:
Read digits of pi from a file and compute the n digit frequencies.
### Response:
def compute_n_digit_freqs(filename, n):
    """
    Read digits of pi from a file and compute the n digit frequencies.
    """
    digits = txt_file_to_digits(filename)
    return n_digit_freqs(digits, n)
def _copy(self):
"""
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
"""
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath | Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file. | Below is the the instruction that describes the task:
### Input:
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
### Response:
def _copy(self):
    """
    Called during a PUT request where the action specifies
    a copy operation. Returns resource URI of the new file.
    """
    destination = self.action['copypath']
    try:
        self.fs.copy(self.fp, destination)
    except OSError:
        # Copy failed (missing source, permissions, ...): report 400.
        raise tornado.web.HTTPError(400)
    return destination
def send_tunnelling_request(self, cemi, auto_connect=True):
"""Sends a tunneling request based on the given CEMI data.
This method does not wait for an acknowledge or result frame.
"""
if not self.connected:
if auto_connect:
if not self.connect():
raise KNXException("KNX tunnel not reconnected")
else:
raise KNXException("KNX tunnel not connected")
frame = KNXIPFrame(KNXIPFrame.TUNNELING_REQUEST)
# Connection header see KNXnet/IP 4.4.6 TUNNELLING_REQUEST
body = [0x04, self.channel, self.seq, 0x00]
if self.seq < 0xff:
self.seq += 1
else:
self.seq = 0
body.extend(cemi.to_body())
frame.body = body
self.data_server.socket.sendto(
frame.to_frame(), (self.remote_ip, self.remote_port))
# See KNX specification 3.8.4 chapter 2.6 "Frame confirmation"
# Send KNX packet 2 times if not acknowledged and close
# the connection if no ack is received
res = self.ack_semaphore.acquire(blocking=True, timeout=1)
# Resend package if not acknowledged after 1 seconds
if not res:
self.data_server.socket.sendto(
frame.to_frame(), (self.remote_ip, self.remote_port))
res = self.ack_semaphore.acquire(blocking=True, timeout=1)
# disconnect and reconnect of not acknowledged
if not res:
self.disconnect()
self.connect()
return res | Sends a tunneling request based on the given CEMI data.
This method does not wait for an acknowledge or result frame. | Below is the the instruction that describes the task:
### Input:
Sends a tunneling request based on the given CEMI data.
This method does not wait for an acknowledge or result frame.
### Response:
def send_tunnelling_request(self, cemi, auto_connect=True):
    """Send a TUNNELLING_REQUEST frame carrying the given CEMI data.

    The frame is sent over UDP to the configured gateway, then the
    method blocks up to ~2 s total on the acknowledge semaphore
    (presumably released by the receive path when the ACK arrives —
    confirm), resending once before disconnecting/reconnecting.

    :param cemi: object whose ``to_body()`` yields the CEMI payload bytes
    :param auto_connect: if True, (re)connect a disconnected tunnel
        instead of raising
    :return: True if an acknowledge was received, False otherwise
    :raises KNXException: if the tunnel is disconnected and either
        ``auto_connect`` is False or reconnecting fails
    """
    if not self.connected:
        if auto_connect:
            if not self.connect():
                raise KNXException("KNX tunnel not reconnected")
        else:
            raise KNXException("KNX tunnel not connected")
    frame = KNXIPFrame(KNXIPFrame.TUNNELING_REQUEST)
    # Connection header see KNXnet/IP 4.4.6 TUNNELLING_REQUEST
    body = [0x04, self.channel, self.seq, 0x00]
    # Sequence counter is one octet on the wire; wrap at 0xff.
    if self.seq < 0xff:
        self.seq += 1
    else:
        self.seq = 0
    body.extend(cemi.to_body())
    frame.body = body
    self.data_server.socket.sendto(
        frame.to_frame(), (self.remote_ip, self.remote_port))
    # See KNX specification 3.8.4 chapter 2.6 "Frame confirmation"
    # Send KNX packet 2 times if not acknowledged and close
    # the connection if no ack is received
    res = self.ack_semaphore.acquire(blocking=True, timeout=1)
    # Resend package if not acknowledged after 1 second
    if not res:
        self.data_server.socket.sendto(
            frame.to_frame(), (self.remote_ip, self.remote_port))
        res = self.ack_semaphore.acquire(blocking=True, timeout=1)
    # Disconnect and reconnect if still not acknowledged
    if not res:
        self.disconnect()
        self.connect()
    return res
def est_payouts(self):
'''
Calculate current estimate of average payout for each bandit.
Returns
-------
array of floats or None
'''
if len(self.choices) < 1:
print('slots: No trials run so far.')
return None
else:
return self.wins/(self.pulls+0.1) | Calculate current estimate of average payout for each bandit.
Returns
-------
array of floats or None | Below is the the instruction that describes the task:
### Input:
Calculate current estimate of average payout for each bandit.
Returns
-------
array of floats or None
### Response:
def est_payouts(self):
    '''
    Calculate current estimate of average payout for each bandit.

    Returns
    -------
    array of floats or None
    '''
    if len(self.choices) == 0:
        print('slots: No trials run so far.')
        return None
    # The small constant in the denominator avoids division by zero
    # for bandits that have never been pulled.
    return self.wins / (self.pulls + 0.1)
def add_feature(self, label, value=None):
"""
label: A VW label (not containing characters from escape_dict.keys(),
unless 'escape' mode is on)
value: float giving the weight or magnitude of this feature
"""
if self.escape:
label = escape_vw_string(label)
elif self.validate:
validate_vw_string(label)
feature = (label, value)
self.features.append(feature) | label: A VW label (not containing characters from escape_dict.keys(),
unless 'escape' mode is on)
value: float giving the weight or magnitude of this feature | Below is the the instruction that describes the task:
### Input:
label: A VW label (not containing characters from escape_dict.keys(),
unless 'escape' mode is on)
value: float giving the weight or magnitude of this feature
### Response:
def add_feature(self, label, value=None):
    """
    label: A VW label (not containing characters from escape_dict.keys(),
    unless 'escape' mode is on)
    value: float giving the weight or magnitude of this feature
    """
    # Escaping takes priority over validation; only one of the two runs.
    if self.escape:
        label = escape_vw_string(label)
    elif self.validate:
        validate_vw_string(label)
    self.features.append((label, value))
def ps(self):
"""
Get the process information from the system PS command.
"""
# Get the process ID
pid = self.get()
# Parent / child processes
parent = None
children = []
# If the process is running
if pid:
proc = Popen(['ps', '-ef'], stdout=PIPE)
for _line in proc.stdout.readlines():
line = self.unicode(_line.rstrip())
# Get the current PID / parent PID
this_pid, this_parent = self._ps_extract_pid(line)
try:
# If scanning a child process
if int(pid) == int(this_parent):
children.append('{}; [{}]'.format(this_pid.rstrip(), re.sub(' +', ' ', line)))
# If scanning the parent process
if int(pid) == int(this_pid):
parent = re.sub(' +', ' ', line)
# Ignore value errors
except ValueError:
continue
# Return the parent PID and any children processes
return (parent, children) | Get the process information from the system PS command. | Below is the the instruction that describes the task:
### Input:
Get the process information from the system PS command.
### Response:
def ps(self):
    """
    Get the process information from the system PS command.

    Runs ``ps -ef`` and scans its output for the line whose PID equals
    this object's PID (the parent) and for lines whose parent PID
    equals it (the children).

    :return: tuple ``(parent, children)`` where ``parent`` is the
        whitespace-normalised ``ps`` line for the process (or None if
        not running) and ``children`` is a list of
        ``'<pid>; [<line>]'`` strings.
    """
    # Get the process ID
    pid = self.get()
    # Parent / child processes
    parent = None
    children = []
    # If the process is running
    if pid:
        proc = Popen(['ps', '-ef'], stdout=PIPE)
        for _line in proc.stdout.readlines():
            # self.unicode presumably decodes the raw bytes — confirm.
            line = self.unicode(_line.rstrip())
            # Get the current PID / parent PID
            this_pid, this_parent = self._ps_extract_pid(line)
            try:
                # If scanning a child process
                if int(pid) == int(this_parent):
                    children.append('{}; [{}]'.format(this_pid.rstrip(), re.sub(' +', ' ', line)))
                # If scanning the parent process
                if int(pid) == int(this_pid):
                    parent = re.sub(' +', ' ', line)
            # Ignore value errors (header line / non-numeric PID fields)
            except ValueError:
                continue
    # Return the parent PID and any children processes
    return (parent, children)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = [] | Clear all list before sync : feeds and categories | Below is the the instruction that describes the task:
### Input:
Clear all list before sync : feeds and categories
### Response:
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = [] |
def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--sourcedir', '-s', required=True, action='store',
help='Source folder containing python files.')
arg_parser.add_argument('--docfile', '-o', required=True, action='store',
help='Name of markdown file to write output to.')
arg_parser.add_argument('--projectname', '-n', required=False, action='store',
help='Project name (optional, otherwise sourcedir will be used).')
arg_parser.add_argument('--codelinks', '-c', required=False, action='store_true',
help='Include links to source files (optional).')
args = arg_parser.parse_args()
source_dir = args.sourcedir
doc_file = args.docfile
code_links = args.codelinks
proj_name = args.projectname
if proj_name is None:
proj_name = source_dir
# main document dictionary
meta_doc = {'header': proj_name + ' Technical Reference Guide'}
meta_doc['modules'] = []
# process each file
for source_file in glob.glob(source_dir + '/*.py'):
if '__' in source_file:
print('Skipping: ' + source_file)
continue
file_meta_doc = process_file(source_file)
meta_doc['modules'].append(file_meta_doc)
# create output file
process_output(meta_doc, doc_file, code_links) | Main routine. | Below is the the instruction that describes the task:
### Input:
Main routine.
### Response:
def main():
    '''Main routine.'''
    # Command-line interface.
    parser = argparse.ArgumentParser()
    parser.add_argument('--sourcedir', '-s', required=True, action='store',
                        help='Source folder containing python files.')
    parser.add_argument('--docfile', '-o', required=True, action='store',
                        help='Name of markdown file to write output to.')
    parser.add_argument('--projectname', '-n', required=False, action='store',
                        help='Project name (optional, otherwise sourcedir will be used).')
    parser.add_argument('--codelinks', '-c', required=False, action='store_true',
                        help='Include links to source files (optional).')
    args = parser.parse_args()
    proj_name = args.projectname if args.projectname is not None else args.sourcedir
    # Main document dictionary: header plus one entry per module.
    meta_doc = {'header': proj_name + ' Technical Reference Guide',
                'modules': []}
    # Process each python file, skipping dunder-named files.
    for source_file in glob.glob(args.sourcedir + '/*.py'):
        if '__' in source_file:
            print('Skipping: ' + source_file)
            continue
        meta_doc['modules'].append(process_file(source_file))
    # Create output file.
    process_output(meta_doc, args.docfile, args.codelinks)
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name] | set current section during parsing | Below is the the instruction that describes the task:
### Input:
set current section during parsing
### Response:
def set_section(self, section_name):
    """Set the current section during parsing.

    Looks up ``section_name`` in the known sections, creating and
    registering a new DocSection on first use, and records the result
    as the active ``self.section``.
    """
    # dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on Python 2 as well.
    if section_name not in self.sections:
        section = DocSection(section_name)
        self.sections[section_name] = section
        self.section = section
    else:
        self.section = self.sections[section_name]
def update_account(self, account):
"""
Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
"""
url = ACCOUNTS_API.format(account.account_id)
body = {"account": {"name": account.name}}
return CanvasAccount(data=self._put_resource(url, body)) | Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update | Below is the the instruction that describes the task:
### Input:
Update the passed account. Returns the updated account.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
### Response:
def update_account(self, account):
    """
    Update the passed account. Returns the updated account.
    https://canvas.instructure.com/doc/api/accounts.html#method.accounts.update
    """
    url = ACCOUNTS_API.format(account.account_id)
    payload = {"account": {"name": account.name}}
    response = self._put_resource(url, payload)
    return CanvasAccount(data=response)
def _get_path_pattern_tornado4(self):
"""Return the path pattern used when routing a request. (Tornado<4.5)
:rtype: str
"""
for host, handlers in self.application.handlers:
if host.match(self.request.host):
for handler in handlers:
if handler.regex.match(self.request.path):
return handler.regex.pattern | Return the path pattern used when routing a request. (Tornado<4.5)
:rtype: str | Below is the the instruction that describes the task:
### Input:
Return the path pattern used when routing a request. (Tornado<4.5)
:rtype: str
### Response:
def _get_path_pattern_tornado4(self):
"""Return the path pattern used when routing a request. (Tornado<4.5)
:rtype: str
"""
for host, handlers in self.application.handlers:
if host.match(self.request.host):
for handler in handlers:
if handler.regex.match(self.request.path):
return handler.regex.pattern |
def _clean_workers(self):
"""Delete periodically workers in workers bag."""
while self._bag_collector:
self._bag_collector.popleft()
self._timer_worker_delete.stop() | Delete periodically workers in workers bag. | Below is the the instruction that describes the task:
### Input:
Delete periodically workers in workers bag.
### Response:
def _clean_workers(self):
"""Delete periodically workers in workers bag."""
while self._bag_collector:
self._bag_collector.popleft()
self._timer_worker_delete.stop() |
def import_job(db, calc_id, calc_mode, description, user_name, status,
hc_id, datadir):
"""
Insert a calculation inside the database, if calc_id is not taken
"""
job = dict(id=calc_id,
calculation_mode=calc_mode,
description=description,
user_name=user_name,
hazard_calculation_id=hc_id,
is_running=0,
status=status,
ds_calc_dir=os.path.join('%s/calc_%s' % (datadir, calc_id)))
db('INSERT INTO job (?S) VALUES (?X)', job.keys(), job.values()) | Insert a calculation inside the database, if calc_id is not taken | Below is the the instruction that describes the task:
### Input:
Insert a calculation inside the database, if calc_id is not taken
### Response:
def import_job(db, calc_id, calc_mode, description, user_name, status,
               hc_id, datadir):
    """
    Insert a calculation inside the database, if calc_id is not taken
    """
    ds_calc_dir = os.path.join('%s/calc_%s' % (datadir, calc_id))
    # NB: column order must line up with the (?S)/(?X) placeholders.
    job = dict(id=calc_id,
               calculation_mode=calc_mode,
               description=description,
               user_name=user_name,
               hazard_calculation_id=hc_id,
               is_running=0,
               status=status,
               ds_calc_dir=ds_calc_dir)
    db('INSERT INTO job (?S) VALUES (?X)', job.keys(), job.values())
def get_namespace(self):
"""
Find the name the user is using to access holoviews.
"""
if 'holoviews' not in sys.modules:
raise ImportError('HoloViews does not seem to be imported')
matches = [k for k,v in get_ipython().user_ns.items() # noqa (get_ipython)
if not k.startswith('_') and v is sys.modules['holoviews']]
if len(matches) == 0:
raise Exception("Could not find holoviews module in namespace")
return '%s.archive' % matches[0] | Find the name the user is using to access holoviews. | Below is the the instruction that describes the task:
### Input:
Find the name the user is using to access holoviews.
### Response:
def get_namespace(self):
    """
    Find the name the user is using to access holoviews.
    """
    if 'holoviews' not in sys.modules:
        raise ImportError('HoloViews does not seem to be imported')
    hv_module = sys.modules['holoviews']
    user_ns = get_ipython().user_ns  # noqa (get_ipython)
    names = [name for name, obj in user_ns.items()
             if obj is hv_module and not name.startswith('_')]
    if not names:
        raise Exception("Could not find holoviews module in namespace")
    return '%s.archive' % names[0]
def results(self, limit=100):
"""
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
"""
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item | Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters. | Below is the the instruction that describes the task:
### Input:
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
### Response:
def results(self, limit=100):
    """
    Yields the results from the API, efficiently handling the pagination and
    properly passing all parameters.

    :param limit: maximum number of objects requested per API page.

    Pagination state:
      * ``limited`` — True when a slice upper bound (``high_mark``) was set;
      * ``rmax`` — total results to yield (slice size when limited,
        otherwise the server's ``total_count`` once known);
      * ``rnum`` — results yielded so far.
    """
    limited = True if self.high_mark is not None else False
    rmax = self.high_mark - self.low_mark if limited else None
    rnum = 0
    params = self.get_params()
    params["offset"] = self.low_mark
    params["limit"] = limit
    # NB: parses as `(not limited and rmax is None) or rnum < rmax`,
    # so the first unlimited iteration runs before rmax is known.
    while not limited and rmax is None or rnum < rmax:
        if limited or rmax is not None:
            # Shrink the last page request to exactly what remains.
            rleft = rmax - rnum
            params["limit"] = rleft if rleft < limit else limit
        r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
        data = self.resource._meta.api.resource_deserialize(r.text)
        if not limited:
            rmax = data["meta"]["total_count"]
        # Server may report fewer total results than requested; clamp.
        if data["meta"]["total_count"] < rmax:
            rmax = data["meta"]["total_count"]
        params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
        for item in data["objects"]:
            rnum += 1
            yield item
def seek_to(self, position):
"""Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
"""
# 0) We have no way to associate the date with a position,
# so we have to scan the shard from the beginning.
self.jump_to(iterator_type="trim_horizon")
position = int(position.timestamp())
while (not self.exhausted) and (self.empty_responses < CALLS_TO_REACH_HEAD):
records = self.get_records()
# We can skip the whole record set if the newest (last) record isn't new enough.
if records and records[-1]["meta"]["created_at"].timestamp() >= position:
# Looking for the first number *below* the position.
for offset, record in enumerate(reversed(records)):
if record["meta"]["created_at"].timestamp() < position:
index = len(records) - offset
return records[index:]
return records
# Either exhausted the Shard or caught up to HEAD.
return [] | Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty. | Below is the the instruction that describes the task:
### Input:
Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
### Response:
def seek_to(self, position):
"""Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
"""
# 0) We have no way to associate the date with a position,
# so we have to scan the shard from the beginning.
self.jump_to(iterator_type="trim_horizon")
position = int(position.timestamp())
while (not self.exhausted) and (self.empty_responses < CALLS_TO_REACH_HEAD):
records = self.get_records()
# We can skip the whole record set if the newest (last) record isn't new enough.
if records and records[-1]["meta"]["created_at"].timestamp() >= position:
# Looking for the first number *below* the position.
for offset, record in enumerate(reversed(records)):
if record["meta"]["created_at"].timestamp() < position:
index = len(records) - offset
return records[index:]
return records
# Either exhausted the Shard or caught up to HEAD.
return [] |
def proximal(self):
"""Return the ``proximal factory`` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
`proximal factory` for the L1-norm.
"""
if self.pointwise_norm.exponent == 1:
return proximal_l1(space=self.domain)
elif self.pointwise_norm.exponent == 2:
return proximal_l1_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p = 1 '
'or 2') | Return the ``proximal factory`` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
`proximal factory` for the L1-norm. | Below is the the instruction that describes the task:
### Input:
Return the ``proximal factory`` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
`proximal factory` for the L1-norm.
### Response:
def proximal(self):
"""Return the ``proximal factory`` of the functional.
See Also
--------
odl.solvers.nonsmooth.proximal_operators.proximal_l1 :
`proximal factory` for the L1-norm.
"""
if self.pointwise_norm.exponent == 1:
return proximal_l1(space=self.domain)
elif self.pointwise_norm.exponent == 2:
return proximal_l1_l2(space=self.domain)
else:
raise NotImplementedError('`proximal` only implemented for p = 1 '
'or 2') |
def find(self, *args, **kwargs):
"""Same as :meth:`pymongo.collection.Collection.find`, except
it returns the right document class.
"""
return Cursor(self, *args, wrap=self.document_class, **kwargs) | Same as :meth:`pymongo.collection.Collection.find`, except
it returns the right document class. | Below is the the instruction that describes the task:
### Input:
Same as :meth:`pymongo.collection.Collection.find`, except
it returns the right document class.
### Response:
def find(self, *args, **kwargs):
"""Same as :meth:`pymongo.collection.Collection.find`, except
it returns the right document class.
"""
return Cursor(self, *args, wrap=self.document_class, **kwargs) |
def textile(text, **kwargs):
"""
Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter.
"""
from django.contrib.markup.templatetags.markup import textile
return textile(text) | Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter. | Below is the the instruction that describes the task:
### Input:
Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter.
### Response:
def textile(text, **kwargs):
"""
Applies Textile conversion to a string, and returns the HTML.
This is simply a pass-through to the ``textile`` template filter
included in ``django.contrib.markup``, which works around issues
PyTextile has with Unicode strings. If you're not using Django but
want to use Textile with ``MarkupFormatter``, you'll need to
supply your own Textile filter.
"""
from django.contrib.markup.templatetags.markup import textile
return textile(text) |
def describe_reserved_db_instances_offerings(ReservedDBInstancesOfferingId=None, DBInstanceClass=None, Duration=None, ProductDescription=None, OfferingType=None, MultiAZ=None, Filters=None, MaxRecords=None, Marker=None):
"""
Lists available reserved DB instance offerings.
See also: AWS API Documentation
Examples
This example lists information for all reserved DB instance offerings for the specified DB instance class, duration, product, offering type, and availability zone settings.
Expected Output:
:example: response = client.describe_reserved_db_instances_offerings(
ReservedDBInstancesOfferingId='string',
DBInstanceClass='string',
Duration='string',
ProductDescription='string',
OfferingType='string',
MultiAZ=True|False,
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type ReservedDBInstancesOfferingId: string
:param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type DBInstanceClass: string
:param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.
:type Duration: string
:param Duration: Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.
Valid Values: 1 | 3 | 31536000 | 94608000
:type ProductDescription: string
:param ProductDescription: Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.
:type OfferingType: string
:param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.
Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront'
:type MultiAZ: boolean
:param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.
:type Filters: list
:param Filters: This parameter is not currently supported.
(dict) --This type is not currently supported.
Name (string) -- [REQUIRED]This parameter is not currently supported.
Values (list) -- [REQUIRED]This parameter is not currently supported.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'ReservedDBInstancesOfferings': [
{
'ReservedDBInstancesOfferingId': 'string',
'DBInstanceClass': 'string',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ProductDescription': 'string',
'OfferingType': 'string',
'MultiAZ': True|False,
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
"""
pass | Lists available reserved DB instance offerings.
See also: AWS API Documentation
Examples
This example lists information for all reserved DB instance offerings for the specified DB instance class, duration, product, offering type, and availability zone settings.
Expected Output:
:example: response = client.describe_reserved_db_instances_offerings(
ReservedDBInstancesOfferingId='string',
DBInstanceClass='string',
Duration='string',
ProductDescription='string',
OfferingType='string',
MultiAZ=True|False,
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type ReservedDBInstancesOfferingId: string
:param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type DBInstanceClass: string
:param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.
:type Duration: string
:param Duration: Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.
Valid Values: 1 | 3 | 31536000 | 94608000
:type ProductDescription: string
:param ProductDescription: Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.
:type OfferingType: string
:param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.
Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront'
:type MultiAZ: boolean
:param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.
:type Filters: list
:param Filters: This parameter is not currently supported.
(dict) --This type is not currently supported.
Name (string) -- [REQUIRED]This parameter is not currently supported.
Values (list) -- [REQUIRED]This parameter is not currently supported.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'ReservedDBInstancesOfferings': [
{
'ReservedDBInstancesOfferingId': 'string',
'DBInstanceClass': 'string',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ProductDescription': 'string',
'OfferingType': 'string',
'MultiAZ': True|False,
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
} | Below is the the instruction that describes the task:
### Input:
Lists available reserved DB instance offerings.
See also: AWS API Documentation
Examples
This example lists information for all reserved DB instance offerings for the specified DB instance class, duration, product, offering type, and availability zone settings.
Expected Output:
:example: response = client.describe_reserved_db_instances_offerings(
ReservedDBInstancesOfferingId='string',
DBInstanceClass='string',
Duration='string',
ProductDescription='string',
OfferingType='string',
MultiAZ=True|False,
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type ReservedDBInstancesOfferingId: string
:param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type DBInstanceClass: string
:param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.
:type Duration: string
:param Duration: Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.
Valid Values: 1 | 3 | 31536000 | 94608000
:type ProductDescription: string
:param ProductDescription: Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.
:type OfferingType: string
:param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.
Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront'
:type MultiAZ: boolean
:param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.
:type Filters: list
:param Filters: This parameter is not currently supported.
(dict) --This type is not currently supported.
Name (string) -- [REQUIRED]This parameter is not currently supported.
Values (list) -- [REQUIRED]This parameter is not currently supported.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'ReservedDBInstancesOfferings': [
{
'ReservedDBInstancesOfferingId': 'string',
'DBInstanceClass': 'string',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ProductDescription': 'string',
'OfferingType': 'string',
'MultiAZ': True|False,
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
### Response:
def describe_reserved_db_instances_offerings(ReservedDBInstancesOfferingId=None, DBInstanceClass=None, Duration=None, ProductDescription=None, OfferingType=None, MultiAZ=None, Filters=None, MaxRecords=None, Marker=None):
"""
Lists available reserved DB instance offerings.
See also: AWS API Documentation
Examples
This example lists information for all reserved DB instance offerings for the specified DB instance class, duration, product, offering type, and availability zone settings.
Expected Output:
:example: response = client.describe_reserved_db_instances_offerings(
ReservedDBInstancesOfferingId='string',
DBInstanceClass='string',
Duration='string',
ProductDescription='string',
OfferingType='string',
MultiAZ=True|False,
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type ReservedDBInstancesOfferingId: string
:param ReservedDBInstancesOfferingId: The offering identifier filter value. Specify this parameter to show only the available offering that matches the specified reservation identifier.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type DBInstanceClass: string
:param DBInstanceClass: The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.
:type Duration: string
:param Duration: Duration filter value, specified in years or seconds. Specify this parameter to show only reservations for this duration.
Valid Values: 1 | 3 | 31536000 | 94608000
:type ProductDescription: string
:param ProductDescription: Product description filter value. Specify this parameter to show only the available offerings matching the specified product description.
:type OfferingType: string
:param OfferingType: The offering type filter value. Specify this parameter to show only the available offerings matching the specified offering type.
Valid Values: 'Partial Upfront' | 'All Upfront' | 'No Upfront'
:type MultiAZ: boolean
:param MultiAZ: The Multi-AZ filter value. Specify this parameter to show only the available offerings matching the specified Multi-AZ parameter.
:type Filters: list
:param Filters: This parameter is not currently supported.
(dict) --This type is not currently supported.
Name (string) -- [REQUIRED]This parameter is not currently supported.
Values (list) -- [REQUIRED]This parameter is not currently supported.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so that the following results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'ReservedDBInstancesOfferings': [
{
'ReservedDBInstancesOfferingId': 'string',
'DBInstanceClass': 'string',
'Duration': 123,
'FixedPrice': 123.0,
'UsagePrice': 123.0,
'CurrencyCode': 'string',
'ProductDescription': 'string',
'OfferingType': 'string',
'MultiAZ': True|False,
'RecurringCharges': [
{
'RecurringChargeAmount': 123.0,
'RecurringChargeFrequency': 'string'
},
]
},
]
}
"""
pass |
def save_matlab_model(model, file_name, varname=None):
"""Save the cobra model as a .mat file.
This .mat file can be used directly in the MATLAB version of COBRA.
Parameters
----------
model : cobra.core.Model.Model object
The model to save
file_name : str or file-like object
The file to save to
varname : string
The name of the variable within the workspace
"""
if not scipy_io:
raise ImportError('load_matlab_model requires scipy')
if varname is None:
varname = str(model.id) \
if model.id is not None and len(model.id) > 0 \
else "exported_model"
mat = create_mat_dict(model)
scipy_io.savemat(file_name, {varname: mat},
appendmat=True, oned_as="column") | Save the cobra model as a .mat file.
This .mat file can be used directly in the MATLAB version of COBRA.
Parameters
----------
model : cobra.core.Model.Model object
The model to save
file_name : str or file-like object
The file to save to
varname : string
The name of the variable within the workspace | Below is the the instruction that describes the task:
### Input:
Save the cobra model as a .mat file.
This .mat file can be used directly in the MATLAB version of COBRA.
Parameters
----------
model : cobra.core.Model.Model object
The model to save
file_name : str or file-like object
The file to save to
varname : string
The name of the variable within the workspace
### Response:
def save_matlab_model(model, file_name, varname=None):
"""Save the cobra model as a .mat file.
This .mat file can be used directly in the MATLAB version of COBRA.
Parameters
----------
model : cobra.core.Model.Model object
The model to save
file_name : str or file-like object
The file to save to
varname : string
The name of the variable within the workspace
"""
if not scipy_io:
raise ImportError('load_matlab_model requires scipy')
if varname is None:
varname = str(model.id) \
if model.id is not None and len(model.id) > 0 \
else "exported_model"
mat = create_mat_dict(model)
scipy_io.savemat(file_name, {varname: mat},
appendmat=True, oned_as="column") |
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) | Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)] | Below is the the instruction that describes the task:
### Input:
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
### Response:
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) |
def db_continue( block_id, consensus_hash ):
"""
(required by virtualchain state engine)
Called when virtualchain has synchronized all state for this block.
Blockstack uses this as a preemption point where it can safely
exit if the user has so requested.
"""
# every so often, clean up
if (block_id % 20) == 0:
log.debug("Pre-emptive garbage collection at %s" % block_id)
gc.collect(2)
return is_running() or os.environ.get("BLOCKSTACK_TEST") == "1" | (required by virtualchain state engine)
Called when virtualchain has synchronized all state for this block.
Blockstack uses this as a preemption point where it can safely
exit if the user has so requested. | Below is the the instruction that describes the task:
### Input:
(required by virtualchain state engine)
Called when virtualchain has synchronized all state for this block.
Blockstack uses this as a preemption point where it can safely
exit if the user has so requested.
### Response:
def db_continue( block_id, consensus_hash ):
"""
(required by virtualchain state engine)
Called when virtualchain has synchronized all state for this block.
Blockstack uses this as a preemption point where it can safely
exit if the user has so requested.
"""
# every so often, clean up
if (block_id % 20) == 0:
log.debug("Pre-emptive garbage collection at %s" % block_id)
gc.collect(2)
return is_running() or os.environ.get("BLOCKSTACK_TEST") == "1" |
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults) | Prints all configuration registries for debugging purposes. | Below is the the instruction that describes the task:
### Input:
Prints all configuration registries for debugging purposes.
### Response:
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults) |
def _match_serializers_by_query_arg(self, serializers):
"""Match serializer by query arg."""
# if the format query argument is present, match the serializer
arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
if arg_name:
arg_value = request.args.get(arg_name, None)
if arg_value is None:
return None
# Search for the serializer matching the format
try:
return serializers[
self.serializers_query_aliases[arg_value]]
except KeyError: # either no serializer for this format
return None
return None | Match serializer by query arg. | Below is the the instruction that describes the task:
### Input:
Match serializer by query arg.
### Response:
def _match_serializers_by_query_arg(self, serializers):
"""Match serializer by query arg."""
# if the format query argument is present, match the serializer
arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
if arg_name:
arg_value = request.args.get(arg_name, None)
if arg_value is None:
return None
# Search for the serializer matching the format
try:
return serializers[
self.serializers_query_aliases[arg_value]]
except KeyError: # either no serializer for this format
return None
return None |
def encode(number, alphabet):
"""
Converts an integer to a base n string where n is the length of the
provided alphabet.
Modified from http://en.wikipedia.org/wiki/Base_36
"""
if not isinstance(number, (int, long)):
raise TypeError("Number must be an integer.")
base_n = ""
sign = ""
if number < 0:
sign = "-"
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base_n = alphabet[i] + base_n
return sign + base_n | Converts an integer to a base n string where n is the length of the
provided alphabet.
Modified from http://en.wikipedia.org/wiki/Base_36 | Below is the the instruction that describes the task:
### Input:
Converts an integer to a base n string where n is the length of the
provided alphabet.
Modified from http://en.wikipedia.org/wiki/Base_36
### Response:
def encode(number, alphabet):
"""
Converts an integer to a base n string where n is the length of the
provided alphabet.
Modified from http://en.wikipedia.org/wiki/Base_36
"""
if not isinstance(number, (int, long)):
raise TypeError("Number must be an integer.")
base_n = ""
sign = ""
if number < 0:
sign = "-"
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base_n = alphabet[i] + base_n
return sign + base_n |
def get_vm_config_file(name, datacenter, placement, datastore,
service_instance=None):
'''
Queries the virtual machine config file and returns
vim.host.DatastoreBrowser.SearchResults object on success None on failure
name
Name of the virtual machine
datacenter
Datacenter name
datastore
Datastore where the virtual machine files are stored
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
'''
browser_spec = vim.host.DatastoreBrowser.SearchSpec()
directory = name
browser_spec.query = [vim.host.DatastoreBrowser.VmConfigQuery()]
datacenter_object = salt.utils.vmware.get_datacenter(service_instance,
datacenter)
if 'cluster' in placement:
container_object = salt.utils.vmware.get_cluster(datacenter_object,
placement['cluster'])
else:
container_objects = salt.utils.vmware.get_hosts(
service_instance,
datacenter_name=datacenter,
host_names=[placement['host']])
if not container_objects:
raise salt.exceptions.VMwareObjectRetrievalError(
'ESXi host named \'{0}\' wasn\'t '
'found.'.format(placement['host']))
container_object = container_objects[0]
# list of vim.host.DatastoreBrowser.SearchResults objects
files = salt.utils.vmware.get_datastore_files(service_instance,
directory,
[datastore],
container_object,
browser_spec)
if files and len(files[0].file) > 1:
raise salt.exceptions.VMwareMultipleObjectsError(
'Multiple configuration files found in '
'the same virtual machine folder')
elif files and files[0].file:
return files[0]
else:
return None | Queries the virtual machine config file and returns
vim.host.DatastoreBrowser.SearchResults object on success None on failure
name
Name of the virtual machine
datacenter
Datacenter name
datastore
Datastore where the virtual machine files are stored
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None. | Below is the the instruction that describes the task:
### Input:
Queries the virtual machine config file and returns
vim.host.DatastoreBrowser.SearchResults object on success None on failure
name
Name of the virtual machine
datacenter
Datacenter name
datastore
Datastore where the virtual machine files are stored
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
### Response:
def get_vm_config_file(name, datacenter, placement, datastore,
                       service_instance=None):
    '''
    Queries the virtual machine config file and returns a
    vim.host.DatastoreBrowser.SearchResults object on success, None on failure.

    name
        Name of the virtual machine (also the datastore folder searched)
    datacenter
        Datacenter name
    placement
        Dict with either a 'cluster' or a 'host' key selecting where to
        run the datastore browser query
    datastore
        Datastore where the virtual machine files are stored
    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.
    '''
    # Restrict the datastore-browser search to VM configuration files
    # inside the folder named after the VM.
    browser_spec = vim.host.DatastoreBrowser.SearchSpec()
    directory = name
    browser_spec.query = [vim.host.DatastoreBrowser.VmConfigQuery()]
    datacenter_object = salt.utils.vmware.get_datacenter(service_instance,
                                                         datacenter)
    # The browser query needs a container to run against: either the
    # named cluster, or a single ESXi host resolved from the placement.
    if 'cluster' in placement:
        container_object = salt.utils.vmware.get_cluster(datacenter_object,
                                                         placement['cluster'])
    else:
        container_objects = salt.utils.vmware.get_hosts(
            service_instance,
            datacenter_name=datacenter,
            host_names=[placement['host']])
        if not container_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'ESXi host named \'{0}\' wasn\'t '
                'found.'.format(placement['host']))
        container_object = container_objects[0]
    # list of vim.host.DatastoreBrowser.SearchResults objects
    files = salt.utils.vmware.get_datastore_files(service_instance,
                                                  directory,
                                                  [datastore],
                                                  container_object,
                                                  browser_spec)
    # Exactly one config file is expected per VM folder; more than one is
    # ambiguous and treated as an error, zero matches yields None.
    if files and len(files[0].file) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple configuration files found in '
            'the same virtual machine folder')
    elif files and files[0].file:
        return files[0]
    else:
        return None |
def _get_context():
    """Determine the most specific notebook context we are running in.

    Returns ``_CONTEXT_COLAB`` when inside a Colab notebook,
    ``_CONTEXT_IPYTHON`` when inside any other IPython notebook kernel
    (e.g. one started via ``jupyter notebook``), and falls through to
    ``_CONTEXT_NONE`` otherwise (plain scripts, the ``ipython`` shell).
    """
    # Colab: the ``google.colab`` module is importable, but the shell
    # returned by ``IPython.get_ipython`` lacks a ``get_trait`` method.
    try:
        import google.colab  # noqa: F401
        import IPython
    except ImportError:
        pass
    else:
        if IPython.get_ipython() is not None:
            # Assume a Colab notebook context.
            return _CONTEXT_COLAB
    # Jupyter / IPython: a notebook kernel exposes the "kernel" trait,
    # which a plain command-line IPython shell does not.
    try:
        import IPython
    except ImportError:
        pass
    else:
        shell = IPython.get_ipython()
        if shell is not None and shell.has_trait("kernel"):
            return _CONTEXT_IPYTHON
    # Not in any known notebook context.
return _CONTEXT_NONE | Determine the most specific context that we're in.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
    command-line or using the `ipython` interactive shell). | Below is the instruction that describes the task:
### Input:
Determine the most specific context that we're in.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
### Response:
def _get_context():
"""Determine the most specific context that we're in.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
try:
import google.colab
import IPython
except ImportError:
pass
else:
if IPython.get_ipython() is not None:
# We'll assume that we're in a Colab notebook context.
return _CONTEXT_COLAB
# In an IPython command line shell or Jupyter notebook, we can
# directly query whether we're in a notebook context.
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
if ipython is not None and ipython.has_trait("kernel"):
return _CONTEXT_IPYTHON
# Otherwise, we're not in a known notebook context.
return _CONTEXT_NONE |
def partial_transform(self, sequence, mode='clip'):
    """Transform a sequence of state labels to internal (integer) indexing.

    ``transmat_`` and ``countsmat_`` are indexed with integers between 0
    and ``n_states - 1``; this maps a sequence of arbitrary labels onto
    that internal indexing via ``self.mapping_``.

    Parameters
    ----------
    sequence : array-like
        A 1D iterable of state labels. Labels can be integers, strings,
        or other orderable objects.
    mode : {'clip', 'fill'}
        How to treat labels with no corresponding internal index (e.g.
        labels removed by the ergodic trimming step):

        ``clip``
            Unmapped labels are removed. Interior gaps split the sequence
            into two (or more) sub-sequences; leading/trailing gaps just
            shorten it. (Default)
        ``fill``
            Unmapped labels are replaced with NaN to signal missing data.

    Returns
    -------
    mapped_sequence : list or ndarray
        If mode is "fill", a single ndarray (int dtype when nothing is
        missing, float with NaN otherwise). If mode is "clip", a list of
        int ndarrays.

    Raises
    ------
    ValueError
        If ``mode`` is invalid or ``sequence`` is not 1D.
    """
    if mode not in ('clip', 'fill'):
        raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode)
    sequence = np.asarray(sequence)
    if sequence.ndim != 1:
        raise ValueError("Each sequence must be 1D")
    # Labels absent from the mapping become NaN placeholders.
    # NOTE: use the builtin ``float`` here -- the deprecated ``np.float``
    # alias was removed in NumPy 1.24 and raises AttributeError.
    f = np.vectorize(lambda k: self.mapping_.get(k, np.nan),
                     otypes=[float])
    a = f(sequence)
    if mode == 'fill':
        # Cast back to int only when every label was successfully mapped.
        if np.all(np.mod(a, 1) == 0):
            return a.astype(int)
        return a
    # mode == 'clip': keep only the contiguous runs of mapped labels.
    result = [a[s].astype(int) for s in
              np.ma.clump_unmasked(np.ma.masked_invalid(a))]
    return result
return result | Transform a sequence to internal indexing
Recall that `sequence` can be arbitrary labels, whereas ``transmat_``
and ``countsmat_`` are indexed with integers between 0 and
``n_states - 1``. This methods maps a set of sequences from the labels
onto this internal indexing.
Parameters
----------
sequence : array-like
A 1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
mode : {'clip', 'fill'}
Method by which to treat labels in `sequence` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shorted. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
mapped_sequence : list or ndarray
If mode is "fill", return an ndarray in internal indexing.
If mode is "clip", return a list of ndarrays each in internal
    indexing. | Below is the instruction that describes the task:
### Input:
Transform a sequence to internal indexing
Recall that `sequence` can be arbitrary labels, whereas ``transmat_``
and ``countsmat_`` are indexed with integers between 0 and
``n_states - 1``. This methods maps a set of sequences from the labels
onto this internal indexing.
Parameters
----------
sequence : array-like
A 1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
mode : {'clip', 'fill'}
Method by which to treat labels in `sequence` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shorted. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
mapped_sequence : list or ndarray
If mode is "fill", return an ndarray in internal indexing.
If mode is "clip", return a list of ndarrays each in internal
indexing.
### Response:
def partial_transform(self, sequence, mode='clip'):
"""Transform a sequence to internal indexing
Recall that `sequence` can be arbitrary labels, whereas ``transmat_``
and ``countsmat_`` are indexed with integers between 0 and
``n_states - 1``. This methods maps a set of sequences from the labels
onto this internal indexing.
Parameters
----------
sequence : array-like
A 1D iterable of state labels. Labels can be integers, strings, or
other orderable objects.
mode : {'clip', 'fill'}
Method by which to treat labels in `sequence` which do not have
a corresponding index. This can be due, for example, to the ergodic
trimming step.
``clip``
Unmapped labels are removed during transform. If they occur
at the beginning or end of a sequence, the resulting transformed
sequence will be shorted. If they occur in the middle of a
sequence, that sequence will be broken into two (or more)
sequences. (Default)
``fill``
Unmapped labels will be replaced with NaN, to signal missing
data. [The use of NaN to signal missing data is not fantastic,
but it's consistent with current behavior of the ``pandas``
library.]
Returns
-------
mapped_sequence : list or ndarray
If mode is "fill", return an ndarray in internal indexing.
If mode is "clip", return a list of ndarrays each in internal
indexing.
"""
if mode not in ['clip', 'fill']:
raise ValueError('mode must be one of ["clip", "fill"]: %s' % mode)
sequence = np.asarray(sequence)
if sequence.ndim != 1:
raise ValueError("Each sequence must be 1D")
f = np.vectorize(lambda k: self.mapping_.get(k, np.nan),
otypes=[np.float])
a = f(sequence)
if mode == 'fill':
if np.all(np.mod(a, 1) == 0):
result = a.astype(int)
else:
result = a
elif mode == 'clip':
result = [a[s].astype(int) for s in
np.ma.clump_unmasked(np.ma.masked_invalid(a))]
else:
raise RuntimeError()
return result |
def blow_out(self, location=None):
    """
    Force any remaining liquid to dispense, by moving
    this pipette's plunger to the calibrated `blow_out` position
    Notes
    -----
    If no `location` is passed, the pipette will blow_out
    from its current position.
    Parameters
    ----------
    location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
        The :any:`Placeable` (:any:`Well`) to perform the blow_out.
        Can also be a tuple with first item :any:`Placeable`,
        second item relative :any:`Vector`
    Returns
    -------
    This instance of :class:`Pipette`.
    Examples
    --------
    ..
    >>> from opentrons import instruments, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.aspirate(50).dispense().blow_out() # doctest: +SKIP
    """
    # Warn (but do not abort) when no tip is attached; the motion below
    # still runs -- presumably intentional best-effort behavior.
    if not self.tip_attached:
        log.warning("Cannot 'blow out' without a tip attached.")
    # Position the pipette first; ``move_to`` receives ``location=None``
    # for an in-place blow out -- TODO confirm against move_to's contract.
    self.move_to(location)
    # The plunger motor current must be configured before the move command.
    self.instrument_actuator.set_active_current(self._plunger_current)
    self.robot.poses = self.instrument_actuator.move(
        self.robot.poses,
        x=self._get_plunger_position('blow_out')
    )
    # All liquid has been expelled, so the tracked volume resets to zero.
    self.current_volume = 0
    return self | Force any remaining liquid to dispense, by moving
this pipette's plunger to the calibrated `blow_out` position
Notes
-----
If no `location` is passed, the pipette will blow_out
from it's current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the blow_out.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
    >>> p300.aspirate(50).dispense().blow_out() # doctest: +SKIP | Below is the instruction that describes the task:
### Input:
Force any remaining liquid to dispense, by moving
this pipette's plunger to the calibrated `blow_out` position
Notes
-----
If no `location` is passed, the pipette will blow_out
from it's current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the blow_out.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50).dispense().blow_out() # doctest: +SKIP
### Response:
def blow_out(self, location=None):
"""
Force any remaining liquid to dispense, by moving
this pipette's plunger to the calibrated `blow_out` position
Notes
-----
If no `location` is passed, the pipette will blow_out
from it's current position.
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the blow_out.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.aspirate(50).dispense().blow_out() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot 'blow out' without a tip attached.")
self.move_to(location)
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.move(
self.robot.poses,
x=self._get_plunger_position('blow_out')
)
self.current_volume = 0
return self |
async def auth_crypt(wallet_handle: int,
                     sender_vk: str,
                     recipient_vk: str,
                     msg: bytes) -> bytes:
    """
    **** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****
    Encrypt a message by authenticated-encryption scheme.
    Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
    Using Recipient's public key, Sender can compute a shared secret key.
    Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
    That shared secret key can be used to verify that the encrypted message was not tampered with,
    before eventually decrypting it.
    Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
    for specific DID.
    :param wallet_handle: wallet handler (created by open_wallet).
    :param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or
    indy_create_and_store_my_did
    :param recipient_vk: id (verkey) of their key
    :param msg: a message to be signed
    :return: encrypted message as an array of bytes
    """
    logger = logging.getLogger(__name__)
    logger.debug("auth_crypt: >>> wallet_handle: %r,sender_vk: %r, recipient_vk: %r, msg: %r",
                 wallet_handle,
                 sender_vk,
                 recipient_vk,
                 msg)
    # Converts the raw (pointer, length) pair delivered by the native
    # library into Python bytes; the trailing comma makes this a 1-tuple,
    # the shape the callback wrapper expects for transformed values.
    def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
        return bytes(arr_ptr[:arr_len]),
    # Create the ctypes callback once and cache it on the function object
    # so it cannot be garbage-collected while native code still holds it.
    if not hasattr(auth_crypt, "cb"):
        logger.debug("auth_crypt: Creating callback")
        auth_crypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
    # Marshal the Python arguments into the C types the FFI call expects.
    c_wallet_handle = c_int32(wallet_handle)
    c_sender_vk = c_char_p(sender_vk.encode('utf-8'))
    c_recipient_vk = c_char_p(recipient_vk.encode('utf-8'))
    c_msg_len = c_uint32(len(msg))
    res = await do_call('indy_crypto_auth_crypt',
                        c_wallet_handle,
                        c_sender_vk,
                        c_recipient_vk,
                        msg,
                        c_msg_len,
                        auth_crypt.cb)
    logger.debug("auth_crypt: <<< res: %r", res)
    return res | **** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****
Encrypt a message by authenticated-encryption scheme.
Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
Using Recipient's public key, Sender can compute a shared secret key.
Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
That shared secret key can be used to verify that the encrypted message was not tampered with,
before eventually decrypting it.
Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
for specific DID.
:param wallet_handle: wallet handler (created by open_wallet).
:param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or
indy_create_and_store_my_did
:param recipient_vk: id (verkey) of their key
:param msg: a message to be signed
    :return: encrypted message as an array of bytes | Below is the instruction that describes the task:
### Input:
**** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****
Encrypt a message by authenticated-encryption scheme.
Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
Using Recipient's public key, Sender can compute a shared secret key.
Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
That shared secret key can be used to verify that the encrypted message was not tampered with,
before eventually decrypting it.
Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
for specific DID.
:param wallet_handle: wallet handler (created by open_wallet).
:param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or
indy_create_and_store_my_did
:param recipient_vk: id (verkey) of their key
:param msg: a message to be signed
:return: encrypted message as an array of bytes
### Response:
async def auth_crypt(wallet_handle: int,
sender_vk: str,
recipient_vk: str,
msg: bytes) -> bytes:
"""
**** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****
Encrypt a message by authenticated-encryption scheme.
Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
Using Recipient's public key, Sender can compute a shared secret key.
Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
That shared secret key can be used to verify that the encrypted message was not tampered with,
before eventually decrypting it.
Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
for specific DID.
:param wallet_handle: wallet handler (created by open_wallet).
:param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or
indy_create_and_store_my_did
:param recipient_vk: id (verkey) of their key
:param msg: a message to be signed
:return: encrypted message as an array of bytes
"""
logger = logging.getLogger(__name__)
logger.debug("auth_crypt: >>> wallet_handle: %r,sender_vk: %r, recipient_vk: %r, msg: %r",
wallet_handle,
sender_vk,
recipient_vk,
msg)
def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
return bytes(arr_ptr[:arr_len]),
if not hasattr(auth_crypt, "cb"):
logger.debug("auth_crypt: Creating callback")
auth_crypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
c_wallet_handle = c_int32(wallet_handle)
c_sender_vk = c_char_p(sender_vk.encode('utf-8'))
c_recipient_vk = c_char_p(recipient_vk.encode('utf-8'))
c_msg_len = c_uint32(len(msg))
res = await do_call('indy_crypto_auth_crypt',
c_wallet_handle,
c_sender_vk,
c_recipient_vk,
msg,
c_msg_len,
auth_crypt.cb)
logger.debug("auth_crypt: <<< res: %r", res)
return res |
def submit(self, new_queue = None):
    """Mark this job -- and every job in its array -- as submitted.

    Clears any previous result/machine bookkeeping, optionally moves the
    job to ``new_queue``, and records the submission time.
    """
    if new_queue is not None:
        self.queue_name = new_queue
    self.status = 'submitted'
    self.result = None
    self.machine_name = None
    # Array jobs are reset exactly like the parent job.
    for job in self.array:
        job.status = 'submitted'
        job.result = None
        job.machine_name = None
    self.submit_time = datetime.now()
    self.start_time = None
    self.finish_time = None | Sets the status of this job to 'submitted'. | Below is the instruction that describes the task:
### Input:
Sets the status of this job to 'submitted'.
### Response:
def submit(self, new_queue = None):
"""Sets the status of this job to 'submitted'."""
self.status = 'submitted'
self.result = None
self.machine_name = None
if new_queue is not None:
self.queue_name = new_queue
for array_job in self.array:
array_job.status = 'submitted'
array_job.result = None
array_job.machine_name = None
self.submit_time = datetime.now()
self.start_time = None
self.finish_time = None |
def _select_by_field_or_tag(self, tag=None, field=None):
    """For internal use only. Build an OrderedDict of
    ``{identifier: field}`` for the fields matching the supplied
    ``field``/``tag`` filters (all enabled fields when neither is given).

    tag : str, optional
        Restrict the selection to enabled fields carrying this tag.
    field : str, optional
        Restrict the selection to this single field.

    Raises ``UnknownTagError`` when no enabled field carries ``tag``, and
    ``UnavailableFieldError`` (via ``get_field``) when ``field`` does not
    exist or is not available.
    """
    if field is not None:
        # A single, explicitly named field; ``get_field`` validates that
        # the field exists and is available.
        selected_fields = OrderedDict(
            [(field, self.fields.get_field(field, self.field_values))])
    elif tag is not None:
        # Every enabled field carrying the requested tag.
        selected_fields = OrderedDict(
            (name, fld)
            for (name, fld) in self.fields.enabled_fields(self.field_values)
            if tag in fld.tags)
        # Tags are applied to parent fields in the hierarchy, so a known
        # tag always matches at least one always-present top-level field;
        # an empty selection therefore means the tag does not exist.
        if not selected_fields:
            raise UnknownTagError(tag)
    else:
        # No filter supplied: select every enabled field.
        selected_fields = OrderedDict(
            self.fields.enabled_fields(self.field_values))
return selected_fields | For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
    is not available. | Below is the instruction that describes the task:
### Input:
For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
### Response:
def _select_by_field_or_tag(self, tag=None, field=None):
"""For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
# Get the set of fields whose values will be included in the value
if field is not None:
# Select just the specified field (checking the field exists)
field_obj = self.fields.get_field(field, self.field_values)
selected_fields = OrderedDict([(field, field_obj)])
elif tag is not None:
# Select just fields with the specified tag
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values)
if tag in f.tags)
# Fail if no fields match the supplied tag. Because tags are
# applied to parent fields in the hierarchy, it is guaranteed that
# if a tag exists, at least one top-level (i.e. always present)
# field will have the tag.
if not selected_fields:
raise UnknownTagError(tag)
else:
# No specific field/tag supplied, select all enabled fields.
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values))
return selected_fields |
def make_temp(string, suffix='', decode=True, delete=True):
    """Write *string* to a named temporary file and return it.

    xmlsec needs real files in some cases where only strings exist, hence
    the need for this function: it creates a temporary file with the
    string as its only content.

    :param string: the information to be placed in the file. ``str`` input
        is encoded to UTF-8 bytes before writing.
    :param suffix: optional suffix the temporary file name must carry
        (some callers require a specific extension).
    :param decode: if True, ``string`` is treated as base64 and decoded
        before being placed in the file.
    :param delete: if True, the file is removed when closed.
    :return: 2-tuple of (open file object, file name). The caller is
        responsible for closing the file; the name can be handed to the
        xmlsec functions.
    """
    ntf = NamedTemporaryFile(suffix=suffix, delete=delete)
    # The tempfile is opened in binary mode, so text input must be
    # encoded. ``bytes`` is ``str`` on Python 2, so this check replaces
    # the old ``six.binary_type`` test without a third-party dependency.
    if not isinstance(string, bytes):
        string = string.encode('utf-8')
    ntf.write(base64.b64decode(string) if decode else string)
    # Rewind so the caller (and xmlsec) can read the content immediately.
    ntf.seek(0)
    return ntf, ntf.name
return ntf, ntf.name | xmlsec needs files in some cases where only strings exist, hence the
need for this function. It creates a temporary file with the
string as only content.
:param string: The information to be placed in the file
:param suffix: The temporary file might have to have a specific
suffix in certain circumstances.
:param decode: The input string might be base64 coded. If so it
must, in some cases, be decoded before being placed in the file.
:return: 2-tuple with file pointer ( so the calling function can
close the file) and filename (which is for instance needed by the
    xmlsec function). | Below is the instruction that describes the task:
### Input:
xmlsec needs files in some cases where only strings exist, hence the
need for this function. It creates a temporary file with the
string as only content.
:param string: The information to be placed in the file
:param suffix: The temporary file might have to have a specific
suffix in certain circumstances.
:param decode: The input string might be base64 coded. If so it
must, in some cases, be decoded before being placed in the file.
:return: 2-tuple with file pointer ( so the calling function can
close the file) and filename (which is for instance needed by the
xmlsec function).
### Response:
def make_temp(string, suffix='', decode=True, delete=True):
""" xmlsec needs files in some cases where only strings exist, hence the
need for this function. It creates a temporary file with the
string as only content.
:param string: The information to be placed in the file
:param suffix: The temporary file might have to have a specific
suffix in certain circumstances.
:param decode: The input string might be base64 coded. If so it
must, in some cases, be decoded before being placed in the file.
:return: 2-tuple with file pointer ( so the calling function can
close the file) and filename (which is for instance needed by the
xmlsec function).
"""
ntf = NamedTemporaryFile(suffix=suffix, delete=delete)
# Python3 tempfile requires byte-like object
if not isinstance(string, six.binary_type):
string = string.encode('utf-8')
if decode:
ntf.write(base64.b64decode(string))
else:
ntf.write(string)
ntf.seek(0)
return ntf, ntf.name |
def remove_profile(self):
    """Remove the currently selected profile after user confirmation.

    Shows a warning dialog first; on confirmation the profile is removed
    from the combo box and from the underlying minimum-needs store, and
    the selection moves to whichever profile the combo box lands on.
    """
    profile_name = self.profile_combo.currentText()
    # noinspection PyTypeChecker
    button_selected = QMessageBox.warning(
        None,
        'Remove Profile',
        self.tr('Remove %s.') % profile_name,
        QMessageBox.Ok,
        QMessageBox.Cancel
    )
    if button_selected == QMessageBox.Ok:
        # Drop the entry from the UI first, then from the backing store;
        # the combo box index has already moved to a neighbouring entry.
        self.profile_combo.removeItem(
            self.profile_combo.currentIndex()
        )
        self.minimum_needs.remove_profile(profile_name)
        # Load whichever profile is now selected in the combo box.
        self.select_profile(self.profile_combo.currentIndex()) | Remove the current profile.
    Make sure the user is sure. | Below is the instruction that describes the task:
### Input:
Remove the current profile.
Make sure the user is sure.
### Response:
def remove_profile(self):
"""Remove the current profile.
Make sure the user is sure.
"""
profile_name = self.profile_combo.currentText()
# noinspection PyTypeChecker
button_selected = QMessageBox.warning(
None,
'Remove Profile',
self.tr('Remove %s.') % profile_name,
QMessageBox.Ok,
QMessageBox.Cancel
)
if button_selected == QMessageBox.Ok:
self.profile_combo.removeItem(
self.profile_combo.currentIndex()
)
self.minimum_needs.remove_profile(profile_name)
self.select_profile(self.profile_combo.currentIndex()) |
def render_roughpage(request, t):
    """
    Internal interface to the rough page view: render template ``t``
    for ``request`` into an ``HttpResponse``.
    """
    import django
    # Django >= 1.8 templates render from a plain dict plus the request;
    # older versions require wrapping the request in a RequestContext.
    if django.VERSION < (1, 8):
        response = HttpResponse(t.render(RequestContext(request)))
    else:
        response = HttpResponse(t.render({}, request))
    return response | Internal interface to the rough page view. | Below is the instruction that describes the task:
### Input:
Internal interface to the rough page view.
### Response:
def render_roughpage(request, t):
"""
Internal interface to the rough page view.
"""
import django
if django.VERSION >= (1, 8):
c = {}
response = HttpResponse(t.render(c, request))
else:
c = RequestContext(request)
response = HttpResponse(t.render(c))
return response |
def layer_with(self, sample: np.ndarray, value: int) -> np.ndarray:
        """Create an identical 2d array where the second row is filled with value"""
        # Allocate a (2, len(sample)) float array pre-filled with ``value``.
        b = np.full((2, len(sample)), value, dtype=float)
        # Overwrite the first row with the sample; the second row keeps
        # ``value`` everywhere. ``sample`` is assumed 1D -- TODO confirm.
        b[0] = sample
        return b | Create an identical 2d array where the second row is filled with value | Below is the instruction that describes the task:
### Input:
Create an identical 2d array where the second row is filled with value
### Response:
def layer_with(self, sample: np.ndarray, value: int) -> np.ndarray:
"""Create an identical 2d array where the second row is filled with value"""
b = np.full((2, len(sample)), value, dtype=float)
b[0] = sample
return b |
def url(self):
        """
        The URL present in this inline results. If you want to "click"
        this URL to open it in your browser, you should use Python's
        `webbrowser.open(url)` for such task.
        """
        # Only plain BotInlineResult instances carry a URL; any other
        # result type falls through and implicitly returns None.
        if isinstance(self.result, types.BotInlineResult):
            return self.result.url | The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
    `webbrowser.open(url)` for such task. | Below is the instruction that describes the task:
### Input:
The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
`webbrowser.open(url)` for such task.
### Response:
def url(self):
"""
The URL present in this inline results. If you want to "click"
this URL to open it in your browser, you should use Python's
`webbrowser.open(url)` for such task.
"""
if isinstance(self.result, types.BotInlineResult):
return self.result.url |
def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file | Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file. | Below is the instruction that describes the task:
### Input:
Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
### Response:
def _combine_coverages(items, work_dir, input_backs=None):
"""Combine coverage cnns calculated for individual inputs into single file.
Optionally moves over pre-calculated coverage samples from a background file.
"""
out_file = os.path.join(work_dir, "sample_coverages.txt")
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_f:
for data in items:
cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
with open(cov_file) as cov_f:
out_f.write(cov_f.read())
if input_backs:
for input_back in input_backs:
with open(input_back) as in_handle:
for line in in_handle:
if len(line.split()) >= 4:
out_f.write(line)
return out_file |
def extract_common_fields(self, data):
"""Extract fields from a metadata query."""
return dict(
dc=data.get('dc'),
role=data.get('role'),
account_name=data.get('accountname'),
user_id=data.get('user_id'),
login=data.get('login'),
login_url=data.get('login_url'),
api_endpoint=data.get('api_endpoint'),
    ) | Extract fields from a metadata query. | Below is the instruction that describes the task:
### Input:
Extract fields from a metadata query.
### Response:
def extract_common_fields(self, data):
"""Extract fields from a metadata query."""
return dict(
dc=data.get('dc'),
role=data.get('role'),
account_name=data.get('accountname'),
user_id=data.get('user_id'),
login=data.get('login'),
login_url=data.get('login_url'),
api_endpoint=data.get('api_endpoint'),
) |
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None, callback=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the Content-Size and
Content-MD5 in the header. So for huge uploads, the delay in calculating
MD5 is avoided but with a penalty of inability to verify the integrity
of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
# Set the Transfer Encoding for Streams.
headers['Transfer-Encoding'] = 'chunked'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
def existence_tested(k):
if k:
if callable(callback):
callback(False)
else:
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback)
self.bucket.lookup(self.name, callback=existence_tested)
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback) | Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the Content-Size and
Content-MD5 in the header. So for huge uploads, the delay in calculating
MD5 is avoided but with a penalty of inability to verify the integrity
of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost. | Below is the the instruction that describes the task:
### Input:
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the Content-Size and
Content-MD5 in the header. So for huge uploads, the delay in calculating
MD5 is avoided but with a penalty of inability to verify the integrity
of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
### Response:
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None, callback=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the Content-Size and
Content-MD5 in the header. So for huge uploads, the delay in calculating
MD5 is avoided but with a penalty of inability to verify the integrity
of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
# Set the Transfer Encoding for Streams.
headers['Transfer-Encoding'] = 'chunked'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
def existence_tested(k):
if k:
if callable(callback):
callback(False)
else:
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback)
self.bucket.lookup(self.name, callback=existence_tested)
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, callback=callback) |
def _normalize(self, name):
"""
Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation.
"""
name = re.sub(
r'^(Senator|Representative|Sen\.?|Rep\.?|'
'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
'Assembly(member|man|woman)) ',
'',
name)
return name.strip().lower().replace('.', '') | Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation. | Below is the instruction that describes the task:
### Input:
Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation.
### Response:
def _normalize(self, name):
"""
Normalizes a legislator name by stripping titles from the front,
converting to lowercase and removing punctuation.
"""
name = re.sub(
r'^(Senator|Representative|Sen\.?|Rep\.?|'
'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
'Assembly(member|man|woman)) ',
'',
name)
return name.strip().lower().replace('.', '') |
def _getSimplePassage(self, reference=None):
""" Retrieve a single node representing the passage.
.. warning:: Range support is awkward.
:param reference: Identifier of the subreference / passages
:type reference: list, reference
:returns: Asked passage
:rtype: CapitainsCtsPassage
"""
if reference is None:
return _SimplePassage(
resource=self.resource,
reference=None,
urn=self.urn,
citation=self.citation.root,
text=self
)
subcitation = self.citation.root[reference.depth-1]
resource = self.resource.xpath(
subcitation.fill(reference),
namespaces=XPATH_NAMESPACES
)
if len(resource) != 1:
raise InvalidURN
return _SimplePassage(
resource[0],
reference=reference,
urn=self.urn,
citation=subcitation,
text=self.textObject
) | Retrieve a single node representing the passage.
.. warning:: Range support is awkward.
:param reference: Identifier of the subreference / passages
:type reference: list, reference
:returns: Asked passage
:rtype: CapitainsCtsPassage | Below is the instruction that describes the task:
### Input:
Retrieve a single node representing the passage.
.. warning:: Range support is awkward.
:param reference: Identifier of the subreference / passages
:type reference: list, reference
:returns: Asked passage
:rtype: CapitainsCtsPassage
### Response:
def _getSimplePassage(self, reference=None):
""" Retrieve a single node representing the passage.
.. warning:: Range support is awkward.
:param reference: Identifier of the subreference / passages
:type reference: list, reference
:returns: Asked passage
:rtype: CapitainsCtsPassage
"""
if reference is None:
return _SimplePassage(
resource=self.resource,
reference=None,
urn=self.urn,
citation=self.citation.root,
text=self
)
subcitation = self.citation.root[reference.depth-1]
resource = self.resource.xpath(
subcitation.fill(reference),
namespaces=XPATH_NAMESPACES
)
if len(resource) != 1:
raise InvalidURN
return _SimplePassage(
resource[0],
reference=reference,
urn=self.urn,
citation=subcitation,
text=self.textObject
) |
def write_turbomole(basis):
'''Converts a basis set to Gaussian format
'''
s = '$basis\n'
s += '*\n'
# TM basis sets are completely uncontracted
basis = manip.uncontract_general(basis, True)
basis = manip.uncontract_spdf(basis, 0, False)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
# Electron Basis
if len(electron_elements) > 0:
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, False)
s += '{} {}\n'.format(sym, basis['name'])
s += '*\n'
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
am = shell['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
s += ' {} {}\n'.format(nprim, amchar)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=True)
s += '*\n'
# Write out ECP
if len(ecp_elements) > 0:
s += '$ecp\n'
s += '*\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z)
s += '{} {}-ecp\n'.format(sym, basis['name'])
s += '*\n'
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += ' ncore = {} lmax = {}\n'.format(data['ecp_electrons'], max_ecp_am)
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
if am[0] == max_ecp_am:
s += '{}\n'.format(amchar)
else:
s += '{}-{}\n'.format(amchar, max_ecp_amchar)
point_places = [9, 23, 32]
s += printing.write_matrix([*coefficients, rexponents, gexponents], point_places, convert_exp=True)
s += '*\n'
s += '$end\n'
    return s | Converts a basis set to Gaussian format | Below is the instruction that describes the task:
### Input:
Converts a basis set to Gaussian format
### Response:
def write_turbomole(basis):
'''Converts a basis set to Gaussian format
'''
s = '$basis\n'
s += '*\n'
# TM basis sets are completely uncontracted
basis = manip.uncontract_general(basis, True)
basis = manip.uncontract_spdf(basis, 0, False)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
# Electron Basis
if len(electron_elements) > 0:
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, False)
s += '{} {}\n'.format(sym, basis['name'])
s += '*\n'
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
am = shell['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
s += ' {} {}\n'.format(nprim, amchar)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=True)
s += '*\n'
# Write out ECP
if len(ecp_elements) > 0:
s += '$ecp\n'
s += '*\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z)
s += '{} {}-ecp\n'.format(sym, basis['name'])
s += '*\n'
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += ' ncore = {} lmax = {}\n'.format(data['ecp_electrons'], max_ecp_am)
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
if am[0] == max_ecp_am:
s += '{}\n'.format(amchar)
else:
s += '{}-{}\n'.format(amchar, max_ecp_amchar)
point_places = [9, 23, 32]
s += printing.write_matrix([*coefficients, rexponents, gexponents], point_places, convert_exp=True)
s += '*\n'
s += '$end\n'
return s |
def read(self,file):
"""Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file.
"""
DXfield = self
p = DXParser(file)
p.parse(DXfield) | Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file. | Below is the instruction that describes the task:
### Input:
Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file.
### Response:
def read(self,file):
"""Read DX field from file.
dx = OpenDX.field.read(dxfile)
The classid is discarded and replaced with the one from the file.
"""
DXfield = self
p = DXParser(file)
p.parse(DXfield) |
def add_rolling_statistics_variables(
df = None,
variable = None,
window = 20,
upper_factor = 2,
lower_factor = 2
):
"""
Add rolling statistics variables derived from a specified variable in a
DataFrame.
"""
df[variable + "_rolling_mean"] = pd.stats.moments.rolling_mean(df[variable], window)
df[variable + "_rolling_standard_deviation"] = pd.stats.moments.rolling_std(df[variable], window)
df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
return df | Add rolling statistics variables derived from a specified variable in a
DataFrame. | Below is the instruction that describes the task:
### Input:
Add rolling statistics variables derived from a specified variable in a
DataFrame.
### Response:
def add_rolling_statistics_variables(
df = None,
variable = None,
window = 20,
upper_factor = 2,
lower_factor = 2
):
"""
Add rolling statistics variables derived from a specified variable in a
DataFrame.
"""
df[variable + "_rolling_mean"] = pd.stats.moments.rolling_mean(df[variable], window)
df[variable + "_rolling_standard_deviation"] = pd.stats.moments.rolling_std(df[variable], window)
df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
return df |
def get_fail_graph(self, failure_index=None):
"""Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.get_graph() | Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
    A pygraph.digraph object. | Below is the instruction that describes the task:
### Input:
Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object.
### Response:
def get_fail_graph(self, failure_index=None):
"""Returns a graph showing a solve failure.
Args:
failure_index: See `failure_reason`
Returns:
A pygraph.digraph object.
"""
phase, _ = self._get_failed_phase(failure_index)
return phase.get_graph() |
def generate_X_grid(self, term, n=100, meshgrid=False):
"""create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
# process each subterm in a TensorTerm
if self.terms[term].istensor:
Xs = []
for term_ in self.terms[term]:
Xs.append(np.linspace(term_.edge_knots_[0],
term_.edge_knots_[1],
num=n))
Xs = np.meshgrid(*Xs, indexing='ij')
if meshgrid:
return tuple(Xs)
else:
return self._flatten_mesh(Xs, term=term)
# all other Terms
elif hasattr(self.terms[term], 'edge_knots_'):
x = np.linspace(self.terms[term].edge_knots_[0],
self.terms[term].edge_knots_[1],
num=n)
if meshgrid:
return (x,)
# fill in feature matrix with only relevant features for this term
X = np.zeros((n, self.statistics_['m_features']))
X[:, self.terms[term].feature] = x
if getattr(self.terms[term], 'by', None) is not None:
X[:, self.terms[term].by] = 1.
return X
# dont know what to do here
else:
raise TypeError('Unexpected term type: {}'.format(self.terms[term])) | create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
    since it does not make sense to process the intercept term. | Below is the instruction that describes the task:
### Input:
create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
### Response:
def generate_X_grid(self, term, n=100, meshgrid=False):
"""create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
# process each subterm in a TensorTerm
if self.terms[term].istensor:
Xs = []
for term_ in self.terms[term]:
Xs.append(np.linspace(term_.edge_knots_[0],
term_.edge_knots_[1],
num=n))
Xs = np.meshgrid(*Xs, indexing='ij')
if meshgrid:
return tuple(Xs)
else:
return self._flatten_mesh(Xs, term=term)
# all other Terms
elif hasattr(self.terms[term], 'edge_knots_'):
x = np.linspace(self.terms[term].edge_knots_[0],
self.terms[term].edge_knots_[1],
num=n)
if meshgrid:
return (x,)
# fill in feature matrix with only relevant features for this term
X = np.zeros((n, self.statistics_['m_features']))
X[:, self.terms[term].feature] = x
if getattr(self.terms[term], 'by', None) is not None:
X[:, self.terms[term].by] = 1.
return X
# dont know what to do here
else:
raise TypeError('Unexpected term type: {}'.format(self.terms[term])) |
def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
    self.end_response(self.build_response(code, data, headers)) | do not use directly | Below is the instruction that describes the task:
### Input:
do not use directly
### Response:
def send_error_explain(self, code, message=None, headers=None, content_type=None):
"do not use directly"
if headers is None:
headers = {}
if code in self.responses:
if message is None:
message = self.responses[code][0]
explain = self.responses[code][1]
else:
explain = ""
if message is None:
message = ""
if not isinstance(headers, dict):
headers = {}
if not content_type:
if self._cmd and self._cmd.content_type:
content_type = self._cmd.content_type
else:
content_type = self._DEFAULT_CONTENT_TYPE
if self._cmd and self._cmd.charset:
charset = self._cmd.charset
else:
charset = DEFAULT_CHARSET
headers['Content-type'] = "%s; charset=%s" % (content_type, charset)
data = self._mk_error_explain_data(code, message, explain)
self.end_response(self.build_response(code, data, headers)) |
def get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
override_cluster_name):
"""Create the cluster head node, which in turn creates the workers."""
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: "head",
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
confirm("This will create a new cluster", yes)
elif not no_restart:
confirm("This will restart cluster services", yes)
launch_hash = hash_launch_conf(config["head_node"], config["auth"])
if head_node is None or provider.node_tags(head_node).get(
TAG_RAY_LAUNCH_CONFIG) != launch_hash:
if head_node is not None:
confirm("Head node config out-of-date. It will be terminated",
yes)
logger.info(
"get_or_create_head_node: "
"Terminating outdated head node {}".format(head_node))
provider.terminate_node(head_node)
logger.info("get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"])
provider.create_node(config["head_node"], head_node_tags, 1)
nodes = provider.non_terminated_nodes(head_node_tags)
assert len(nodes) == 1, "Failed to create head node."
head_node = nodes[0]
# TODO(ekl) right now we always update the head node even if the hash
# matches. We could prompt the user for what they want to do here.
runtime_hash = hash_runtime_conf(config["file_mounts"], config)
logger.info("get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head node can update the workers
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config = copy.deepcopy(config)
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile(
"w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update({
remote_key_path: config["auth"]["ssh_private_key"],
"~/ray_bootstrap_config.yaml": remote_config_file.name
})
if restart_only:
init_commands = config["head_start_ray_commands"]
elif no_restart:
init_commands = config["head_setup_commands"]
else:
init_commands = (config["head_setup_commands"] +
config["head_start_ray_commands"])
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=init_commands,
runtime_hash=runtime_hash,
)
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
logger.error("get_or_create_head_node: "
"Updating {} failed".format(head_node_ip))
sys.exit(1)
logger.info(
"get_or_create_head_node: "
"Head node up-to-date, IP address is: {}".format(head_node_ip))
monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
use_docker = bool(config["docker"]["container_name"])
if override_cluster_name:
modifiers = " --cluster-name={}".format(
quote(override_cluster_name))
else:
modifiers = ""
print("To monitor auto-scaling activity, you can run:\n\n"
" ray exec {} {}{}{}\n".format(
config_file, "--docker " if use_docker else " ",
quote(monitor_str), modifiers))
print("To open a console on the cluster:\n\n"
" ray attach {}{}\n".format(config_file, modifiers))
print("To ssh manually to the cluster, run:\n\n"
" ssh -i {} {}@{}\n".format(config["auth"]["ssh_private_key"],
config["auth"]["ssh_user"],
head_node_ip))
finally:
provider.cleanup() | Create the cluster head node, which in turn creates the workers. | Below is the the instruction that describes the task:
### Input:
Create the cluster head node, which in turn creates the workers.
### Response:
def get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
override_cluster_name):
"""Create the cluster head node, which in turn creates the workers."""
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: "head",
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
confirm("This will create a new cluster", yes)
elif not no_restart:
confirm("This will restart cluster services", yes)
launch_hash = hash_launch_conf(config["head_node"], config["auth"])
if head_node is None or provider.node_tags(head_node).get(
TAG_RAY_LAUNCH_CONFIG) != launch_hash:
if head_node is not None:
confirm("Head node config out-of-date. It will be terminated",
yes)
logger.info(
"get_or_create_head_node: "
"Terminating outdated head node {}".format(head_node))
provider.terminate_node(head_node)
logger.info("get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"])
provider.create_node(config["head_node"], head_node_tags, 1)
nodes = provider.non_terminated_nodes(head_node_tags)
assert len(nodes) == 1, "Failed to create head node."
head_node = nodes[0]
# TODO(ekl) right now we always update the head node even if the hash
# matches. We could prompt the user for what they want to do here.
runtime_hash = hash_runtime_conf(config["file_mounts"], config)
logger.info("get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head node can update the workers
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config = copy.deepcopy(config)
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile(
"w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update({
remote_key_path: config["auth"]["ssh_private_key"],
"~/ray_bootstrap_config.yaml": remote_config_file.name
})
if restart_only:
init_commands = config["head_start_ray_commands"]
elif no_restart:
init_commands = config["head_setup_commands"]
else:
init_commands = (config["head_setup_commands"] +
config["head_start_ray_commands"])
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=init_commands,
runtime_hash=runtime_hash,
)
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
logger.error("get_or_create_head_node: "
"Updating {} failed".format(head_node_ip))
sys.exit(1)
logger.info(
"get_or_create_head_node: "
"Head node up-to-date, IP address is: {}".format(head_node_ip))
monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
use_docker = bool(config["docker"]["container_name"])
if override_cluster_name:
modifiers = " --cluster-name={}".format(
quote(override_cluster_name))
else:
modifiers = ""
print("To monitor auto-scaling activity, you can run:\n\n"
" ray exec {} {}{}{}\n".format(
config_file, "--docker " if use_docker else " ",
quote(monitor_str), modifiers))
print("To open a console on the cluster:\n\n"
" ray attach {}{}\n".format(config_file, modifiers))
print("To ssh manually to the cluster, run:\n\n"
" ssh -i {} {}@{}\n".format(config["auth"]["ssh_private_key"],
config["auth"]["ssh_user"],
head_node_ip))
finally:
provider.cleanup() |
def subvol_delete(self, path):
"""
Delete a btrfs subvolume in the specified path
:param path: path to delete
"""
args = {
'path': path
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_delete', args) | Delete a btrfs subvolume in the specified path
:param path: path to delete | Below is the the instruction that describes the task:
### Input:
Delete a btrfs subvolume in the specified path
:param path: path to delete
### Response:
def subvol_delete(self, path):
"""
Delete a btrfs subvolume in the specified path
:param path: path to delete
"""
args = {
'path': path
}
self._subvol_chk.check(args)
self._client.sync('btrfs.subvol_delete', args) |
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions) | Make sure that the file is writeable. Useful if our source is
read-only. | Below is the the instruction that describes the task:
### Input:
Make sure that the file is writeable. Useful if our source is
read-only.
### Response:
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions) |
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %s is not hooked", self._addr_to_str(addr))
return None
return self._sim_procedures[addr] | Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked. | Below is the the instruction that describes the task:
### Input:
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
### Response:
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %s is not hooked", self._addr_to_str(addr))
return None
return self._sim_procedures[addr] |
def image_coordinates(self, point):
'''given a point in window coordinates, calculate image coordinates'''
# the dragpos is the top left position in image coordinates
ret = wx.Point(int(self.dragpos.x + point.x/self.zoom),
int(self.dragpos.y + point.y/self.zoom))
return ret | given a point in window coordinates, calculate image coordinates | Below is the the instruction that describes the task:
### Input:
given a point in window coordinates, calculate image coordinates
### Response:
def image_coordinates(self, point):
'''given a point in window coordinates, calculate image coordinates'''
# the dragpos is the top left position in image coordinates
ret = wx.Point(int(self.dragpos.x + point.x/self.zoom),
int(self.dragpos.y + point.y/self.zoom))
return ret |
def extend_dict_key_value(
in_dict,
keys,
value,
delimiter=DEFAULT_TARGET_DELIM,
ordered_dict=False):
'''
Ensures that in_dict contains the series of recursive keys defined in keys.
Also extends the list, that is at the end of `in_dict` traversed with `keys`,
with `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to extend the nested dict-key with.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place.
'''
dict_pointer, last_key = _dict_rpartition(
in_dict,
keys,
delimiter=delimiter,
ordered_dict=ordered_dict)
if last_key not in dict_pointer or dict_pointer[last_key] is None:
dict_pointer[last_key] = []
try:
dict_pointer[last_key].extend(value)
except AttributeError:
raise SaltInvocationError('The last key contains a {}, which cannot extend.'
''.format(type(dict_pointer[last_key])))
except TypeError:
raise SaltInvocationError('Cannot extend {} with a {}.'
''.format(type(dict_pointer[last_key]), type(value)))
return in_dict | Ensures that in_dict contains the series of recursive keys defined in keys.
Also extends the list, that is at the end of `in_dict` traversed with `keys`,
with `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to extend the nested dict-key with.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place. | Below is the the instruction that describes the task:
### Input:
Ensures that in_dict contains the series of recursive keys defined in keys.
Also extends the list, that is at the end of `in_dict` traversed with `keys`,
with `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to extend the nested dict-key with.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place.
### Response:
def extend_dict_key_value(
in_dict,
keys,
value,
delimiter=DEFAULT_TARGET_DELIM,
ordered_dict=False):
'''
Ensures that in_dict contains the series of recursive keys defined in keys.
Also extends the list, that is at the end of `in_dict` traversed with `keys`,
with `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to extend the nested dict-key with.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place.
'''
dict_pointer, last_key = _dict_rpartition(
in_dict,
keys,
delimiter=delimiter,
ordered_dict=ordered_dict)
if last_key not in dict_pointer or dict_pointer[last_key] is None:
dict_pointer[last_key] = []
try:
dict_pointer[last_key].extend(value)
except AttributeError:
raise SaltInvocationError('The last key contains a {}, which cannot extend.'
''.format(type(dict_pointer[last_key])))
except TypeError:
raise SaltInvocationError('Cannot extend {} with a {}.'
''.format(type(dict_pointer[last_key]), type(value)))
return in_dict |
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, skipped_columns=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
one will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
assert (skipped_columns==None) or isinstance(skipped_columns, list), \
"The skipped_columns should be an list of column names!"
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns) | Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
one will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data") | Below is the the instruction that describes the task:
### Input:
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
one will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
### Response:
def upload_file(path, destination_frame=None, header=0, sep=None, col_names=None, col_types=None,
na_strings=None, skipped_columns=None):
"""
Upload a dataset from the provided local path to the H2O cluster.
Does a single-threaded push to H2O. Also see :meth:`import_file`.
:param path: A path specifying the location of the data to upload.
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
be automatically generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param sep: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param col_names: A list of column names for the file.
:param col_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
one will be guessed. The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:returns: a new :class:`H2OFrame` instance.
:examples:
>>> frame = h2o.upload_file("/path/to/local/data")
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(path, str)
assert_is_type(destination_frame, str, None)
assert_is_type(header, -1, 0, 1)
assert_is_type(sep, None, I(str, lambda s: len(s) == 1))
assert_is_type(col_names, [str], None)
assert_is_type(col_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
assert (skipped_columns==None) or isinstance(skipped_columns, list), \
"The skipped_columns should be an list of column names!"
check_frame_id(destination_frame)
if path.startswith("~"):
path = os.path.expanduser(path)
return H2OFrame()._upload_parse(path, destination_frame, header, sep, col_names, col_types, na_strings, skipped_columns) |
def _register_live_trades_channels(self):
"""
Registers the binding for the live_trades_channels channels.
:return:
"""
channels = {'live_trades': self.btcusd_lt_callback,
'live_trades_btceur': self.btceur_lt_callback,
'live_trades_eurusd': self.eurusd_lt_callback,
'live_trades_xrpusd': self.xrpusd_lt_callback,
'live_trades_xrpeur': self.xrpeur_lt_callback,
'live_trades_xrpbtc': self.xrpbtc_lt_callback}
event = 'trade'
self._bind_channels(event, channels) | Registers the binding for the live_trades_channels channels.
:return: | Below is the the instruction that describes the task:
### Input:
Registers the binding for the live_trades_channels channels.
:return:
### Response:
def _register_live_trades_channels(self):
"""
Registers the binding for the live_trades_channels channels.
:return:
"""
channels = {'live_trades': self.btcusd_lt_callback,
'live_trades_btceur': self.btceur_lt_callback,
'live_trades_eurusd': self.eurusd_lt_callback,
'live_trades_xrpusd': self.xrpusd_lt_callback,
'live_trades_xrpeur': self.xrpeur_lt_callback,
'live_trades_xrpbtc': self.xrpbtc_lt_callback}
event = 'trade'
self._bind_channels(event, channels) |
def get_endpoint_obj(client, endpoint, object_id):
''' Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result '''
endpoint = '/'.join([endpoint, str(object_id)])
return client.authenticated_request(endpoint).json() | Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result | Below is the the instruction that describes the task:
### Input:
Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result
### Response:
def get_endpoint_obj(client, endpoint, object_id):
''' Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result '''
endpoint = '/'.join([endpoint, str(object_id)])
return client.authenticated_request(endpoint).json() |
def scan_config_path(cls, project, scan_config):
"""Return a fully-qualified scan_config string."""
return google.api_core.path_template.expand(
"projects/{project}/scanConfigs/{scan_config}",
project=project,
scan_config=scan_config,
) | Return a fully-qualified scan_config string. | Below is the the instruction that describes the task:
### Input:
Return a fully-qualified scan_config string.
### Response:
def scan_config_path(cls, project, scan_config):
"""Return a fully-qualified scan_config string."""
return google.api_core.path_template.expand(
"projects/{project}/scanConfigs/{scan_config}",
project=project,
scan_config=scan_config,
) |
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response | 获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30 | Below is the the instruction that describes the task:
### Input:
获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
### Response:
def user_follows(uid, offset='0', limit=30):
"""获取用户关注列表
:param uid: 用户的ID,可通过登录或者其他接口获取
:param offset: (optional) 分段起始位置,默认 0
:param limit: (optional) 数据上限多少行,默认 30
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_FOLLOWS'
r.params = {'uid': uid}
r.data = {'offset': offset, 'limit': limit, 'order': True}
r.send()
return r.response |
def _get_ATOM_sequences(self):
'''Creates the ATOM Sequences.'''
# Get a list of all residues with ATOM or HETATM records
atom_sequences = {}
structural_residue_IDs_set = set() # use a set for a quicker lookup
ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
residue_lines_by_chain = []
structural_residue_IDs_set = []
present_chain_ids = {}
for l in self.structure_lines:
if len(l) > 21 and l[:3] != 'TER':
present_chain_ids[l[21]] = present_chain_ids.get(l[21], set())
present_chain_ids[l[21]].add(l[:6])
model_index = 0
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
full_code_map = {}
hetatm_map = {}
full_atom_map = {}
for l in self.structure_lines:
chain_id = None
if l.startswith("TER "):
model_index += 1
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
else:
residue_id = l[21:27]
if residue_id not in structural_residue_IDs_set[model_index]:
residue_lines_by_chain[model_index].append(l)
structural_residue_IDs_set[model_index].add(residue_id)
if l.startswith('ATOM'):
chain_id = l[21]
# Only use ATOM records to build the code map as chains can have ligands HETATMs
full_code_map[chain_id] = full_code_map.get(chain_id, set())
full_code_map[chain_id].add(l[17:20].strip())
# Only use ATOM records to build the atom map as CA-only chains can have ligands described in full as HETATMs
full_atom_map[chain_id] = full_atom_map.get(chain_id, set())
full_atom_map[chain_id].add(l[12:16].strip())
elif l.startswith('HETATM'):
chain_id = l[21]
hetatm_map[chain_id] = hetatm_map.get(chain_id, set())
hetatm_map[chain_id].add(l[17:20].strip())
# Get the residues used by the residue lines. These can be used to determine the chain type if the header is missing.
for chain_id in self.atom_chain_order:
if full_code_map.get(chain_id):
# The chains may contain other molecules e.g. MG or HOH so before we decide their type based on residue types alone,
# we subtract out those non-canonicals
canonical_molecules = full_code_map[chain_id].intersection(dna_nucleotides.union(rna_nucleotides).union(residue_types_3))
determined_chain_type = None
if canonical_molecules.union(dna_nucleotides) == dna_nucleotides:
determined_chain_type = 'DNA'
elif canonical_molecules.union(rna_nucleotides) == rna_nucleotides:
determined_chain_type = 'RNA'
elif len(full_code_map[chain_id]) == 1 and 'UNK' in full_code_map[chain_id]:
determined_chain_type = 'Unknown'
elif canonical_molecules:
if len(full_atom_map[chain_id]) == 1 and 'CA' in full_atom_map[chain_id]:
determined_chain_type = 'Protein skeleton'
else:
determined_chain_type = 'Protein'
else:
determined_chain_type = PDB._determine_heterogen_chain_type(canonical_molecules)
if self.chain_types.get(chain_id):
assert(self.chain_types[chain_id] == determined_chain_type)
else:
self.chain_types[chain_id] = determined_chain_type
line_types_by_chain = []
chain_ids = []
for model_index in range(len(residue_lines_by_chain)):
line_types = set()
if residue_lines_by_chain[model_index]:
if missing_chain_ids.get(self.pdb_id):
chain_ids.append(missing_chain_ids[self.pdb_id])
else:
chain_ids.append(residue_lines_by_chain[model_index][0][21])
for l in residue_lines_by_chain[model_index]:
line_types.add(l[0:6])
if line_types == set(['ATOM']):
line_types_by_chain.append('ATOM')
elif line_types == set(['HETATM']):
line_types_by_chain.append('HETATM')
else:
line_types_by_chain.append('Mixed')
for x in range(0, len(residue_lines_by_chain)):
residue_lines = residue_lines_by_chain[x]
line_types = line_types_by_chain[x]
if ignore_HETATMs and line_types == 'HETATM':
continue
for y in range(len(residue_lines)):
l = residue_lines[y]
residue_type = l[17:20].strip()
if l.startswith("HETATM"):
if self.modified_residue_mapping_3.get(residue_type):
residue_type = self.modified_residue_mapping_3[residue_type]
elif y == (len(residue_lines) - 1):
# last residue in the chain
if residue_type == 'NH2':
residue_type = 'UNK' # fixes a few cases e.g. 1MBG, 1K9Q, 1KA6
elif ignore_HETATMs:
continue
elif ignore_HETATMs:
continue
residue_id = l[21:27]
chain_id = l[21]
if missing_chain_ids.get(self.pdb_id):
chain_id = missing_chain_ids[self.pdb_id]
if chain_id in self.chain_types:
# This means the pdb had SEQRES and we constructed atom_sequences
chain_type = self.chain_types[chain_id]
else:
# Otherwise assume this is protein
chain_type = 'Protein'
atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
short_residue_type = None
if residue_type == 'UNK':
short_residue_type = 'X'
elif chain_type == 'Unknown':
assert(False) # we should not reach here - Unknown chains should only contain UNK records
elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type) or non_canonical_amino_acids.get(residue_type)
elif chain_type == 'DNA':
short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
elif chain_type == 'RNA':
short_residue_type = non_canonical_rna.get(residue_type) or residue_type
if not short_residue_type:
if l.startswith("ATOM") and l[12:16] == ' OH2' and l[17:20] == 'TIP':
continue
elif not self.strict:
short_residue_type = 'X'
else:
raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
#structural_residue_IDs.append((residue_id, short_residue_type))
# KAB - way to allow for multiresidue noncanonical AA's
if len(short_residue_type) == 1:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
else:
for char in short_residue_type:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], char, chain_type))
# Assign 'Ligand' or 'Heterogen' to all HETATM-only chains
for chain_id in present_chain_ids.keys():
if chain_id not in self.chain_types:
assert('ATOM ' not in present_chain_ids[chain_id])
self.chain_types[chain_id] = PDB._determine_heterogen_chain_type(hetatm_map.get(chain_id, set()))
    self.atom_sequences = atom_sequences | Creates the ATOM Sequences. | Below is the instruction that describes the task:
### Input:
Creates the ATOM Sequences.
### Response:
def _get_ATOM_sequences(self):
    '''Creates the ATOM Sequences.

    Builds self.atom_sequences (chain id -> Sequence) from the ATOM/HETATM
    coordinate records in self.structure_lines, and fills in self.chain_types
    for chains whose type was not already determined from the header
    (e.g. when the SEQRES section is missing).
    '''
    # Get a list of all residues with ATOM or HETATM records
    atom_sequences = {}
    structural_residue_IDs_set = set() # use a set for a quicker lookup
    ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
    residue_lines_by_chain = []
    # NOTE(review): this rebinding shadows the set created three lines up; the
    # set-typed version is never used and looks like dead code — confirm.
    structural_residue_IDs_set = []
    present_chain_ids = {}
    # Pass 1: record which record types (columns 1-6) occur for each chain id
    # (column 22); used at the end to find HETATM-only chains.
    for l in self.structure_lines:
        if len(l) > 21 and l[:3] != 'TER':
            present_chain_ids[l[21]] = present_chain_ids.get(l[21], set())
            present_chain_ids[l[21]].add(l[:6])
    model_index = 0
    residue_lines_by_chain.append([])
    structural_residue_IDs_set.append(set())
    full_code_map = {}
    hetatm_map = {}
    full_atom_map = {}
    # Pass 2: split the coordinate lines into per-chain segments (TER records
    # delimit segments), keeping only the first line seen per residue id, and
    # collect per-chain residue-name / atom-name sets for type detection below.
    for l in self.structure_lines:
        chain_id = None
        if l.startswith("TER "):
            model_index += 1
            residue_lines_by_chain.append([])
            structural_residue_IDs_set.append(set())
        else:
            # Residue id = chain id + residue sequence number + insertion code.
            residue_id = l[21:27]
            if residue_id not in structural_residue_IDs_set[model_index]:
                residue_lines_by_chain[model_index].append(l)
                structural_residue_IDs_set[model_index].add(residue_id)
            if l.startswith('ATOM'):
                chain_id = l[21]
                # Only use ATOM records to build the code map as chains can have ligands HETATMs
                full_code_map[chain_id] = full_code_map.get(chain_id, set())
                full_code_map[chain_id].add(l[17:20].strip())
                # Only use ATOM records to build the atom map as CA-only chains can have ligands described in full as HETATMs
                full_atom_map[chain_id] = full_atom_map.get(chain_id, set())
                full_atom_map[chain_id].add(l[12:16].strip())
            elif l.startswith('HETATM'):
                chain_id = l[21]
                hetatm_map[chain_id] = hetatm_map.get(chain_id, set())
                hetatm_map[chain_id].add(l[17:20].strip())
    # Get the residues used by the residue lines. These can be used to determine the chain type if the header is missing.
    for chain_id in self.atom_chain_order:
        if full_code_map.get(chain_id):
            # The chains may contain other molecules e.g. MG or HOH so before we decide their type based on residue types alone,
            # we subtract out those non-canonicals
            canonical_molecules = full_code_map[chain_id].intersection(dna_nucleotides.union(rna_nucleotides).union(residue_types_3))
            determined_chain_type = None
            if canonical_molecules.union(dna_nucleotides) == dna_nucleotides:
                determined_chain_type = 'DNA'
            elif canonical_molecules.union(rna_nucleotides) == rna_nucleotides:
                determined_chain_type = 'RNA'
            elif len(full_code_map[chain_id]) == 1 and 'UNK' in full_code_map[chain_id]:
                determined_chain_type = 'Unknown'
            elif canonical_molecules:
                # A chain whose only atom name is CA has just the backbone trace.
                if len(full_atom_map[chain_id]) == 1 and 'CA' in full_atom_map[chain_id]:
                    determined_chain_type = 'Protein skeleton'
                else:
                    determined_chain_type = 'Protein'
            else:
                determined_chain_type = PDB._determine_heterogen_chain_type(canonical_molecules)
            if self.chain_types.get(chain_id):
                # A type already derived from the header must agree with the coordinates.
                assert(self.chain_types[chain_id] == determined_chain_type)
            else:
                self.chain_types[chain_id] = determined_chain_type
    # Classify each chain segment by the record types it contains
    # (ATOM-only, HETATM-only, or a mixture).
    line_types_by_chain = []
    chain_ids = []
    for model_index in range(len(residue_lines_by_chain)):
        line_types = set()
        if residue_lines_by_chain[model_index]:
            if missing_chain_ids.get(self.pdb_id):
                # Patched chain id for known PDB entries with blank chain ids.
                chain_ids.append(missing_chain_ids[self.pdb_id])
            else:
                chain_ids.append(residue_lines_by_chain[model_index][0][21])
        for l in residue_lines_by_chain[model_index]:
            line_types.add(l[0:6])
        if line_types == set(['ATOM']):
            line_types_by_chain.append('ATOM')
        elif line_types == set(['HETATM']):
            line_types_by_chain.append('HETATM')
        else:
            line_types_by_chain.append('Mixed')
    # Pass 3: build one Sequence per chain from the deduplicated residue lines,
    # skipping HETATM-only segments while ignore_HETATMs is set.
    for x in range(0, len(residue_lines_by_chain)):
        residue_lines = residue_lines_by_chain[x]
        line_types = line_types_by_chain[x]
        if ignore_HETATMs and line_types == 'HETATM':
            continue
        for y in range(len(residue_lines)):
            l = residue_lines[y]
            residue_type = l[17:20].strip()
            if l.startswith("HETATM"):
                if self.modified_residue_mapping_3.get(residue_type):
                    residue_type = self.modified_residue_mapping_3[residue_type]
                elif y == (len(residue_lines) - 1):
                    # last residue in the chain
                    if residue_type == 'NH2':
                        residue_type = 'UNK' # fixes a few cases e.g. 1MBG, 1K9Q, 1KA6
                    elif ignore_HETATMs:
                        continue
                elif ignore_HETATMs:
                    continue
            residue_id = l[21:27]
            chain_id = l[21]
            if missing_chain_ids.get(self.pdb_id):
                chain_id = missing_chain_ids[self.pdb_id]
            if chain_id in self.chain_types:
                # This means the pdb had SEQRES and we constructed atom_sequences
                chain_type = self.chain_types[chain_id]
            else:
                # Otherwise assume this is protein
                chain_type = 'Protein'
            atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
            residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
            # Map the three-letter residue code to a one-letter code appropriate
            # for the chain type.
            short_residue_type = None
            if residue_type == 'UNK':
                short_residue_type = 'X'
            elif chain_type == 'Unknown':
                assert(False) # we should not reach here - Unknown chains should only contain UNK records
            elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
                short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type) or non_canonical_amino_acids.get(residue_type)
            elif chain_type == 'DNA':
                short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
            elif chain_type == 'RNA':
                short_residue_type = non_canonical_rna.get(residue_type) or residue_type
            if not short_residue_type:
                if l.startswith("ATOM") and l[12:16] == ' OH2' and l[17:20] == 'TIP':
                    # presumably a TIP-model water oxygen — skipped; confirm against callers
                    continue
                elif not self.strict:
                    short_residue_type = 'X'
                else:
                    raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
            #structural_residue_IDs.append((residue_id, short_residue_type))
            # KAB - way to allow for multiresidue noncanonical AA's
            if len(short_residue_type) == 1:
                atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
            else:
                for char in short_residue_type:
                    atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], char, chain_type))
    # Assign 'Ligand' or 'Heterogen' to all HETATM-only chains
    for chain_id in present_chain_ids.keys():
        if chain_id not in self.chain_types:
            assert('ATOM ' not in present_chain_ids[chain_id])
            self.chain_types[chain_id] = PDB._determine_heterogen_chain_type(hetatm_map.get(chain_id, set()))
    self.atom_sequences = atom_sequences
def new_from_list(cls, content, fill_title=True, **kwargs):
"""Populates the Table with a list of tuples of strings.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title
"""
obj = cls(**kwargs)
obj.append_from_list(content, fill_title)
return obj | Populates the Table with a list of tuples of strings.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title | Below is the instruction that describes the task:
### Input:
Populates the Table with a list of tuples of strings.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title
### Response:
def new_from_list(cls, content, fill_title=True, **kwargs):
    """Build a Table populated from a list of row tuples.

    Args:
        content (list): list of tuples of strings; each tuple is a row.
        fill_title (bool): if True, the first tuple in the list becomes
            the title row.
    """
    table = cls(**kwargs)
    table.append_from_list(content, fill_title)
    return table
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.