code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _get_log_rho_metropolis_hastings(self, proposed_point, proposed_eval):
"""calculate log(metropolis ratio times hastings factor)"""
return self._get_log_rho_metropolis(proposed_point, proposed_eval)\
- self.proposal.evaluate (proposed_point, self.current) \
+ self.proposal.evaluate (self.current, proposed_point) | calculate log(metropolis ratio times hastings factor) | Below is the the instruction that describes the task:
### Input:
calculate log(metropolis ratio times hastings factor)
### Response:
def _get_log_rho_metropolis_hastings(self, proposed_point, proposed_eval):
"""calculate log(metropolis ratio times hastings factor)"""
return self._get_log_rho_metropolis(proposed_point, proposed_eval)\
- self.proposal.evaluate (proposed_point, self.current) \
+ self.proposal.evaluate (self.current, proposed_point) |
def getPassagePlus(self, reference=None):
""" Retrieve a passage and informations around it and store it in the object
:param reference: Reference of the passage
:type reference: CtsReference or List of text_type
:rtype: CtsPassage
:returns: Object representing the passage
:raises: *TypeError* when reference is not a list or a Reference
"""
if reference:
urn = "{0}:{1}".format(self.urn, reference)
else:
urn = str(self.urn)
response = xmlparser(self.retriever.getPassagePlus(urn=urn))
passage = CtsPassage(urn=urn, resource=response, retriever=self.retriever)
passage._parse_request(response.xpath("//ti:reply/ti:label", namespaces=XPATH_NAMESPACES)[0])
self.citation = passage.citation
return passage | Retrieve a passage and informations around it and store it in the object
:param reference: Reference of the passage
:type reference: CtsReference or List of text_type
:rtype: CtsPassage
:returns: Object representing the passage
:raises: *TypeError* when reference is not a list or a Reference | Below is the the instruction that describes the task:
### Input:
Retrieve a passage and informations around it and store it in the object
:param reference: Reference of the passage
:type reference: CtsReference or List of text_type
:rtype: CtsPassage
:returns: Object representing the passage
:raises: *TypeError* when reference is not a list or a Reference
### Response:
def getPassagePlus(self, reference=None):
    """ Retrieve a passage and informations around it and store it in the object
    :param reference: Reference of the passage
    :type reference: CtsReference or List of text_type
    :rtype: CtsPassage
    :returns: Object representing the passage
    :raises: *TypeError* when reference is not a list or a Reference
    """
    # Build the full URN: append the reference when one is given.
    urn = "{0}:{1}".format(self.urn, reference) if reference else str(self.urn)
    reply = xmlparser(self.retriever.getPassagePlus(urn=urn))
    passage = CtsPassage(urn=urn, resource=reply, retriever=self.retriever)
    # Feed the <ti:label> node of the reply into the passage metadata parser.
    label_node = reply.xpath("//ti:reply/ti:label", namespaces=XPATH_NAMESPACES)[0]
    passage._parse_request(label_node)
    # Cache the citation scheme discovered for this passage on the text itself.
    self.citation = passage.citation
    return passage
def missing_fun_string(self, function_name):
'''
Return the error string for a missing function.
This can range from "not available' to "__virtual__" returned False
'''
mod_name = function_name.split('.')[0]
if mod_name in self.loaded_modules:
return '\'{0}\' is not available.'.format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return '\'{0}\' is not available.'.format(function_name)
else:
if reason is not None:
return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
else:
return '\'{0}\' __virtual__ returned False'.format(mod_name) | Return the error string for a missing function.
This can range from "not available' to "__virtual__" returned False | Below is the the instruction that describes the task:
### Input:
Return the error string for a missing function.
This can range from "not available' to "__virtual__" returned False
### Response:
def missing_fun_string(self, function_name):
    '''
    Return the error string for a missing function.
    This can range from "not available' to "__virtual__" returned False
    '''
    mod_name = function_name.split('.')[0]
    unavailable = '\'{0}\' is not available.'.format(function_name)
    # A module that loaded fine but lacks the function, or one we know
    # nothing about at all, both report plain unavailability.
    if mod_name in self.loaded_modules:
        return unavailable
    if mod_name not in self.missing_modules:
        return unavailable
    # The module failed __virtual__; report the reason when one was recorded.
    reason = self.missing_modules[mod_name]
    if reason is None:
        return '\'{0}\' __virtual__ returned False'.format(mod_name)
    return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
def ohlc(self, pair, timeframe=None, **kwargs):
"""
Subscribe to the passed pair's OHLC data channel.
:param pair: str, Pair to request data for.
:param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
:param kwargs:
:return:
"""
valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
'7D', '14D', '1M']
if timeframe:
if timeframe not in valid_tfs:
raise ValueError("timeframe must be any of %s" % valid_tfs)
else:
timeframe = '1m'
pair = 't' + pair if not pair.startswith('t') else pair
key = 'trade:' + timeframe + ':' + pair
self._subscribe('candles', key=key, **kwargs) | Subscribe to the passed pair's OHLC data channel.
:param pair: str, Pair to request data for.
:param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
Subscribe to the passed pair's OHLC data channel.
:param pair: str, Pair to request data for.
:param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
:param kwargs:
:return:
### Response:
def ohlc(self, pair, timeframe=None, **kwargs):
    """
    Subscribe to the passed pair's OHLC data channel.
    :param pair: str, Pair to request data for.
    :param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
                            1D, 7D, 14D, 1M}
    :param kwargs:
    :return:
    """
    valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
                 '7D', '14D', '1M']
    # Missing/empty timeframe falls back to one-minute candles.
    if not timeframe:
        timeframe = '1m'
    elif timeframe not in valid_tfs:
        raise ValueError("timeframe must be any of %s" % valid_tfs)
    # Trading pairs are prefixed with 't' on the wire.
    prefixed_pair = pair if pair.startswith('t') else 't' + pair
    channel_key = 'trade:' + timeframe + ':' + prefixed_pair
    self._subscribe('candles', key=channel_key, **kwargs)
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,
executor=None, echo=False, after_created=None, **kwargs):
"""Accepts an ODBC connection string and returns a new Connection object.
The connection string can be passed as the string `str`, as a list of
keywords,or a combination of the two. Any keywords except autocommit,
ansi, and timeout are simply added to the connection string.
:param autocommit bool: False or zero, the default, if True or non-zero,
the connection is put into ODBC autocommit mode and statements are
committed automatically.
:param ansi bool: By default, pyodbc first attempts to connect using
the Unicode version of SQLDriverConnectW. If the driver returns IM001
indicating it does not support the Unicode version, the ANSI version
is tried.
:param timeout int: An integer login timeout in seconds, used to set
the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is
0 which means the database's default timeout, if any, is use
:param after_created callable: support customize configuration after
connection is connected. Must be an async unary function, or leave it
as None.
"""
return _ContextManager(_connect(dsn=dsn, autocommit=autocommit,
ansi=ansi, timeout=timeout, loop=loop,
executor=executor, echo=echo,
after_created=after_created, **kwargs)) | Accepts an ODBC connection string and returns a new Connection object.
The connection string can be passed as the string `str`, as a list of
keywords,or a combination of the two. Any keywords except autocommit,
ansi, and timeout are simply added to the connection string.
:param autocommit bool: False or zero, the default, if True or non-zero,
the connection is put into ODBC autocommit mode and statements are
committed automatically.
:param ansi bool: By default, pyodbc first attempts to connect using
the Unicode version of SQLDriverConnectW. If the driver returns IM001
indicating it does not support the Unicode version, the ANSI version
is tried.
:param timeout int: An integer login timeout in seconds, used to set
the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is
0 which means the database's default timeout, if any, is use
:param after_created callable: support customize configuration after
connection is connected. Must be an async unary function, or leave it
as None. | Below is the the instruction that describes the task:
### Input:
Accepts an ODBC connection string and returns a new Connection object.
The connection string can be passed as the string `str`, as a list of
keywords,or a combination of the two. Any keywords except autocommit,
ansi, and timeout are simply added to the connection string.
:param autocommit bool: False or zero, the default, if True or non-zero,
the connection is put into ODBC autocommit mode and statements are
committed automatically.
:param ansi bool: By default, pyodbc first attempts to connect using
the Unicode version of SQLDriverConnectW. If the driver returns IM001
indicating it does not support the Unicode version, the ANSI version
is tried.
:param timeout int: An integer login timeout in seconds, used to set
the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is
0 which means the database's default timeout, if any, is use
:param after_created callable: support customize configuration after
connection is connected. Must be an async unary function, or leave it
as None.
### Response:
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None,
            executor=None, echo=False, after_created=None, **kwargs):
    """Accepts an ODBC connection string and returns a new Connection object.
    The connection string can be passed as the string `str`, as a list of
    keywords, or a combination of the two. Any keywords except autocommit,
    ansi, and timeout are simply added to the connection string.
    :param autocommit bool: False or zero, the default; if True or non-zero,
        the connection is put into ODBC autocommit mode and statements are
        committed automatically.
    :param ansi bool: By default, pyodbc first attempts to connect using
        the Unicode version of SQLDriverConnectW. If the driver returns IM001
        indicating it does not support the Unicode version, the ANSI version
        is tried.
    :param timeout int: An integer login timeout in seconds, used to set
        the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is
        0, which means the database's default timeout, if any, is used.
    :param after_created callable: supports customizing configuration after
        the connection is connected. Must be an async unary function, or leave
        it as None.
    """
    # Build the connection coroutine first, then hand it to the async
    # context-manager wrapper so callers may use `async with connect(...)`.
    connect_coro = _connect(dsn=dsn, autocommit=autocommit, ansi=ansi,
                            timeout=timeout, loop=loop, executor=executor,
                            echo=echo, after_created=after_created, **kwargs)
    return _ContextManager(connect_coro)
def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree:
""" Calculates the merkle root for a given list of items """
if not all(isinstance(l, bytes) and len(l) == 32 for l in items):
raise ValueError('Not all items are hashes')
leaves = sorted(items)
if len(leaves) == 0:
return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])
if not len(leaves) == len(set(leaves)):
raise ValueError('The leaves items must not contain duplicate items')
tree = [leaves]
layer = leaves
while len(layer) > 1:
# [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
iterator = iter(layer)
paired_items = zip_longest(iterator, iterator)
layer = [_hash_pair(a, b) for a, b in paired_items]
tree.append(layer)
return MerkleTree(layers=tree) | Calculates the merkle root for a given list of items | Below is the the instruction that describes the task:
### Input:
Calculates the merkle root for a given list of items
### Response:
def compute_merkle_tree(items: Iterable[bytes]) -> MerkleTree:
    """Calculate the Merkle tree (root plus all layers) for the given items.

    Every item must be a 32-byte hash and items must be unique; the leaves
    are sorted before the tree is built.
    """
    def _is_hash(value):
        return isinstance(value, bytes) and len(value) == 32

    if not all(_is_hash(value) for value in items):
        raise ValueError('Not all items are hashes')
    leaves = sorted(items)
    if not leaves:
        # Empty input collapses to the canonical empty root.
        return MerkleTree(layers=[[EMPTY_MERKLE_ROOT]])
    if len(set(leaves)) != len(leaves):
        raise ValueError('The leaves items must not contain duplicate items')
    layers = [leaves]
    current = leaves
    while len(current) > 1:
        # Pair neighbours; an odd tail is paired with None.
        # [a, b, c, d, e] -> [(a, b), (c, d), (e, None)]
        pair_source = iter(current)
        current = [_hash_pair(left, right)
                   for left, right in zip_longest(pair_source, pair_source)]
        layers.append(current)
    return MerkleTree(layers=layers)
def remove_plus(orig):
"""Remove a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext):
remove_safe(orig + ext) | Remove a fils, including biological index files. | Below is the the instruction that describes the task:
### Input:
Remove a fils, including biological index files.
### Response:
def remove_plus(orig):
    """Remove a file, including any biological index files alongside it.

    Deletes ``orig`` plus companions with the common index extensions
    (.idx, .gbi, .tbi, .bai) when they exist.
    """
    for suffix in ("", ".idx", ".gbi", ".tbi", ".bai"):
        candidate = orig + suffix
        if os.path.exists(candidate):
            remove_safe(candidate)
def trim(self, inplace=False):
'''Return a DataFrame, where all columns are 'trimmed' by the active range.
For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).
{note_copy}
:param inplace: {inplace}
:rtype: DataFrame
'''
df = self if inplace else self.copy()
for name in df:
column = df.columns.get(name)
if column is not None:
if self._index_start == 0 and len(column) == self._index_end:
pass # we already assigned it in .copy
else:
if isinstance(column, np.ndarray): # real array
df.columns[name] = column[self._index_start:self._index_end]
else:
df.columns[name] = column.trim(self._index_start, self._index_end)
df._length_original = self.length_unfiltered()
df._length_unfiltered = df._length_original
df._index_start = 0
df._index_end = df._length_original
df._active_fraction = 1
return df | Return a DataFrame, where all columns are 'trimmed' by the active range.
For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).
{note_copy}
:param inplace: {inplace}
:rtype: DataFrame | Below is the the instruction that describes the task:
### Input:
Return a DataFrame, where all columns are 'trimmed' by the active range.
For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).
{note_copy}
:param inplace: {inplace}
:rtype: DataFrame
### Response:
def trim(self, inplace=False):
    '''Return a DataFrame, where all columns are 'trimmed' by the active range.
    For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).
    {note_copy}
    :param inplace: {inplace}
    :rtype: DataFrame
    '''
    # Work on self directly, or on a copy, depending on the inplace flag.
    df = self if inplace else self.copy()
    for name in df:
        column = df.columns.get(name)
        if column is not None:
            # When the active range already spans the whole column there is
            # nothing to slice.
            if self._index_start == 0 and len(column) == self._index_end:
                pass # we already assigned it in .copy
            else:
                if isinstance(column, np.ndarray): # real array
                    df.columns[name] = column[self._index_start:self._index_end]
                else:
                    # Non-ndarray columns are expected to expose their own
                    # trim(start, end) method — presumably a lazy/virtual
                    # column type; TODO confirm against the column classes.
                    df.columns[name] = column.trim(self._index_start, self._index_end)
    # Reset the bookkeeping: after trimming, the frame's full extent is the
    # previous active range, and the active window covers everything.
    df._length_original = self.length_unfiltered()
    df._length_unfiltered = df._length_original
    df._index_start = 0
    df._index_end = df._length_original
    df._active_fraction = 1
    return df
def get_isotopic_ratio(self, compound='', element=''):
"""returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
"""
_stack = self.stack
compound = str(compound)
if compound == '':
_list_compounds = _stack.keys()
list_all_dict = {}
for _compound in _list_compounds:
_compound = str(_compound)
_list_element = _stack[_compound]['elements']
list_all_dict[_compound] = {}
for _element in _list_element:
list_all_dict[_compound][_element] = self.get_isotopic_ratio(
compound=_compound,
element=_element)
return list_all_dict
# checking compound is valid
list_compounds = _stack.keys()
if compound not in list_compounds:
list_compounds_joined = ', '.join(list_compounds)
raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
# checking element is valid
if element == '':
# we assume that the element and compounds names matched
element = compound
list_element = _stack[compound].keys()
if element not in list_element:
list_element_joined = ', '.join(list_element)
raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
list_istopes = _stack[compound][element]['isotopes']['list']
list_ratio = _stack[compound][element]['isotopes']['isotopic_ratio']
iso_ratio = zip(list_istopes, list_ratio)
_stoichiometric_ratio = {}
for _iso, _ratio in iso_ratio:
_stoichiometric_ratio[_iso] = _ratio
return _stoichiometric_ratio | returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack | Below is the the instruction that describes the task:
### Input:
returns the list of isotopes for the element of the compound defined with their stoichiometric values
Parameters:
===========
compound: string (default is empty). If empty, all the stoichiometric will be displayed
element: string (default is same as compound).
Raises:
=======
ValueError if element is not defined in the stack
### Response:
def get_isotopic_ratio(self, compound='', element=''):
    """Return the isotopes of ``element`` in ``compound`` mapped to their
    stoichiometric (isotopic-ratio) values.

    Parameters:
    ===========
    compound: string (default is empty). If empty, the ratios of every
        element of every compound in the stack are returned as a nested dict.
    element: string (default is same as compound).

    Raises:
    =======
    ValueError if compound or element is not defined in the stack
    """
    stack = self.stack
    compound = str(compound)
    if compound == '':
        # No compound given: recurse over every element of every compound.
        all_ratios = {}
        for name in stack.keys():
            name = str(name)
            all_ratios[name] = {
                elem: self.get_isotopic_ratio(compound=name, element=elem)
                for elem in stack[name]['elements']
            }
        return all_ratios
    # Validate the compound name.
    if compound not in stack.keys():
        list_compounds_joined = ', '.join(stack.keys())
        raise ValueError("Compound '{}' could not be find in {}".format(compound, list_compounds_joined))
    # An empty element defaults to the compound name (single-element case).
    if element == '':
        element = compound
    if element not in stack[compound].keys():
        list_element_joined = ', '.join(stack[compound].keys())
        raise ValueError("Element '{}' should be any of those elements: {}".format(element, list_element_joined))
    isotopes = stack[compound][element]['isotopes']
    return dict(zip(isotopes['list'], isotopes['isotopic_ratio']))
def clearAnnouncements(self):
"""
Flushes the announcement table.
"""
try:
q = models.Announcement.delete().where(
models.Announcement.id > 0)
q.execute()
except Exception as e:
raise exceptions.RepoManagerException(e) | Flushes the announcement table. | Below is the the instruction that describes the task:
### Input:
Flushes the announcement table.
### Response:
def clearAnnouncements(self):
    """
    Flushes the announcement table.
    """
    # Any failure while building or executing the delete is wrapped in the
    # repository manager's own exception type.
    try:
        (models.Announcement
            .delete()
            .where(models.Announcement.id > 0)
            .execute())
    except Exception as e:
        raise exceptions.RepoManagerException(e)
def get_authorization_url(self):
"""Get the authorization Url for the current client."""
return self._format_url(
OAUTH2_ROOT + 'authorize',
query = {
'response_type': 'code',
'client_id': self.client.get('client_id', ''),
'redirect_uri': self.client.get('redirect_uri', '')
}) | Get the authorization Url for the current client. | Below is the the instruction that describes the task:
### Input:
Get the authorization Url for the current client.
### Response:
def get_authorization_url(self):
    """Build and return the OAuth2 authorization URL for the current client."""
    # Missing client settings fall back to empty strings in the query.
    query_params = {
        'response_type': 'code',
        'client_id': self.client.get('client_id', ''),
        'redirect_uri': self.client.get('redirect_uri', ''),
    }
    return self._format_url(OAUTH2_ROOT + 'authorize', query=query_params)
def start(self, sockets=None, **kwargs):
"""
Present the PTY of the container inside the current process.
This will take over the current process' TTY until the container's PTY
is closed.
"""
pty_stdin, pty_stdout, pty_stderr = sockets or self.sockets()
pumps = []
if pty_stdin and self.interactive:
pumps.append(io.Pump(io.Stream(self.stdin), pty_stdin, wait_for_output=False))
if pty_stdout:
pumps.append(io.Pump(pty_stdout, io.Stream(self.stdout), propagate_close=False))
if pty_stderr:
pumps.append(io.Pump(pty_stderr, io.Stream(self.stderr), propagate_close=False))
if not self._container_info()['State']['Running']:
self.client.start(self.container, **kwargs)
return pumps | Present the PTY of the container inside the current process.
This will take over the current process' TTY until the container's PTY
is closed. | Below is the the instruction that describes the task:
### Input:
Present the PTY of the container inside the current process.
This will take over the current process' TTY until the container's PTY
is closed.
### Response:
def start(self, sockets=None, **kwargs):
    """
    Present the PTY of the container inside the current process.
    This will take over the current process' TTY until the container's PTY
    is closed.
    """
    # Sockets may be supplied by the caller; otherwise open them now.
    stdin_sock, stdout_sock, stderr_sock = sockets or self.sockets()
    pumps = []
    # Only attach our stdin when running interactively.
    if stdin_sock and self.interactive:
        pumps.append(
            io.Pump(io.Stream(self.stdin), stdin_sock, wait_for_output=False))
    if stdout_sock:
        pumps.append(
            io.Pump(stdout_sock, io.Stream(self.stdout), propagate_close=False))
    if stderr_sock:
        pumps.append(
            io.Pump(stderr_sock, io.Stream(self.stderr), propagate_close=False))
    # Start the container if it is not already running.
    if not self._container_info()['State']['Running']:
        self.client.start(self.container, **kwargs)
    return pumps
def payload(self):
"""
Returns:
`str` when not json.
`dict` when json.
"""
if self.is_json:
if not self._body_parsed:
if hasattr(self._body, 'decode'):
body = self._body.decode('utf-8')
else:
body = self._body
self._body_parsed = json.loads(body)
return self._body_parsed
else:
return self._body | Returns:
`str` when not json.
`dict` when json. | Below is the the instruction that describes the task:
### Input:
Returns:
`str` when not json.
`dict` when json.
### Response:
def payload(self):
    """
    Returns:
        `str` when not json.
        `dict` when json.
    """
    # Non-JSON bodies are handed back untouched.
    if not self.is_json:
        return self._body
    # Lazily parse JSON bodies once and cache the result.
    if not self._body_parsed:
        raw = self._body
        if hasattr(raw, 'decode'):
            raw = raw.decode('utf-8')
        self._body_parsed = json.loads(raw)
    return self._body_parsed
def calc_mean_var_weights(returns, weight_bounds=(0., 1.),
rf=0.,
covar_method='ledoit-wolf',
options=None):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Args:
* returns (DataFrame): Returns for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* covar_method (str): Covariance matrix estimation method.
Currently supported:
- `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_
- standard
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
def fitness(weights, exp_rets, covar, rf):
# portfolio mean
mean = sum(exp_rets * weights)
# portfolio var
var = np.dot(np.dot(weights, covar), weights)
# utility - i.e. sharpe ratio
util = (mean - rf) / np.sqrt(var)
# negative because we want to maximize and optimizer
# minimizes metric
return -util
n = len(returns.columns)
# expected return defaults to mean return by default
exp_rets = returns.mean()
# calc covariance matrix
if covar_method == 'ledoit-wolf':
covar = sklearn.covariance.ledoit_wolf(returns)[0]
elif covar_method == 'standard':
covar = returns.cov()
else:
raise NotImplementedError('covar_method not implemented')
weights = np.ones([n]) / n
bounds = [weight_bounds for i in range(n)]
# sum of weights must be equal to 1
constraints = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})
optimized = minimize(fitness, weights, (exp_rets, covar, rf),
method='SLSQP', constraints=constraints,
bounds=bounds, options=options)
# check if success
if not optimized.success:
raise Exception(optimized.message)
# return weight vector
return pd.Series({returns.columns[i]: optimized.x[i] for i in range(n)}) | Calculates the mean-variance weights given a DataFrame of returns.
Args:
* returns (DataFrame): Returns for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* covar_method (str): Covariance matrix estimation method.
Currently supported:
- `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_
- standard
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight} | Below is the the instruction that describes the task:
### Input:
Calculates the mean-variance weights given a DataFrame of returns.
Args:
* returns (DataFrame): Returns for multiple securities.
* weight_bounds ((low, high)): Weigh limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* covar_method (str): Covariance matrix estimation method.
Currently supported:
- `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_
- standard
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
### Response:
def calc_mean_var_weights(returns, weight_bounds=(0., 1.),
                          rf=0.,
                          covar_method='ledoit-wolf',
                          options=None):
    """
    Calculates the mean-variance weights given a DataFrame of returns.
    Args:
        * returns (DataFrame): Returns for multiple securities.
        * weight_bounds ((low, high)): Weight limits for optimization.
        * rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
        * covar_method (str): Covariance matrix estimation method.
         Currently supported:
                - `ledoit-wolf <http://www.ledoit.net/honey.pdf>`_
                - standard
        * options (dict): options for minimizing, e.g. {'maxiter': 10000 }
    Returns:
        Series {col_name: weight}
    Raises:
        NotImplementedError: if covar_method is not supported.
        RuntimeError: if the optimizer fails to converge.
    """
    def fitness(weights, exp_rets, covar, rf):
        # portfolio mean
        mean = sum(exp_rets * weights)
        # portfolio variance
        var = np.dot(np.dot(weights, covar), weights)
        # utility = sharpe ratio; negated because the optimizer minimizes
        return -(mean - rf) / np.sqrt(var)

    n = len(returns.columns)
    # expected returns default to the historical mean
    exp_rets = returns.mean()
    # covariance matrix estimation
    if covar_method == 'ledoit-wolf':
        covar = sklearn.covariance.ledoit_wolf(returns)[0]
    elif covar_method == 'standard':
        covar = returns.cov()
    else:
        raise NotImplementedError('covar_method not implemented')
    # start from equal weights
    weights = np.ones(n) / n
    bounds = [weight_bounds] * n
    # weights must sum to one
    constraints = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})
    optimized = minimize(fitness, weights, (exp_rets, covar, rf),
                         method='SLSQP', constraints=constraints,
                         bounds=bounds, options=options)
    if not optimized.success:
        # RuntimeError (an Exception subclass, so existing `except Exception`
        # handlers still work) is more specific than a bare Exception.
        raise RuntimeError(optimized.message)
    # map the optimized weight vector back onto the security names
    return pd.Series(optimized.x, index=returns.columns)
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
"""
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
"""
callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path, byte_order, data_format) | Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw" | Below is the the instruction that describes the task:
### Input:
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
### Response:
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
    """
    Save a model to protobuf files so that it can be used in tensorflow inference.
    When saving the model, placeholders will be added to the tf model as input
    nodes, so the names and shapes of the placeholders must be passed in (a
    BigDL model does not carry that information). The order of the placeholder
    information should match the inputs of the graph model.
    :param inputs: placeholder information, an array of tuples (input_name, shape)
                   where 'input_name' is a string and shape is an array of integers
    :param path: the path to be saved to
    :param byte_order: model byte order
    :param data_format: model data format, should be "nhwc" or "nchw"
    """
    # Delegate the actual serialization to the JVM side via the BigDL bridge.
    callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path,
                  byte_order, data_format)
def tvd(x0, rho, gamma):
"""
Proximal operator for the total variation denoising penalty
Requires scikit-image be installed
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
Raises
------
ImportError
If scikit-image fails to be imported
"""
try:
from skimage.restoration import denoise_tv_bregman
except ImportError:
print('Error: scikit-image not found. TVD will not work.')
return x0
return denoise_tv_bregman(x0, rho / gamma) | Proximal operator for the total variation denoising penalty
Requires scikit-image be installed
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
Raises
------
ImportError
If scikit-image fails to be imported | Below is the the instruction that describes the task:
### Input:
Proximal operator for the total variation denoising penalty
Requires scikit-image be installed
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
Raises
------
ImportError
If scikit-image fails to be imported
### Response:
def tvd(x0, rho, gamma):
"""
Proximal operator for the total variation denoising penalty
Requires scikit-image be installed
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
Raises
------
ImportError
If scikit-image fails to be imported
"""
try:
from skimage.restoration import denoise_tv_bregman
except ImportError:
print('Error: scikit-image not found. TVD will not work.')
return x0
return denoise_tv_bregman(x0, rho / gamma) |
def decode(self, encoded):
""" Decodes ``encoded`` label.
Args:
encoded (torch.Tensor): Encoded label.
Returns:
object: Label decoded from ``encoded``.
"""
encoded = super().decode(encoded)
if encoded.numel() > 1:
raise ValueError(
'``decode`` decodes one label at a time, use ``batch_decode`` instead.')
return self.itos[encoded.squeeze().item()] | Decodes ``encoded`` label.
Args:
encoded (torch.Tensor): Encoded label.
Returns:
object: Label decoded from ``encoded``. | Below is the the instruction that describes the task:
### Input:
Decodes ``encoded`` label.
Args:
encoded (torch.Tensor): Encoded label.
Returns:
object: Label decoded from ``encoded``.
### Response:
def decode(self, encoded):
""" Decodes ``encoded`` label.
Args:
encoded (torch.Tensor): Encoded label.
Returns:
object: Label decoded from ``encoded``.
"""
encoded = super().decode(encoded)
if encoded.numel() > 1:
raise ValueError(
'``decode`` decodes one label at a time, use ``batch_decode`` instead.')
return self.itos[encoded.squeeze().item()] |
def add_linear_constraints(self, *relations):
"""Add constraints to the problem
Each constraint is represented by a Relation, and the
expression in that relation can be a set expression.
"""
constraints = []
for relation in relations:
if self._check_relation(relation):
constraints.append(Constraint(self, None))
else:
for name in self._add_constraints(relation):
constraints.append(Constraint(self, name))
return constraints | Add constraints to the problem
Each constraint is represented by a Relation, and the
expression in that relation can be a set expression. | Below is the the instruction that describes the task:
### Input:
Add constraints to the problem
Each constraint is represented by a Relation, and the
expression in that relation can be a set expression.
### Response:
def add_linear_constraints(self, *relations):
"""Add constraints to the problem
Each constraint is represented by a Relation, and the
expression in that relation can be a set expression.
"""
constraints = []
for relation in relations:
if self._check_relation(relation):
constraints.append(Constraint(self, None))
else:
for name in self._add_constraints(relation):
constraints.append(Constraint(self, name))
return constraints |
def delete(self, digest):
"""
Delete a blob
:param digest: the hex digest of the blob to be deleted
:return: True if blob existed
"""
return self.conn.client.blob_del(self.container_name, digest) | Delete a blob
:param digest: the hex digest of the blob to be deleted
:return: True if blob existed | Below is the the instruction that describes the task:
### Input:
Delete a blob
:param digest: the hex digest of the blob to be deleted
:return: True if blob existed
### Response:
def delete(self, digest):
"""
Delete a blob
:param digest: the hex digest of the blob to be deleted
:return: True if blob existed
"""
return self.conn.client.blob_del(self.container_name, digest) |
def get_style_bits(match=False, comment=False, selected=False, data=False, diff=False, user=0):
""" Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled)
"""
style_bits = 0
if user:
style_bits |= (user & user_bit_mask)
if diff:
style_bits |= diff_bit_mask
if match:
style_bits |= match_bit_mask
if comment:
style_bits |= comment_bit_mask
if data:
style_bits |= (data_style & user_bit_mask)
if selected:
style_bits |= selected_bit_mask
return style_bits | Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled) | Below is the the instruction that describes the task:
### Input:
Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled)
### Response:
def get_style_bits(match=False, comment=False, selected=False, data=False, diff=False, user=0):
""" Return an int value that contains the specified style bits set.
Available styles for each byte are:
match: part of the currently matched search
comment: user commented area
selected: selected region
data: labeled in the disassembler as a data region (i.e. not disassembled)
"""
style_bits = 0
if user:
style_bits |= (user & user_bit_mask)
if diff:
style_bits |= diff_bit_mask
if match:
style_bits |= match_bit_mask
if comment:
style_bits |= comment_bit_mask
if data:
style_bits |= (data_style & user_bit_mask)
if selected:
style_bits |= selected_bit_mask
return style_bits |
def publish(self, message, routing_key=None):
"""Publish message to exchange
:param message: message for publishing
:type message: any serializable object
:param routing_key: routing key for queue
:return: None
"""
if routing_key is None:
routing_key = self.routing_key
return self.client.publish_to_exchange(self.name, message=message, routing_key=routing_key) | Publish message to exchange
:param message: message for publishing
:type message: any serializable object
:param routing_key: routing key for queue
:return: None | Below is the the instruction that describes the task:
### Input:
Publish message to exchange
:param message: message for publishing
:type message: any serializable object
:param routing_key: routing key for queue
:return: None
### Response:
def publish(self, message, routing_key=None):
"""Publish message to exchange
:param message: message for publishing
:type message: any serializable object
:param routing_key: routing key for queue
:return: None
"""
if routing_key is None:
routing_key = self.routing_key
return self.client.publish_to_exchange(self.name, message=message, routing_key=routing_key) |
def _find_resource_by_key(self, resource_list, key, value):
"""
Finds a resource by key, first case insensitive match.
Returns tuple of (key, resource)
Raises KeyError if the given key cannot be found.
"""
original = value
value = unicode(value.upper())
for k, resource in resource_list.iteritems():
if key in resource and resource[key].upper() == value:
return k, resource
raise KeyError, original | Finds a resource by key, first case insensitive match.
Returns tuple of (key, resource)
Raises KeyError if the given key cannot be found. | Below is the the instruction that describes the task:
### Input:
Finds a resource by key, first case insensitive match.
Returns tuple of (key, resource)
Raises KeyError if the given key cannot be found.
### Response:
def _find_resource_by_key(self, resource_list, key, value):
"""
Finds a resource by key, first case insensitive match.
Returns tuple of (key, resource)
Raises KeyError if the given key cannot be found.
"""
original = value
value = unicode(value.upper())
for k, resource in resource_list.iteritems():
if key in resource and resource[key].upper() == value:
return k, resource
raise KeyError, original |
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] | Yield egg or source distribution objects based on basename | Below is the the instruction that describes the task:
### Input:
Yield egg or source distribution objects based on basename
### Response:
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] |
def _retrieve_html_page(self, uri):
"""
Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'201802040nwe'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = BOXSCORE_URL % uri
try:
url_data = pq(url)
except HTTPError:
return None
# For NFL, a 404 page doesn't actually raise a 404 error, so it needs
# to be manually checked.
if '404 error' in str(url_data):
return None
return pq(utils._remove_html_comment_tags(url_data)) | Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'201802040nwe'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed. | Below is the the instruction that describes the task:
### Input:
Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'201802040nwe'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
### Response:
def _retrieve_html_page(self, uri):
"""
Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'201802040nwe'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = BOXSCORE_URL % uri
try:
url_data = pq(url)
except HTTPError:
return None
# For NFL, a 404 page doesn't actually raise a 404 error, so it needs
# to be manually checked.
if '404 error' in str(url_data):
return None
return pq(utils._remove_html_comment_tags(url_data)) |
def BGPNeighborPrefixExceeded_originator_switch_info_switchIpV4Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
BGPNeighborPrefixExceeded = ET.SubElement(config, "BGPNeighborPrefixExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(BGPNeighborPrefixExceeded, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def BGPNeighborPrefixExceeded_originator_switch_info_switchIpV4Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
BGPNeighborPrefixExceeded = ET.SubElement(config, "BGPNeighborPrefixExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(BGPNeighborPrefixExceeded, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : integer, default 1
Degrees of freedom.
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof) | Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : integer, default 1
Degrees of freedom. | Below is the the instruction that describes the task:
### Input:
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : integer, default 1
Degrees of freedom.
### Response:
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : integer, default 1
Degrees of freedom.
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof) |
def rosmsg(self):
""":obj:`sensor_msgs.Image` : ROS Image
"""
from cv_bridge import CvBridge, CvBridgeError
cv_bridge = CvBridge()
try:
return cv_bridge.cv2_to_imgmsg(self._data, encoding=self._encoding)
except CvBridgeError as cv_bridge_exception:
logging.error('%s' % (str(cv_bridge_exception))) | :obj:`sensor_msgs.Image` : ROS Image | Below is the the instruction that describes the task:
### Input:
:obj:`sensor_msgs.Image` : ROS Image
### Response:
def rosmsg(self):
""":obj:`sensor_msgs.Image` : ROS Image
"""
from cv_bridge import CvBridge, CvBridgeError
cv_bridge = CvBridge()
try:
return cv_bridge.cv2_to_imgmsg(self._data, encoding=self._encoding)
except CvBridgeError as cv_bridge_exception:
logging.error('%s' % (str(cv_bridge_exception))) |
def _imm_getattribute(self, name):
'''
An immutable's getattribute calculates lazy values when not yet cached in the object then adds
them as attributes.
'''
if _imm_is_init(self):
return _imm_init_getattribute(self, name)
else:
dd = object.__getattribute__(self, '__dict__')
if name == '__dict__': return dd
curval = dd.get(name, dd)
if curval is not dd: return dd[name]
values = _imm_value_data(self)
if name not in values:
return object.__getattribute__(self, name)
(args, memfn, _) = values[name]
value = memfn(*[getattr(self, arg) for arg in args])
dd[name] = value
# if this is a const, it may have checks to run
if name in _imm_const_data(self):
# #TODO
# Note that there's a race condition that eventually needs to be handled here:
# If dd[name] is set then a check fails, there may have been something that read the
# improper value in the meantime
try:
_imm_check(self, [name])
except:
del dd[name]
raise
# if those pass, then we're fine
return value | An immutable's getattribute calculates lazy values when not yet cached in the object then adds
them as attributes. | Below is the the instruction that describes the task:
### Input:
An immutable's getattribute calculates lazy values when not yet cached in the object then adds
them as attributes.
### Response:
def _imm_getattribute(self, name):
'''
An immutable's getattribute calculates lazy values when not yet cached in the object then adds
them as attributes.
'''
if _imm_is_init(self):
return _imm_init_getattribute(self, name)
else:
dd = object.__getattribute__(self, '__dict__')
if name == '__dict__': return dd
curval = dd.get(name, dd)
if curval is not dd: return dd[name]
values = _imm_value_data(self)
if name not in values:
return object.__getattribute__(self, name)
(args, memfn, _) = values[name]
value = memfn(*[getattr(self, arg) for arg in args])
dd[name] = value
# if this is a const, it may have checks to run
if name in _imm_const_data(self):
# #TODO
# Note that there's a race condition that eventually needs to be handled here:
# If dd[name] is set then a check fails, there may have been something that read the
# improper value in the meantime
try:
_imm_check(self, [name])
except:
del dd[name]
raise
# if those pass, then we're fine
return value |
def drop_genes(self, build=None):
"""Delete the genes collection"""
if build:
LOG.info("Dropping the hgnc_gene collection, build %s", build)
self.hgnc_collection.delete_many({'build': build})
else:
LOG.info("Dropping the hgnc_gene collection")
self.hgnc_collection.drop() | Delete the genes collection | Below is the the instruction that describes the task:
### Input:
Delete the genes collection
### Response:
def drop_genes(self, build=None):
"""Delete the genes collection"""
if build:
LOG.info("Dropping the hgnc_gene collection, build %s", build)
self.hgnc_collection.delete_many({'build': build})
else:
LOG.info("Dropping the hgnc_gene collection")
self.hgnc_collection.drop() |
def color_greedy(self):
'''Returns a greedy vertex coloring as an array of ints.'''
n = self.num_vertices()
coloring = np.zeros(n, dtype=int)
for i, nbrs in enumerate(self.adj_list()):
nbr_colors = set(coloring[nbrs])
for c in count(1):
if c not in nbr_colors:
coloring[i] = c
break
return coloring | Returns a greedy vertex coloring as an array of ints. | Below is the the instruction that describes the task:
### Input:
Returns a greedy vertex coloring as an array of ints.
### Response:
def color_greedy(self):
'''Returns a greedy vertex coloring as an array of ints.'''
n = self.num_vertices()
coloring = np.zeros(n, dtype=int)
for i, nbrs in enumerate(self.adj_list()):
nbr_colors = set(coloring[nbrs])
for c in count(1):
if c not in nbr_colors:
coloring[i] = c
break
return coloring |
def encode(self, method, uri):
'''Called by the client to encode Authentication header.'''
if not self.username or not self.password:
return
o = self.options
qop = o.get('qop')
realm = o.get('realm')
nonce = o.get('nonce')
entdig = None
p_parsed = urlparse(uri)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
ha1 = self.ha1(realm, self.password)
ha2 = self.ha2(qop, method, path)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = sha1(s).hexdigest()[:16]
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, ha2)
respdig = self.KD(ha1, noncebit)
elif qop is None:
respdig = self.KD(ha1, "%s:%s" % (nonce, ha2))
else:
# XXX handle auth-int.
return
base = ('username="%s", realm="%s", nonce="%s", uri="%s", '
'response="%s"' % (self.username, realm, nonce, path, respdig))
opaque = o.get('opaque')
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % self.algorithm
if qop:
base += ', qop=%s, nc=%s, cnonce="%s"' % (qop, ncvalue, cnonce)
return 'Digest %s' % base | Called by the client to encode Authentication header. | Below is the the instruction that describes the task:
### Input:
Called by the client to encode Authentication header.
### Response:
def encode(self, method, uri):
'''Called by the client to encode Authentication header.'''
if not self.username or not self.password:
return
o = self.options
qop = o.get('qop')
realm = o.get('realm')
nonce = o.get('nonce')
entdig = None
p_parsed = urlparse(uri)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
ha1 = self.ha1(realm, self.password)
ha2 = self.ha2(qop, method, path)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = sha1(s).hexdigest()[:16]
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, ha2)
respdig = self.KD(ha1, noncebit)
elif qop is None:
respdig = self.KD(ha1, "%s:%s" % (nonce, ha2))
else:
# XXX handle auth-int.
return
base = ('username="%s", realm="%s", nonce="%s", uri="%s", '
'response="%s"' % (self.username, realm, nonce, path, respdig))
opaque = o.get('opaque')
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % self.algorithm
if qop:
base += ', qop=%s, nc=%s, cnonce="%s"' % (qop, ncvalue, cnonce)
return 'Digest %s' % base |
def add(self, byte_sig: str, text_sig: str) -> None:
"""
Adds a new byte - text signature pair to the database.
:param byte_sig: 4-byte signature string
:param text_sig: resolved text signature
:return:
"""
byte_sig = self._normalize_byte_sig(byte_sig)
with SQLiteDB(self.path) as cur:
# ignore new row if it's already in the DB (and would cause a unique constraint error)
cur.execute(
"INSERT OR IGNORE INTO signatures (byte_sig, text_sig) VALUES (?,?)",
(byte_sig, text_sig),
) | Adds a new byte - text signature pair to the database.
:param byte_sig: 4-byte signature string
:param text_sig: resolved text signature
:return: | Below is the the instruction that describes the task:
### Input:
Adds a new byte - text signature pair to the database.
:param byte_sig: 4-byte signature string
:param text_sig: resolved text signature
:return:
### Response:
def add(self, byte_sig: str, text_sig: str) -> None:
"""
Adds a new byte - text signature pair to the database.
:param byte_sig: 4-byte signature string
:param text_sig: resolved text signature
:return:
"""
byte_sig = self._normalize_byte_sig(byte_sig)
with SQLiteDB(self.path) as cur:
# ignore new row if it's already in the DB (and would cause a unique constraint error)
cur.execute(
"INSERT OR IGNORE INTO signatures (byte_sig, text_sig) VALUES (?,?)",
(byte_sig, text_sig),
) |
def put(self, thing_id='0', action_name=None, action_id=None):
"""
Handle a PUT request.
TODO: this is not yet defined in the spec
thing_id -- ID of the thing this request is for
action_name -- name of the action from the URL path
action_id -- the action ID from the URL path
"""
thing = self.get_thing(thing_id)
if thing is None:
self.set_status(404)
return
self.set_status(200) | Handle a PUT request.
TODO: this is not yet defined in the spec
thing_id -- ID of the thing this request is for
action_name -- name of the action from the URL path
action_id -- the action ID from the URL path | Below is the the instruction that describes the task:
### Input:
Handle a PUT request.
TODO: this is not yet defined in the spec
thing_id -- ID of the thing this request is for
action_name -- name of the action from the URL path
action_id -- the action ID from the URL path
### Response:
def put(self, thing_id='0', action_name=None, action_id=None):
"""
Handle a PUT request.
TODO: this is not yet defined in the spec
thing_id -- ID of the thing this request is for
action_name -- name of the action from the URL path
action_id -- the action ID from the URL path
"""
thing = self.get_thing(thing_id)
if thing is None:
self.set_status(404)
return
self.set_status(200) |
def _read_object(self, correlation_id, parameters):
"""
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration.
:return: a JSON object with configuration.
"""
path = self.get_path()
if path == None:
raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
if not os.path.isfile(path):
raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
try:
with open(path, 'r') as file:
config = file.read()
config = self._parameterize(config, parameters)
return yaml.load(config)
except Exception as ex:
raise FileException(
correlation_id,
"READ_FAILED",
"Failed reading configuration " + path + ": " + str(ex)
).with_details("path", path).with_cause(ex) | Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration.
:return: a JSON object with configuration. | Below is the the instruction that describes the task:
### Input:
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration.
:return: a JSON object with configuration.
### Response:
def _read_object(self, correlation_id, parameters):
"""
Reads configuration file, parameterizes its content and converts it into JSON object.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameters the configuration.
:return: a JSON object with configuration.
"""
path = self.get_path()
if path == None:
raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
if not os.path.isfile(path):
raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
try:
with open(path, 'r') as file:
config = file.read()
config = self._parameterize(config, parameters)
return yaml.load(config)
except Exception as ex:
raise FileException(
correlation_id,
"READ_FAILED",
"Failed reading configuration " + path + ": " + str(ex)
).with_details("path", path).with_cause(ex) |
def tokenize(sentence):
"""
Converts a single sentence into a list of individual significant units
Args:
sentence (str): Input string ie. 'This is a sentence.'
Returns:
list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence']
"""
tokens = []
class Vars:
start_pos = -1
last_type = 'o'
def update(c, i):
if c.isalpha() or c in '-{}':
t = 'a'
elif c.isdigit() or c == '#':
t = 'n'
elif c.isspace():
t = 's'
else:
t = 'o'
if t != Vars.last_type or t == 'o':
if Vars.start_pos >= 0:
token = sentence[Vars.start_pos:i].lower()
if token not in '.!?':
tokens.append(token)
Vars.start_pos = -1 if t == 's' else i
Vars.last_type = t
for i, char in enumerate(sentence):
update(char, i)
update(' ', len(sentence))
return tokens | Converts a single sentence into a list of individual significant units
Args:
sentence (str): Input string ie. 'This is a sentence.'
Returns:
list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence'] | Below is the the instruction that describes the task:
### Input:
Converts a single sentence into a list of individual significant units
Args:
sentence (str): Input string ie. 'This is a sentence.'
Returns:
list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence']
### Response:
def tokenize(sentence):
"""
Converts a single sentence into a list of individual significant units
Args:
sentence (str): Input string ie. 'This is a sentence.'
Returns:
list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence']
"""
tokens = []
class Vars:
start_pos = -1
last_type = 'o'
def update(c, i):
if c.isalpha() or c in '-{}':
t = 'a'
elif c.isdigit() or c == '#':
t = 'n'
elif c.isspace():
t = 's'
else:
t = 'o'
if t != Vars.last_type or t == 'o':
if Vars.start_pos >= 0:
token = sentence[Vars.start_pos:i].lower()
if token not in '.!?':
tokens.append(token)
Vars.start_pos = -1 if t == 's' else i
Vars.last_type = t
for i, char in enumerate(sentence):
update(char, i)
update(' ', len(sentence))
return tokens |
def get_primary_keys(model):
"""Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class
"""
mapper = model.__mapper__
return [mapper.get_property_by_column(column) for column in mapper.primary_key] | Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class | Below is the the instruction that describes the task:
### Input:
Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class
### Response:
def get_primary_keys(model):
"""Get primary key properties for a SQLAlchemy model.
:param model: SQLAlchemy model class
"""
mapper = model.__mapper__
return [mapper.get_property_by_column(column) for column in mapper.primary_key] |
def wif(self, is_compressed=None):
"""
Return the WIF representation of this key, if available.
"""
secret_exponent = self.secret_exponent()
if secret_exponent is None:
return None
if is_compressed is None:
is_compressed = self.is_compressed()
blob = to_bytes_32(secret_exponent)
if is_compressed:
blob += b'\01'
return self._network.wif_for_blob(blob) | Return the WIF representation of this key, if available. | Below is the the instruction that describes the task:
### Input:
Return the WIF representation of this key, if available.
### Response:
def wif(self, is_compressed=None):
"""
Return the WIF representation of this key, if available.
"""
secret_exponent = self.secret_exponent()
if secret_exponent is None:
return None
if is_compressed is None:
is_compressed = self.is_compressed()
blob = to_bytes_32(secret_exponent)
if is_compressed:
blob += b'\01'
return self._network.wif_for_blob(blob) |
def getrepaymentsurl(idcred, *args, **kwargs):
"""Request loan Repayments URL.
If idcred is set, you'll get a response adequate for a
MambuRepayments object. There's a MambuRepayment object too, but
you'll get a list first and each element of it will be automatically
converted to a MambuRepayment object that you may use.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar. A MambuError must likely since I
haven't needed it for anything but for repayments of one and just
one loan account.
See mamburepayment module and pydoc for further information.
No current implemented filter parameters.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
"""
url = getmambuurl(*args,**kwargs) + "loans/" + idcred + "/repayments"
return url | Request loan Repayments URL.
If idcred is set, you'll get a response adequate for a
MambuRepayments object. There's a MambuRepayment object too, but
you'll get a list first and each element of it will be automatically
converted to a MambuRepayment object that you may use.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar. A MambuError must likely since I
haven't needed it for anything but for repayments of one and just
one loan account.
See mamburepayment module and pydoc for further information.
No current implemented filter parameters.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future. | Below is the the instruction that describes the task:
### Input:
Request loan Repayments URL.
If idcred is set, you'll get a response adequate for a
MambuRepayments object. There's a MambuRepayment object too, but
you'll get a list first and each element of it will be automatically
converted to a MambuRepayment object that you may use.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar. A MambuError must likely since I
haven't needed it for anything but for repayments of one and just
one loan account.
See mamburepayment module and pydoc for further information.
No current implemented filter parameters.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
### Response:
def getrepaymentsurl(idcred, *args, **kwargs):
"""Request loan Repayments URL.
If idcred is set, you'll get a response adequate for a
MambuRepayments object. There's a MambuRepayment object too, but
you'll get a list first and each element of it will be automatically
converted to a MambuRepayment object that you may use.
If not set, you'll get a Jar Jar Binks object, or something quite
strange and useless as JarJar. A MambuError must likely since I
haven't needed it for anything but for repayments of one and just
one loan account.
See mamburepayment module and pydoc for further information.
No current implemented filter parameters.
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
"""
url = getmambuurl(*args,**kwargs) + "loans/" + idcred + "/repayments"
return url |
def _check_shape(var, name, shape, shape2=None):
r"""Check that <var> has shape <shape>; if false raise ValueError(name)"""
varshape = np.shape(var)
if shape != varshape:
if shape2:
if shape2 != varshape:
print('* ERROR :: Parameter ' + name + ' has wrong shape!' +
' : ' + str(varshape) + ' instead of ' + str(shape) +
'or' + str(shape2) + '.')
raise ValueError(name)
else:
print('* ERROR :: Parameter ' + name + ' has wrong shape! : ' +
str(varshape) + ' instead of ' + str(shape) + '.')
raise ValueError(name) | r"""Check that <var> has shape <shape>; if false raise ValueError(name) | Below is the the instruction that describes the task:
### Input:
r"""Check that <var> has shape <shape>; if false raise ValueError(name)
### Response:
def _check_shape(var, name, shape, shape2=None):
r"""Check that <var> has shape <shape>; if false raise ValueError(name)"""
varshape = np.shape(var)
if shape != varshape:
if shape2:
if shape2 != varshape:
print('* ERROR :: Parameter ' + name + ' has wrong shape!' +
' : ' + str(varshape) + ' instead of ' + str(shape) +
'or' + str(shape2) + '.')
raise ValueError(name)
else:
print('* ERROR :: Parameter ' + name + ' has wrong shape! : ' +
str(varshape) + ' instead of ' + str(shape) + '.')
raise ValueError(name) |
def ConsultarUltimoComprobante(self, tipo_cbte=151, pto_vta=1):
"Consulta el último No de Comprobante registrado"
ret = self.client.consultarUltimoNroComprobantePorPtoVta(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
solicitud={
'puntoVenta': pto_vta,
'tipoComprobante': tipo_cbte},
)
ret = ret['respuesta']
self.__analizar_errores(ret)
self.NroComprobante = ret['nroComprobante']
return True | Consulta el último No de Comprobante registrado | Below is the the instruction that describes the task:
### Input:
Consulta el último No de Comprobante registrado
### Response:
def ConsultarUltimoComprobante(self, tipo_cbte=151, pto_vta=1):
"Consulta el último No de Comprobante registrado"
ret = self.client.consultarUltimoNroComprobantePorPtoVta(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
solicitud={
'puntoVenta': pto_vta,
'tipoComprobante': tipo_cbte},
)
ret = ret['respuesta']
self.__analizar_errores(ret)
self.NroComprobante = ret['nroComprobante']
return True |
def selfSignCert(self, cert, pkey):
'''
Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None
'''
cert.set_issuer(cert.get_subject())
cert.sign(pkey, self.signing_digest) | Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None | Below is the the instruction that describes the task:
### Input:
Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None
### Response:
def selfSignCert(self, cert, pkey):
'''
Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None
'''
cert.set_issuer(cert.get_subject())
cert.sign(pkey, self.signing_digest) |
def partial_distance_correlation(x, y, z): # pylint:disable=too-many-locals
"""
Partial distance correlation estimator.
Compute the estimator for the partial distance correlation of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance correlation
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance correlation.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1], [1], [2], [2], [3]])
>>> b = np.array([[1], [2], [1], [2], [1]])
>>> c = np.array([[1], [2], [2], [1], [2]])
>>> dcor.partial_distance_correlation(a, a, c)
1.0
>>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS
-0.5...
>>> dcor.partial_distance_correlation(b, b, c)
1.0
>>> dcor.partial_distance_correlation(a, c, c)
0.0
"""
a = _u_distance_matrix(x)
b = _u_distance_matrix(y)
c = _u_distance_matrix(z)
aa = u_product(a, a)
bb = u_product(b, b)
cc = u_product(c, c)
ab = u_product(a, b)
ac = u_product(a, c)
bc = u_product(b, c)
denom_sqr = aa * bb
r_xy = ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xy = np.clip(r_xy, -1, 1)
denom_sqr = aa * cc
r_xz = ac / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xz = np.clip(r_xz, -1, 1)
denom_sqr = bb * cc
r_yz = bc / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_yz = np.clip(r_yz, -1, 1)
denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2)
return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom | Partial distance correlation estimator.
Compute the estimator for the partial distance correlation of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance correlation
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance correlation.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1], [1], [2], [2], [3]])
>>> b = np.array([[1], [2], [1], [2], [1]])
>>> c = np.array([[1], [2], [2], [1], [2]])
>>> dcor.partial_distance_correlation(a, a, c)
1.0
>>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS
-0.5...
>>> dcor.partial_distance_correlation(b, b, c)
1.0
>>> dcor.partial_distance_correlation(a, c, c)
0.0 | Below is the the instruction that describes the task:
### Input:
Partial distance correlation estimator.
Compute the estimator for the partial distance correlation of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance correlation
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance correlation.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1], [1], [2], [2], [3]])
>>> b = np.array([[1], [2], [1], [2], [1]])
>>> c = np.array([[1], [2], [2], [1], [2]])
>>> dcor.partial_distance_correlation(a, a, c)
1.0
>>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS
-0.5...
>>> dcor.partial_distance_correlation(b, b, c)
1.0
>>> dcor.partial_distance_correlation(a, c, c)
0.0
### Response:
def partial_distance_correlation(x, y, z): # pylint:disable=too-many-locals
"""
Partial distance correlation estimator.
Compute the estimator for the partial distance correlation of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance correlation
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance correlation.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1], [1], [2], [2], [3]])
>>> b = np.array([[1], [2], [1], [2], [1]])
>>> c = np.array([[1], [2], [2], [1], [2]])
>>> dcor.partial_distance_correlation(a, a, c)
1.0
>>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS
-0.5...
>>> dcor.partial_distance_correlation(b, b, c)
1.0
>>> dcor.partial_distance_correlation(a, c, c)
0.0
"""
a = _u_distance_matrix(x)
b = _u_distance_matrix(y)
c = _u_distance_matrix(z)
aa = u_product(a, a)
bb = u_product(b, b)
cc = u_product(c, c)
ab = u_product(a, b)
ac = u_product(a, c)
bc = u_product(b, c)
denom_sqr = aa * bb
r_xy = ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xy = np.clip(r_xy, -1, 1)
denom_sqr = aa * cc
r_xz = ac / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xz = np.clip(r_xz, -1, 1)
denom_sqr = bb * cc
r_yz = bc / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_yz = np.clip(r_yz, -1, 1)
denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2)
return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom |
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
"""Generate a DownloadJob that actually triggers a file download."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
base_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/{}'.format(base_url, filename)
local_file = os.path.join(directory, filename)
full_symlink = None
if symlink_path is not None:
full_symlink = os.path.join(symlink_path, filename)
# Keep metadata around
mtable = metadata.get()
mtable.add(entry, local_file)
return DownloadJob(full_url, local_file, expected_checksum, full_symlink) | Generate a DownloadJob that actually triggers a file download. | Below is the the instruction that describes the task:
### Input:
Generate a DownloadJob that actually triggers a file download.
### Response:
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
"""Generate a DownloadJob that actually triggers a file download."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
base_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/{}'.format(base_url, filename)
local_file = os.path.join(directory, filename)
full_symlink = None
if symlink_path is not None:
full_symlink = os.path.join(symlink_path, filename)
# Keep metadata around
mtable = metadata.get()
mtable.add(entry, local_file)
return DownloadJob(full_url, local_file, expected_checksum, full_symlink) |
def status(name='all'):
'''
Using drbdadm to show status of the DRBD devices,
available in the latest drbd9.
Support multiple nodes, multiple volumes.
:type name: str
:param name:
Resource name.
:return: drbd status of resource.
:rtype: list(dict(res))
CLI Example:
.. code-block:: bash
salt '*' drbd.status
salt '*' drbd.status name=<resource name>
'''
# Initialize for multiple times test cases
global ret
global resource
ret = []
resource = {}
cmd = ['drbdadm', 'status']
cmd.append(name)
#One possible output: (number of resource/node/vol are flexible)
#resource role:Secondary
# volume:0 disk:Inconsistent
# volume:1 disk:Inconsistent
# drbd-node1 role:Primary
# volume:0 replication:SyncTarget peer-disk:UpToDate done:10.17
# volume:1 replication:SyncTarget peer-disk:UpToDate done:74.08
# drbd-node2 role:Secondary
# volume:0 peer-disk:Inconsistent resync-suspended:peer
# volume:1 peer-disk:Inconsistent resync-suspended:peer
for line in __salt__['cmd.run'](cmd).splitlines():
_line_parser(line)
if resource:
ret.append(resource)
return ret | Using drbdadm to show status of the DRBD devices,
available in the latest drbd9.
Support multiple nodes, multiple volumes.
:type name: str
:param name:
Resource name.
:return: drbd status of resource.
:rtype: list(dict(res))
CLI Example:
.. code-block:: bash
salt '*' drbd.status
salt '*' drbd.status name=<resource name> | Below is the the instruction that describes the task:
### Input:
Using drbdadm to show status of the DRBD devices,
available in the latest drbd9.
Support multiple nodes, multiple volumes.
:type name: str
:param name:
Resource name.
:return: drbd status of resource.
:rtype: list(dict(res))
CLI Example:
.. code-block:: bash
salt '*' drbd.status
salt '*' drbd.status name=<resource name>
### Response:
def status(name='all'):
'''
Using drbdadm to show status of the DRBD devices,
available in the latest drbd9.
Support multiple nodes, multiple volumes.
:type name: str
:param name:
Resource name.
:return: drbd status of resource.
:rtype: list(dict(res))
CLI Example:
.. code-block:: bash
salt '*' drbd.status
salt '*' drbd.status name=<resource name>
'''
# Initialize for multiple times test cases
global ret
global resource
ret = []
resource = {}
cmd = ['drbdadm', 'status']
cmd.append(name)
#One possible output: (number of resource/node/vol are flexible)
#resource role:Secondary
# volume:0 disk:Inconsistent
# volume:1 disk:Inconsistent
# drbd-node1 role:Primary
# volume:0 replication:SyncTarget peer-disk:UpToDate done:10.17
# volume:1 replication:SyncTarget peer-disk:UpToDate done:74.08
# drbd-node2 role:Secondary
# volume:0 peer-disk:Inconsistent resync-suspended:peer
# volume:1 peer-disk:Inconsistent resync-suspended:peer
for line in __salt__['cmd.run'](cmd).splitlines():
_line_parser(line)
if resource:
ret.append(resource)
return ret |
def colorbar(ax, im, fig=None, loc="right", size="5%", pad="3%"):
"""
Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.
.. versionadded:: 1.3
Parameters
----------
ax : :class:`matplotlib.axis.Axis`
The axis to plot to.
im : :class:`matplotlib.image.AxesImage`
The plotted image to use for the colorbar.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
loc : str, optional
The location to place the axes.
size : str, optional
The size to allocate for the colorbar.
pad : str, optional
The amount to pad the colorbar.
"""
if fig is None:
fig = ax.get_figure()
# _pdb.set_trace()
if loc == "left" or loc == "right":
width = fig.get_figwidth()
new = width * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure width: {}'.format(new))
# fig.set_size_inches(new, fig.get_figheight(), forward=True)
elif loc == "top" or loc == "bottom":
height = fig.get_figheight()
new = height * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure height: {}'.format(new))
# fig.set_figheight(fig.get_figwidth(), new, forward=True)
divider = _ag1.make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=pad)
return cax, _plt.colorbar(im, cax=cax) | Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.
.. versionadded:: 1.3
Parameters
----------
ax : :class:`matplotlib.axis.Axis`
The axis to plot to.
im : :class:`matplotlib.image.AxesImage`
The plotted image to use for the colorbar.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
loc : str, optional
The location to place the axes.
size : str, optional
The size to allocate for the colorbar.
pad : str, optional
The amount to pad the colorbar. | Below is the the instruction that describes the task:
### Input:
Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.
.. versionadded:: 1.3
Parameters
----------
ax : :class:`matplotlib.axis.Axis`
The axis to plot to.
im : :class:`matplotlib.image.AxesImage`
The plotted image to use for the colorbar.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
loc : str, optional
The location to place the axes.
size : str, optional
The size to allocate for the colorbar.
pad : str, optional
The amount to pad the colorbar.
### Response:
def colorbar(ax, im, fig=None, loc="right", size="5%", pad="3%"):
"""
Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.
.. versionadded:: 1.3
Parameters
----------
ax : :class:`matplotlib.axis.Axis`
The axis to plot to.
im : :class:`matplotlib.image.AxesImage`
The plotted image to use for the colorbar.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
loc : str, optional
The location to place the axes.
size : str, optional
The size to allocate for the colorbar.
pad : str, optional
The amount to pad the colorbar.
"""
if fig is None:
fig = ax.get_figure()
# _pdb.set_trace()
if loc == "left" or loc == "right":
width = fig.get_figwidth()
new = width * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure width: {}'.format(new))
# fig.set_size_inches(new, fig.get_figheight(), forward=True)
elif loc == "top" or loc == "bottom":
height = fig.get_figheight()
new = height * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure height: {}'.format(new))
# fig.set_figheight(fig.get_figwidth(), new, forward=True)
divider = _ag1.make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=pad)
return cax, _plt.colorbar(im, cax=cax) |
def tenant_get(tenant_id=None, name=None, profile=None,
**connection_args):
'''
Return a specific tenants (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for tenant in getattr(kstone, _TENANTS, None).list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = getattr(kstone, _TENANTS, None).get(tenant_id)
ret[tenant.name] = dict((value, getattr(tenant, value)) for value in dir(tenant)
if not value.startswith('_') and
isinstance(getattr(tenant, value), (six.string_types, dict, bool)))
return ret | Return a specific tenants (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova | Below is the the instruction that describes the task:
### Input:
Return a specific tenants (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova
### Response:
def tenant_get(tenant_id=None, name=None, profile=None,
**connection_args):
'''
Return a specific tenants (keystone tenant-get)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_get c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_get name=nova
'''
kstone = auth(profile, **connection_args)
ret = {}
if name:
for tenant in getattr(kstone, _TENANTS, None).list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
tenant = getattr(kstone, _TENANTS, None).get(tenant_id)
ret[tenant.name] = dict((value, getattr(tenant, value)) for value in dir(tenant)
if not value.startswith('_') and
isinstance(getattr(tenant, value), (six.string_types, dict, bool)))
return ret |
def send_request(self, api_name, *api_args, **api_kwargs):
"""Refer to SDK API documentation.
:param api_name: SDK API name
:param *api_args: SDK API sequence parameters
:param **api_kwargs: SDK API keyword parameters
"""
return self.conn.request(api_name, *api_args, **api_kwargs) | Refer to SDK API documentation.
:param api_name: SDK API name
:param *api_args: SDK API sequence parameters
:param **api_kwargs: SDK API keyword parameters | Below is the the instruction that describes the task:
### Input:
Refer to SDK API documentation.
:param api_name: SDK API name
:param *api_args: SDK API sequence parameters
:param **api_kwargs: SDK API keyword parameters
### Response:
def send_request(self, api_name, *api_args, **api_kwargs):
"""Refer to SDK API documentation.
:param api_name: SDK API name
:param *api_args: SDK API sequence parameters
:param **api_kwargs: SDK API keyword parameters
"""
return self.conn.request(api_name, *api_args, **api_kwargs) |
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7
"""
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if isinstance(a, bytes) and isinstance(b, bytes) and not PY2:
for x, y in izip(a, b):
rv |= x ^ y
else:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0 | This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7 | Below is the the instruction that describes the task:
### Input:
This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7
### Response:
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7
"""
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if isinstance(a, bytes) and isinstance(b, bytes) and not PY2:
for x, y in izip(a, b):
rv |= x ^ y
else:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0 |
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
if attribute in set(['http_equiv']) or attribute.startswith('data_'):
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute | Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax | Below is the the instruction that describes the task:
### Input:
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
### Response:
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
if attribute in set(['http_equiv']) or attribute.startswith('data_'):
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute |
def engineer_list_commands(self):
"""
Returns a list of all api end points (commands) exposed
to engineer
"""
rv = []
for k in dir(self):
attr = getattr(self,k)
if type(getattr(attr, "engineer", None)) == dict:
rv.append(k)
return rv | Returns a list of all api end points (commands) exposed
to engineer | Below is the the instruction that describes the task:
### Input:
Returns a list of all api end points (commands) exposed
to engineer
### Response:
def engineer_list_commands(self):
"""
Returns a list of all api end points (commands) exposed
to engineer
"""
rv = []
for k in dir(self):
attr = getattr(self,k)
if type(getattr(attr, "engineer", None)) == dict:
rv.append(k)
return rv |
def get_inferred_data_table(table, pc):
"""
Table level: Dive down, calculate data, then return the new table with the inferred data.
:param str pc: paleo or chron
:param dict table: Metadata
:return dict table: Metadata
"""
age = None
if pc == "paleo":
# Get the age values data first, since it's needed to calculate the other column data.
age = _get_age(table["columns"])
try:
# If age values were not found, then skip resolution.
if age:
# Loop for all the columns in the table
for var, col in table["columns"].items():
# Special cases
# We do not calculate data for each of the keys below, and we cannot calculate any "string" data
if "age" in var or "year" in var:
# Calculate m/m/m/m, but not resolution
table["columns"][var] = _get_inferred_data_column(col)
elif not all(isinstance(i, str) for i in col["values"]):
# Calculate m/m/m/m and resolution
table["columns"][var] = _get_inferred_data_res(col, age)
else:
# Fall through case. No calculations made.
logger_inferred_data.info("get_inferred_data_table: "
"Not calculating inferred data for variableName: {}".format(var))
# If there isn't an age, still calculate the m/m/m/m for the column values.
else:
for var, col in table["columns"].items():
if not all(isinstance(i, str) for i in col["values"]):
# Calculate m/m/m/m and resolution
table["columns"][var] = _get_inferred_data_column(col)
else:
# Fall through case. No calculations made.
logger_inferred_data.info("get_inferred_data_table: "
"Not calculating inferred data for variableName: {}".format(var))
except AttributeError as e:
logger_inferred_data.warn("get_inferred_data_table: AttributeError: {}".format(e))
except Exception as e:
logger_inferred_data.warn("get_inferred_data_table: Exception: {}".format(e))
table["columns"] = _fix_numeric_types(table["columns"])
return table | Table level: Dive down, calculate data, then return the new table with the inferred data.
:param str pc: paleo or chron
:param dict table: Metadata
:return dict table: Metadata | Below is the the instruction that describes the task:
### Input:
Table level: Dive down, calculate data, then return the new table with the inferred data.
:param str pc: paleo or chron
:param dict table: Metadata
:return dict table: Metadata
### Response:
def get_inferred_data_table(table, pc):
"""
Table level: Dive down, calculate data, then return the new table with the inferred data.
:param str pc: paleo or chron
:param dict table: Metadata
:return dict table: Metadata
"""
age = None
if pc == "paleo":
# Get the age values data first, since it's needed to calculate the other column data.
age = _get_age(table["columns"])
try:
# If age values were not found, then skip resolution.
if age:
# Loop for all the columns in the table
for var, col in table["columns"].items():
# Special cases
# We do not calculate data for each of the keys below, and we cannot calculate any "string" data
if "age" in var or "year" in var:
# Calculate m/m/m/m, but not resolution
table["columns"][var] = _get_inferred_data_column(col)
elif not all(isinstance(i, str) for i in col["values"]):
# Calculate m/m/m/m and resolution
table["columns"][var] = _get_inferred_data_res(col, age)
else:
# Fall through case. No calculations made.
logger_inferred_data.info("get_inferred_data_table: "
"Not calculating inferred data for variableName: {}".format(var))
# If there isn't an age, still calculate the m/m/m/m for the column values.
else:
for var, col in table["columns"].items():
if not all(isinstance(i, str) for i in col["values"]):
# Calculate m/m/m/m and resolution
table["columns"][var] = _get_inferred_data_column(col)
else:
# Fall through case. No calculations made.
logger_inferred_data.info("get_inferred_data_table: "
"Not calculating inferred data for variableName: {}".format(var))
except AttributeError as e:
logger_inferred_data.warn("get_inferred_data_table: AttributeError: {}".format(e))
except Exception as e:
logger_inferred_data.warn("get_inferred_data_table: Exception: {}".format(e))
table["columns"] = _fix_numeric_types(table["columns"])
return table |
def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
"""
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var
"""
genes, cells = data.shape
if P_init is None:
P_init = np.random.random((genes, k))
if R_init is None:
R_init = np.random.randint(1, data.max(), (genes, k))
R_init = R_init.astype(float)
if assignments is None:
_, assignments = kmeans_pp(data, k, means)
means = np.zeros((genes, k))
#assignments = np.array([np.random.randint(0,k) for i in range(cells)])
old_assignments = np.copy(assignments)
# If mean > variance, then fall back to Poisson, since NB
# distribution can't handle that case.
for i in range(max_iters):
# estimate params from assigned cells
nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
# re-calculate assignments
lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices,:], R_init[nb_gene_indices,:])
lls += pois_ll.poisson_ll(data[~nb_gene_indices,:], means[~nb_gene_indices,:])
# set NB params to failure values
P_init[~nb_gene_indices,:] = 0
R_init[~nb_gene_indices,:] = np.inf
for c in range(cells):
assignments[c] = np.argmax(lls[c,:])
if np.equal(assignments,old_assignments).all():
break
old_assignments = np.copy(assignments)
return assignments, P_init, R_init | Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var | Below is the instruction that describes the task:
### Input:
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var
### Response:
def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
    """
    Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
    Args:
        data (array): genes x cells
        k (int): number of clusters
        P_init (array): NB success prob param - genes x k. Default: random
        R_init (array): NB stopping param - genes x k. Default: random
        assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
        means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
        max_iters (int): default: 10
    Returns:
        assignments (array): 1d array of length cells, containing integers 0...k-1
        P (array): genes x k - value is 0 for genes with mean > var
        R (array): genes x k - value is inf for genes with mean > var
    """
    genes, cells = data.shape
    # Random warm starts for the NB parameters unless the caller provides them.
    if P_init is None:
        P_init = np.random.random((genes, k))
    if R_init is None:
        R_init = np.random.randint(1, data.max(), (genes, k))
        R_init = R_init.astype(float)
    if assignments is None:
        _, assignments = kmeans_pp(data, k, means)
    # The `means` argument is only used to seed kmeans_pp above; from here on
    # it is recomputed each iteration by fit_cluster.
    means = np.zeros((genes, k))
    #assignments = np.array([np.random.randint(0,k) for i in range(cells)])
    old_assignments = np.copy(assignments)
    # If mean > variance, then fall back to Poisson, since NB
    # distribution can't handle that case.
    for i in range(max_iters):
        # estimate params from assigned cells
        nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
        # re-calculate assignments: NB log-likelihood for NB-eligible genes
        # plus Poisson log-likelihood for the rest, summed per cell/cluster.
        lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices,:], R_init[nb_gene_indices,:])
        lls += pois_ll.poisson_ll(data[~nb_gene_indices,:], means[~nb_gene_indices,:])
        # set NB params to failure values
        P_init[~nb_gene_indices,:] = 0
        R_init[~nb_gene_indices,:] = np.inf
        # Each cell joins the cluster with the highest combined log-likelihood.
        for c in range(cells):
            assignments[c] = np.argmax(lls[c,:])
        # Converged: no cell changed cluster this round.
        if np.equal(assignments,old_assignments).all():
            break
        old_assignments = np.copy(assignments)
    return assignments, P_init, R_init |
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
parent_id = request.GET.get('parent_id', None)
if not parent_id:
parent_id = request.POST.get('parent_id', None)
if parent_id:
return AddFolderPopupForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = self.get_queryset(request).filter(
parent=form_obj.instance.parent,
name=cleaned_data['name'])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError(
'Folder with this name already exists.')
return cleaned_data
# attach clean to the default form rather than defining a new form class
folder_form.clean = folder_form_clean
return folder_form | Returns a Form class for use in the admin add view. This is used by
add_view and change_view. | Below is the instruction that describes the task:
### Input:
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
### Response:
def get_form(self, request, obj=None, **kwargs):
    """
    Returns a Form class for use in the admin add view. This is used by
    add_view and change_view.
    """
    # A parent_id (query string or POST body) means the form was opened as a
    # popup for creating a child folder, which uses a dedicated form class.
    parent_id = request.GET.get('parent_id', None)
    if not parent_id:
        parent_id = request.POST.get('parent_id', None)
    if parent_id:
        return AddFolderPopupForm
    else:
        folder_form = super(FolderAdmin, self).get_form(
            request, obj=None, **kwargs)
        # Reject duplicate folder names among siblings under the same parent.
        def folder_form_clean(form_obj):
            cleaned_data = form_obj.cleaned_data
            folders_with_same_name = self.get_queryset(request).filter(
                parent=form_obj.instance.parent,
                name=cleaned_data['name'])
            # When editing an existing folder, it must not count as its own duplicate.
            if form_obj.instance.pk:
                folders_with_same_name = folders_with_same_name.exclude(
                    pk=form_obj.instance.pk)
            if folders_with_same_name.exists():
                raise ValidationError(
                    'Folder with this name already exists.')
            return cleaned_data
        # attach clean to the default form rather than defining a new form class
        folder_form.clean = folder_form_clean
        return folder_form |
def set_quoted_text(self, quoted_text):
"""Sets the context's ``quoted_text`` flag. Useful when entering and exiting quoted text tokens."""
self.quoted_text = quoted_text
self.line_comment = False
return self | Sets the context's ``quoted_text`` flag. Useful when entering and exiting quoted text tokens. | Below is the the instruction that describes the task:
### Input:
Sets the context's ``quoted_text`` flag. Useful when entering and exiting quoted text tokens.
### Response:
def set_quoted_text(self, quoted_text):
    """Record whether the context is currently inside quoted text.

    Entering or leaving quoted text always ends any line comment in
    progress, so ``line_comment`` is cleared as a side effect.  Returns
    ``self`` so calls can be chained fluently.
    """
    self.line_comment = False
    self.quoted_text = quoted_text
    return self
def add_bytes(self, data, **kwargs):
"""Adds a set of bytes as a file to IPFS.
.. code-block:: python
>>> c.add_bytes(b"Mary had a little lamb")
'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab'
Also accepts and will stream generator objects.
Parameters
----------
data : bytes
Content to be added as a file
Returns
-------
str : Hash of the added IPFS object
"""
body, headers = multipart.stream_bytes(data, self.chunk_size)
return self._client.request('/add', decoder='json',
data=body, headers=headers, **kwargs) | Adds a set of bytes as a file to IPFS.
.. code-block:: python
>>> c.add_bytes(b"Mary had a little lamb")
'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab'
Also accepts and will stream generator objects.
Parameters
----------
data : bytes
Content to be added as a file
Returns
-------
str : Hash of the added IPFS object | Below is the instruction that describes the task:
### Input:
Adds a set of bytes as a file to IPFS.
.. code-block:: python
>>> c.add_bytes(b"Mary had a little lamb")
'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab'
Also accepts and will stream generator objects.
Parameters
----------
data : bytes
Content to be added as a file
Returns
-------
str : Hash of the added IPFS object
### Response:
def add_bytes(self, data, **kwargs):
    """Add raw bytes to IPFS as a single file.

    .. code-block:: python
        >>> c.add_bytes(b"Mary had a little lamb")
        'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab'

    Generator objects are accepted as well and are streamed.

    Parameters
    ----------
    data : bytes
        Content to be added as a file

    Returns
    -------
    str : Hash of the added IPFS object
    """
    # Wrap the payload in a multipart stream so large inputs are not
    # buffered in memory all at once.
    payload, payload_headers = multipart.stream_bytes(data, self.chunk_size)
    return self._client.request(
        '/add',
        decoder='json',
        data=payload,
        headers=payload_headers,
        **kwargs)
def remove_end_NaN(self, data, var=None):
""" Remove end NaN.
CHECK: Note issue with multi-column df.
Parameters
----------
data : pd.DataFrame()
Input dataframe.
var : list(str)
List that specifies specific columns of dataframe.
Returns
-------
pd.DataFrame()
Dataframe starting from its last valid index.
"""
# Limit to one or some variables
if var:
end_ok_data = data[var].last_valid_index()
else:
end_ok_data = data.last_valid_index()
data = data.loc[:end_ok_data, :]
return data | Remove end NaN.
CHECK: Note issue with multi-column df.
Parameters
----------
data : pd.DataFrame()
Input dataframe.
var : list(str)
List that specifies specific columns of dataframe.
Returns
-------
pd.DataFrame()
Dataframe starting from its last valid index. | Below is the instruction that describes the task:
### Input:
Remove end NaN.
CHECK: Note issue with multi-column df.
Parameters
----------
data : pd.DataFrame()
Input dataframe.
var : list(str)
List that specifies specific columns of dataframe.
Returns
-------
pd.DataFrame()
Dataframe starting from its last valid index.
### Response:
def remove_end_NaN(self, data, var=None):
    """ Drop trailing rows beyond the last valid (non-NaN) index.

    CHECK: Note issue with multi-column df.

    Parameters
    ----------
    data : pd.DataFrame()
        Input dataframe.
    var : list(str)
        Optional subset of columns used to locate the last valid row.

    Returns
    -------
    pd.DataFrame()
        Dataframe truncated at its last valid index.
    """
    # Search only the requested columns when given, otherwise the whole frame.
    frame = data[var] if var else data
    last_valid = frame.last_valid_index()
    return data.loc[:last_valid, :]
def getElementById(id: str) -> Optional[Node]:
"""Get element with ``id``."""
elm = Element._elements_with_id.get(id)
return elm | Get element with ``id``. | Below is the the instruction that describes the task:
### Input:
Get element with ``id``.
### Response:
def getElementById(id: str) -> Optional[Node]:
    """Look up the element registered under ``id``.

    Returns ``None`` when no element with that id is registered.
    """
    return Element._elements_with_id.get(id)
def update_user_password(new_pwd_user_id, new_password,**kwargs):
"""
Update a user's password
"""
#check_perm(kwargs.get('user_id'), 'edit_user')
try:
user_i = db.DBSession.query(User).filter(User.id==new_pwd_user_id).one()
user_i.password = bcrypt.hashpw(str(new_password).encode('utf-8'), bcrypt.gensalt())
return user_i
except NoResultFound:
raise ResourceNotFoundError("User (id=%s) not found"%(new_pwd_user_id)) | Update a user's password | Below is the the instruction that describes the task:
### Input:
Update a user's password
### Response:
def update_user_password(new_pwd_user_id, new_password,**kwargs):
    """Store a new bcrypt-hashed password on the user with the given id.

    Raises ResourceNotFoundError when no user matches ``new_pwd_user_id``.
    """
    # Permission check was intentionally disabled in the original code:
    #check_perm(kwargs.get('user_id'), 'edit_user')
    try:
        user = db.DBSession.query(User).filter(User.id==new_pwd_user_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("User (id=%s) not found"%(new_pwd_user_id))
    # bcrypt operates on bytes; a fresh salt is generated per update.
    user.password = bcrypt.hashpw(str(new_password).encode('utf-8'), bcrypt.gensalt())
    return user
def _get_hba_type(hba_type):
'''
Convert a string representation of a HostHostBusAdapter into an
object reference.
'''
if hba_type == "parallel":
return vim.host.ParallelScsiHba
elif hba_type == "block":
return vim.host.BlockHba
elif hba_type == "iscsi":
return vim.host.InternetScsiHba
elif hba_type == "fibre":
return vim.host.FibreChannelHba
raise ValueError('Unknown Host Bus Adapter Type') | Convert a string representation of a HostHostBusAdapter into an
object reference. | Below is the the instruction that describes the task:
### Input:
Convert a string representation of a HostHostBusAdapter into an
object reference.
### Response:
def _get_hba_type(hba_type):
'''
Convert a string representation of a HostHostBusAdapter into an
object reference.
'''
if hba_type == "parallel":
return vim.host.ParallelScsiHba
elif hba_type == "block":
return vim.host.BlockHba
elif hba_type == "iscsi":
return vim.host.InternetScsiHba
elif hba_type == "fibre":
return vim.host.FibreChannelHba
raise ValueError('Unknown Host Bus Adapter Type') |
def premier_da_la_annee(jd):
'''Determine the year in the French revolutionary calendar in which a given Julian day falls.
Returns Julian day number containing fall equinox (first day of the FR year)'''
p = ephem.previous_fall_equinox(dublin.from_jd(jd))
previous = trunc(dublin.to_jd(p) - 0.5) + 0.5
if previous + 364 < jd:
# test if current day is the equinox if the previous equinox was a long time ago
n = ephem.next_fall_equinox(dublin.from_jd(jd))
nxt = trunc(dublin.to_jd(n) - 0.5) + 0.5
if nxt <= jd:
return nxt
return previous | Determine the year in the French revolutionary calendar in which a given Julian day falls.
Returns Julian day number containing fall equinox (first day of the FR year) | Below is the the instruction that describes the task:
### Input:
Determine the year in the French revolutionary calendar in which a given Julian day falls.
Returns Julian day number containing fall equinox (first day of the FR year)
### Response:
def premier_da_la_annee(jd):
    '''Determine the year in the French revolutionary calendar in which a given Julian day falls.
    Returns Julian day number containing fall equinox (first day of the FR year)'''
    # Previous fall equinox, snapped to the start of its Julian day; the
    # -0.5/+0.5 rounding presumably accounts for Julian days beginning at
    # noon — confirm against the `dublin` conversion helpers.
    p = ephem.previous_fall_equinox(dublin.from_jd(jd))
    previous = trunc(dublin.to_jd(p) - 0.5) + 0.5
    if previous + 364 < jd:
        # test if current day is the equinox if the previous equinox was a long time ago
        n = ephem.next_fall_equinox(dublin.from_jd(jd))
        nxt = trunc(dublin.to_jd(n) - 0.5) + 0.5
        if nxt <= jd:
            return nxt
    return previous |
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta() | Age of the video | Below is the the instruction that describes the task:
### Input:
Age of the video
### Response:
def age(self):
    """Time elapsed since the video file was last modified.

    Returns a zero ``timedelta`` when the file does not exist.
    """
    if not self.exists:
        return timedelta()
    modified = datetime.utcfromtimestamp(os.path.getmtime(self.name))
    return datetime.utcnow() - modified
def auth_criteria(self):
"""
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
"""
# the dictionary we will return
auth = {}
# go over each attribute of the service
for attr in dir(self):
# make sure we could hit an infinite loop
if attr != 'auth_criteria':
# get the actual attribute
attribute = getattr(self, attr)
# if the service represents an auth criteria
if isinstance(attribute, Callable) and hasattr(attribute, '_service_auth'):
# add the criteria to the final results
auth[getattr(self, attr)._service_auth] = attribute
# return the auth mapping
return auth | This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements. | Below is the the instruction that describes the task:
### Input:
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
### Response:
def auth_criteria(self):
    """
    Map each service auth identifier to the callable that enforces it.

    Scans the object's attributes for callables tagged with a
    ``_service_auth`` marker and indexes them by that marker.

    Returns:
        (dict) : the mapping from services to their auth requirements.
    """
    criteria = {}
    for name in dir(self):
        # Skip this attribute itself so a property implementation cannot recurse.
        if name == 'auth_criteria':
            continue
        candidate = getattr(self, name)
        # Only callables explicitly tagged with _service_auth participate.
        if isinstance(candidate, Callable) and hasattr(candidate, '_service_auth'):
            criteria[candidate._service_auth] = candidate
    return criteria
def introspect_access_token(self, access_token_value):
# type: (str) -> Dict[str, Union[str, List[str]]]
"""
Returns authorization data associated with the access token.
See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>.
"""
if access_token_value not in self.access_tokens:
raise InvalidAccessToken('{} unknown'.format(access_token_value))
authz_info = self.access_tokens[access_token_value]
introspection = {'active': authz_info['exp'] >= int(time.time())}
introspection_params = {k: v for k, v in authz_info.items() if k in TokenIntrospectionResponse.c_param}
introspection.update(introspection_params)
return introspection | Returns authorization data associated with the access token.
See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>. | Below is the the instruction that describes the task:
### Input:
Returns authorization data associated with the access token.
See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>.
### Response:
def introspect_access_token(self, access_token_value):
    # type: (str) -> Dict[str, Union[str, List[str]]]
    """
    Returns authorization data associated with the access token.
    See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>.

    Raises InvalidAccessToken when the token was never issued here.
    """
    try:
        authz_info = self.access_tokens[access_token_value]
    except KeyError:
        raise InvalidAccessToken('{} unknown'.format(access_token_value))
    # 'active' reflects whether the token expiry is still in the future.
    response = {'active': authz_info['exp'] >= int(time.time())}
    # Copy through only the parameters defined by the introspection schema.
    for key, value in authz_info.items():
        if key in TokenIntrospectionResponse.c_param:
            response[key] = value
    return response
def case_synopsis(institute_id, case_name):
"""Update (PUT) synopsis of a specific case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
new_synopsis = request.form.get('synopsis')
controllers.update_synopsis(store, institute_obj, case_obj, user_obj, new_synopsis)
return redirect(request.referrer) | Update (PUT) synopsis of a specific case. | Below is the the instruction that describes the task:
### Input:
Update (PUT) synopsis of a specific case.
### Response:
def case_synopsis(institute_id, case_name):
    """Update (PUT) synopsis of a specific case."""
    # NOTE(review): institute_and_case presumably also enforces that the
    # current user may access this case — confirm in its implementation.
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    # New synopsis text comes from the submitted form field.
    new_synopsis = request.form.get('synopsis')
    controllers.update_synopsis(store, institute_obj, case_obj, user_obj, new_synopsis)
    # Send the user back to the page the request came from.
    return redirect(request.referrer) |
def get_functions(fname):
""" get a list of functions from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line.strip()[0:4] == 'def ':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#')[4:], ':') + '</PRE>\n'
if line[0:5] == 'class':
txt += '<PRE>' + strip_text_after_string(strip_text_after_string(line, '#'), ':') + '</PRE>\n'
return txt + '<BR>' | get a list of functions from a Python program | Below is the the instruction that describes the task:
### Input:
get a list of functions from a Python program
### Response:
def get_functions(fname):
    """ Build an HTML summary of function and class headers in a Python file.

    Each ``def``/``class`` signature (comments and trailing body stripped)
    is wrapped in <PRE> tags; the result ends with a <BR>.
    """
    pieces = []
    with open(fname, 'r') as f:
        for line in f:
            # Functions may be indented (methods); classes match at column 0 only.
            if line.strip()[0:4] == 'def ':
                header = strip_text_after_string(strip_text_after_string(line, '#')[4:], ':')
                pieces.append('<PRE>' + header + '</PRE>\n')
            if line[0:5] == 'class':
                header = strip_text_after_string(strip_text_after_string(line, '#'), ':')
                pieces.append('<PRE>' + header + '</PRE>\n')
    return ''.join(pieces) + '<BR>'
def begin_commit():
""" Allow a client to begin a commit and acquire the write lock """
session_token = request.headers['session_token']
repository = request.headers['repository']
#===
current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
if current_user is False: return fail(user_auth_fail_msg)
#===
repository_path = config['repositories'][repository]['path']
def with_exclusive_lock():
# The commit is locked for a given time period to a given session token,
# a client must hold this lock to use any of push_file(), delete_files() and commit().
# It does not matter if the user lock technically expires while a client is writing
# a large file, as the user lock is locked using flock for the duration of any
# operation and thus cannot be stolen by another client. It is updated to be in
# the future before returning to the client. The lock only needs to survive until
# the client owning the lock sends another request and re acquires the flock.
if not can_aquire_user_lock(repository_path, session_token): return fail(lock_fail_msg)
# Commits can only take place if the committing user has the latest revision,
# as committing from an outdated state could cause unexpected results, and may
# have conflicts. Conflicts are resolved during a client update so they are
# handled by the client, and a server interface for this is not needed.
data_store = versioned_storage(repository_path)
if data_store.get_head() != request.headers["previous_revision"]: return fail(need_to_update_msg)
# Should the lock expire, the client which had the lock previously will be unable
# to continue the commit it had in progress. When this, or another client attempts
# to commit again it must do so by first obtaining the lock again by calling begin_commit().
# Any remaining commit data from failed prior commits is garbage collected here.
# While it would technically be possible to implement commit resume should the same
# client resume, I only see commits failing due to a network error and this is so
# rare I don't think it's worth the trouble.
if data_store.have_active_commit(): data_store.rollback()
#------------
data_store.begin()
update_user_lock(repository_path, session_token)
return success()
return lock_access(repository_path, with_exclusive_lock) | Allow a client to begin a commit and acquire the write lock | Below is the the instruction that describes the task:
### Input:
Allow a client to begin a commit and acquire the write lock
### Response:
def begin_commit():
    """ Allow a client to begin a commit and acquire the write lock.

    Expects 'session_token', 'repository' and 'previous_revision' request
    headers; fails unless the client is authenticated, can take the user
    lock, and is committing on top of the current head revision.
    """
    session_token = request.headers['session_token']
    repository = request.headers['repository']
    #===
    current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
    if current_user is False: return fail(user_auth_fail_msg)
    #===
    repository_path = config['repositories'][repository]['path']
    def with_exclusive_lock():
        # The commit is locked for a given time period to a given session token,
        # a client must hold this lock to use any of push_file(), delete_files() and commit().
        # It does not matter if the user lock technically expires while a client is writing
        # a large file, as the user lock is locked using flock for the duration of any
        # operation and thus cannot be stolen by another client. It is updated to be in
        # the future before returning to the client. The lock only needs to survive until
        # the client owning the lock sends another request and re acquires the flock.
        if not can_aquire_user_lock(repository_path, session_token): return fail(lock_fail_msg)
        # Commits can only take place if the committing user has the latest revision,
        # as committing from an outdated state could cause unexpected results, and may
        # have conflicts. Conflicts are resolved during a client update so they are
        # handled by the client, and a server interface for this is not needed.
        data_store = versioned_storage(repository_path)
        if data_store.get_head() != request.headers["previous_revision"]: return fail(need_to_update_msg)
        # Should the lock expire, the client which had the lock previously will be unable
        # to continue the commit it had in progress. When this, or another client attempts
        # to commit again it must do so by first obtaining the lock again by calling begin_commit().
        # Any remaining commit data from failed prior commits is garbage collected here.
        # While it would technically be possible to implement commit resume should the same
        # client resume, I only see commits failing due to a network error and this is so
        # rare I don't think it's worth the trouble.
        if data_store.have_active_commit(): data_store.rollback()
        #------------
        # Open the new commit and refresh the caller's lock before replying.
        data_store.begin()
        update_user_lock(repository_path, session_token)
        return success()
    return lock_access(repository_path, with_exclusive_lock) |
def get_conf_path(filename=None):
"""Return absolute path to the config file with the specified filename."""
# Define conf_dir
if running_under_pytest() or SAFE_MODE:
# Use clean config dir if running tests or the user requests it.
conf_dir = get_clean_conf_dir()
elif sys.platform.startswith('linux'):
# This makes us follow the XDG standard to save our settings
# on Linux, as it was requested on Issue 2629
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
if not xdg_config_home:
xdg_config_home = osp.join(get_home_dir(), '.config')
if not osp.isdir(xdg_config_home):
os.makedirs(xdg_config_home)
conf_dir = osp.join(xdg_config_home, SUBFOLDER)
else:
conf_dir = osp.join(get_home_dir(), SUBFOLDER)
# Create conf_dir
if not osp.isdir(conf_dir):
if running_under_pytest() or SAFE_MODE:
os.makedirs(conf_dir)
else:
os.mkdir(conf_dir)
if filename is None:
return conf_dir
else:
return osp.join(conf_dir, filename) | Return absolute path to the config file with the specified filename. | Below is the the instruction that describes the task:
### Input:
Return absolute path to the config file with the specified filename.
### Response:
def get_conf_path(filename=None):
    """Return absolute path to the config file with the specified filename.

    When ``filename`` is None the configuration directory itself is
    returned.  The directory is created on first use; under pytest or
    SAFE_MODE a throwaway config directory is used instead of the real one.
    """
    # Define conf_dir
    if running_under_pytest() or SAFE_MODE:
        # Use clean config dir if running tests or the user requests it.
        conf_dir = get_clean_conf_dir()
    elif sys.platform.startswith('linux'):
        # This makes us follow the XDG standard to save our settings
        # on Linux, as it was requested on Issue 2629
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
        if not xdg_config_home:
            xdg_config_home = osp.join(get_home_dir(), '.config')
        if not osp.isdir(xdg_config_home):
            os.makedirs(xdg_config_home)
        conf_dir = osp.join(xdg_config_home, SUBFOLDER)
    else:
        # Non-Linux platforms keep the subfolder directly in the home dir.
        conf_dir = osp.join(get_home_dir(), SUBFOLDER)
    # Create conf_dir
    if not osp.isdir(conf_dir):
        if running_under_pytest() or SAFE_MODE:
            os.makedirs(conf_dir)
        else:
            os.mkdir(conf_dir)
    if filename is None:
        return conf_dir
    else:
        return osp.join(conf_dir, filename) |
def unique_string(length=UUID_LENGTH):
'''Generate a unique string'''
# We need a string at least as long as length
string = str(uuid4()) * int(math.ceil(length / float(UUID_LENGTH)))
return string[:length] if length else string | Generate a unique string | Below is the the instruction that describes the task:
### Input:
Generate a unique string
### Response:
def unique_string(length=UUID_LENGTH):
    '''Generate a unique string of exactly ``length`` characters.

    UUID4 text is repeated until it covers the requested length and then
    trimmed; a falsy ``length`` returns the untrimmed repetition.
    '''
    # Enough whole UUIDs to cover the requested length.
    repeats = int(math.ceil(length / float(UUID_LENGTH)))
    candidate = str(uuid4()) * repeats
    if not length:
        return candidate
    return candidate[:length]
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append("%s=%s" % (key, quote_header_value(value)))
return "; ".join(segments) | The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append. | Below is the the instruction that describes the task:
### Input:
The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
### Response:
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    # A None header contributes nothing; options with a None value are
    # emitted as bare flags, the rest as quoted key=value pairs.
    parts = [] if header is None else [header]
    for key, value in iteritems(options):
        if value is None:
            parts.append(key)
        else:
            parts.append("%s=%s" % (key, quote_header_value(value)))
    return "; ".join(parts)
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y+h-4)
# When menu returned, indicate selection in button
evt.Skip() | Handle menu button pressed. | Below is the the instruction that describes the task:
### Input:
Handle menu button pressed.
### Response:
def _onMenuButton(self, evt):
    """Handle menu button pressed."""
    # Pop the menu up just under the button (overlapping its bottom edge by 4px).
    x, y = self.GetPositionTuple()
    w, h = self.GetSizeTuple()
    self.PopupMenuXY(self._menu, x, y+h-4)
    # When menu returned, indicate selection in button
    evt.Skip() |
def get_variables():
"""
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
"""
assert request.method == "POST", "POST request expected received {}".format(request.method)
if request.method == "POST":
try:
selected_run = request.form["selected_run"]
variables = utils.get_variables(selected_run)
# Reset current_index when you select a new run
variable_names = variables.items()
global current_index
current_index = {}
if len(current_index) < 1:
for _, v_n in variable_names:
current_index["{}".format(v_n)] = 0
return jsonify(variables)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>} | Below is the the instruction that describes the task:
### Input:
Send a dictionary of variables associated with the selected run.
Usage description:
This function is usually called to get and display the list of runs associated with a selected project available
in the database for the user to view.
:return: JSON, {<int_keys>: <run_name>}
### Response:
def get_variables():
    """
    Send a dictionary of variables associated with the selected run.
    Usage description:
    This function is usually called to get and display the list of runs associated with a selected project available
    in the database for the user to view.
    :return: JSON, {<int_keys>: <run_name>}
    """
    # ``assert`` is stripped under ``python -O``; the explicit method check
    # below is kept as the guard that always runs.
    assert request.method == "POST", "POST request expected received {}".format(request.method)
    if request.method == "POST":
        try:
            selected_run = request.form["selected_run"]
            variables = utils.get_variables(selected_run)
            # Reset the per-variable frame index whenever a new run is
            # selected. (Replaces the previous always-true
            # ``if len(current_index) < 1`` check — the dict had just been
            # rebound to ``{}`` — and the unused ``variable_names``
            # intermediate.)
            global current_index
            current_index = {"{}".format(v_n): 0 for v_n in variables.values()}
            return jsonify(variables)
        except Exception as e:
            logging.error(e)
    # Fallback sentinel payload on error (or non-POST under ``-O``).
    return jsonify({"0": "__EMPTY"})
def nm2lt7(short_nm: float, long_nm: float, step_cminv: float = 20) -> Tuple[float, float, int]:
    """converts wavelength in nm to cm^-1
    minimum meaningful step is 20, but 5 is minimum before crashing lowtran
    short: shortest wavelength e.g. 200 nm
    long: longest wavelength e.g. 30000 nm
    step: step size in cm^-1 e.g. 20
    output in cm^-1
    """
    # 1e7 nm per cm: wavenumber [cm^-1] = 1e7 / wavelength [nm], so the
    # shortest wavelength gives the largest wavenumber.
    short = 1e7 / short_nm
    long = 1e7 / long_nm
    # N is the number of sample points (an int, hence the corrected return
    # annotation); ceil + 1 guarantees the whole window is covered.
    N = int(np.ceil((short-long) / step_cminv)) + 1 # yes, ceil
    return short, long, N | converts wavelength in nm to cm^-1
minimum meaningful step is 20, but 5 is minimum before crashing lowtran
short: shortest wavelength e.g. 200 nm
long: longest wavelength e.g. 30000 nm
step: step size in cm^-1 e.g. 20
    output in cm^-1 | Below is the instruction that describes the task:
### Input:
converts wavelength in nm to cm^-1
minimum meaningful step is 20, but 5 is minimum before crashing lowtran
short: shortest wavelength e.g. 200 nm
long: longest wavelength e.g. 30000 nm
step: step size in cm^-1 e.g. 20
output in cm^-1
### Response:
def nm2lt7(short_nm: float, long_nm: float, step_cminv: float = 20) -> Tuple[float, float, float]:
"""converts wavelength in nm to cm^-1
minimum meaningful step is 20, but 5 is minimum before crashing lowtran
short: shortest wavelength e.g. 200 nm
long: longest wavelength e.g. 30000 nm
step: step size in cm^-1 e.g. 20
output in cm^-1
"""
short = 1e7 / short_nm
long = 1e7 / long_nm
N = int(np.ceil((short-long) / step_cminv)) + 1 # yes, ceil
return short, long, N |
def get_projected_plot(self, selection, mode='rgb', interpolate_factor=4,
                       circle_size=150, projection_cutoff=0.001,
                       zero_to_efermi=True, ymin=-6., ymax=6., width=None,
                       height=None, vbm_cbm_marker=False,
                       ylabel='Energy (eV)',
                       dpi=400, plt=None,
                       dos_plotter=None, dos_options=None, dos_label=None,
                       dos_aspect=3, aspect=None, fonts=None, style=None,
                       no_base_style=False):
        """Get a :obj:`matplotlib.pyplot` of the projected band structure.
        If the system is spin polarised and ``mode = 'rgb'`` spin up and spin
        down bands are differentiated by solid and dashed lines, respectively.
        For the other modes, spin up and spin down are plotted separately.
        Args:
            selection (list): A list of :obj:`tuple` or :obj:`string`
                identifying which elements and orbitals to project on to the
                band structure. These can be specified by both element and
                orbital, for example, the following will project the Bi s, p
                and S p orbitals::
                    [('Bi', 's'), ('Bi', 'p'), ('S', 'p')]
                If just the element is specified then all the orbitals of
                that element are combined. For example, to sum all the S
                orbitals::
                    [('Bi', 's'), ('Bi', 'p'), 'S']
                You can also choose to sum particular orbitals by supplying a
                :obj:`tuple` of orbitals. For example, to sum the S s, p, and
                d orbitals into a single projection::
                    [('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]
                If ``mode = 'rgb'``, a maximum of 3 orbital/element
                combinations can be plotted simultaneously (one for red, green
                and blue), otherwise an unlimited number of elements/orbitals
                can be selected.
            mode (:obj:`str`, optional): Type of projected band structure to
                plot. Options are:
                    "rgb"
                        The band structure line color depends on the character
                        of the band. Each element/orbital contributes either
                        red, green or blue with the corresponding line colour a
                        mixture of all three colours. This mode only supports
                        up to 3 elements/orbitals combinations. The order of
                        the ``selection`` :obj:`tuple` determines which colour
                        is used for each selection.
                    "stacked"
                        The element/orbital contributions are drawn as a
                        series of stacked circles, with the colour depending on
                        the composition of the band. The size of the circles
                        can be scaled using the ``circle_size`` option.
            interpolate_factor (:obj:`int`, optional): The factor by which to
                interpolate the band structure (necessary to make smooth
                lines). A larger number indicates greater interpolation.
            circle_size (:obj:`float`, optional): The area of the circles used
                when ``mode = 'stacked'``.
            projection_cutoff (:obj:`float`): Don't plot projections with
                intensities below this number. This option is useful for
                stacked plots, where small projections clutter the plot.
            zero_to_efermi (:obj:`bool`): Normalise the plot such that the
                valence band maximum is set as 0 eV.
            ymin (:obj:`float`, optional): The minimum energy on the y-axis.
            ymax (:obj:`float`, optional): The maximum energy on the y-axis.
            width (:obj:`float`, optional): The width of the plot.
            height (:obj:`float`, optional): The height of the plot.
            vbm_cbm_marker (:obj:`bool`, optional): Plot markers to indicate
                the VBM and CBM locations.
            ylabel (:obj:`str`, optional): y-axis (i.e. energy) label/units
            dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
                the image.
            plt (:obj:`matplotlib.pyplot`, optional): A
                :obj:`matplotlib.pyplot` object to use for plotting.
            dos_plotter (:obj:`~sumo.plotting.dos_plotter.SDOSPlotter`, \
                optional): Plot the density of states alongside the band
                structure. This should be a
                :obj:`~sumo.plotting.dos_plotter.SDOSPlotter` object
                initialised with the data to plot.
            dos_options (:obj:`dict`, optional): The options for density of
                states plotting. This should be formatted as a :obj:`dict`
                containing any of the following keys:
                    "yscale" (:obj:`float`)
                        Scaling factor for the y-axis.
                    "xmin" (:obj:`float`)
                        The minimum energy to mask the energy and density of
                        states data (reduces plotting load).
                    "xmax" (:obj:`float`)
                        The maximum energy to mask the energy and density of
                        states data (reduces plotting load).
                    "colours" (:obj:`dict`)
                        Use custom colours for specific element and orbital
                        combinations. Specified as a :obj:`dict` of
                        :obj:`dict` of the colours. For example::
                            {
                                'Sn': {'s': 'r', 'p': 'b'},
                                'O': {'s': '#000000'}
                            }
                        The colour can be a hex code, series of rgb value, or
                        any other format supported by matplotlib.
                    "plot_total" (:obj:`bool`)
                        Plot the total density of states. Defaults to ``True``.
                    "legend_cutoff" (:obj:`float`)
                        The cut-off (in % of the maximum density of states
                        within the plotting range) for an elemental orbital to
                        be labelled in the legend. This prevents the legend
                        from containing labels for orbitals that have very
                        little contribution in the plotting range.
                    "subplot" (:obj:`bool`)
                        Plot the density of states for each element on separate
                        subplots. Defaults to ``False``.
            dos_label (:obj:`str`, optional): DOS axis label/units
            dos_aspect (:obj:`float`, optional): Aspect ratio for the band
                structure and density of states subplot. For example,
                ``dos_aspect = 3``, results in a ratio of 3:1, for the band
                structure:dos plots.
            aspect (:obj:`float`, optional): The aspect ratio of the band
                structure plot. By default the dimensions of the figure size
                are used to determine the aspect ratio. Set to ``1`` to force
                the plot to be square.
            fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
                a single font, specified as a :obj:`str`, or several fonts,
                specified as a :obj:`list` of :obj:`str`.
            style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
                style specifications, to be composed on top of Sumo base
                style.
            no_base_style (:obj:`bool`, optional): Prevent use of sumo base
                style. This can make alternative styles behave more
                predictably.
        Returns:
            :obj:`matplotlib.pyplot`: The projected electronic band structure
            plot.
        """
        # 'rgb' maps each selection entry to one of the R/G/B channels, so at
        # most three entries are allowed in that mode.
        if mode == 'rgb' and len(selection) > 3:
            raise ValueError('Too many elements/orbitals specified (max 3)')
        elif mode == 'solo' and dos_plotter:
            raise ValueError('Solo mode plotting with DOS not supported')
        if dos_plotter:
            # Band structure on the left, DOS on the right, width ratio
            # dos_aspect:1 with no horizontal gap between the two axes.
            plt = pretty_subplot(1, 2, width, height, sharex=False, dpi=dpi,
                                 plt=plt,
                                 gridspec_kw={'width_ratios': [dos_aspect, 1],
                                              'wspace': 0})
            ax = plt.gcf().axes[0]
        else:
            plt = pretty_plot(width, height, dpi=dpi, plt=plt)
            ax = plt.gca()
        data = self.bs_plot_data(zero_to_efermi)
        nbranches = len(data['distances'])
        # Ensure we do spin up first, then spin down
        spins = sorted(self._bs.bands.keys(), key=lambda s: -s.value)
        proj = get_projections_by_branches(self._bs, selection,
                                           normalise='select')
        # NOTE(review): ``fonts``, ``style`` and ``no_base_style`` are accepted
        # but never referenced in this body — presumably consumed by a styling
        # decorator in the original project; confirm before removing.
        # nd is branch index
        for spin, nd in it.product(spins, range(nbranches)):
            # mask data to reduce plotting load: keep only bands that enter
            # the [ymin, ymax] window (with a small 0.05 eV margin)
            bands = np.array(data['energy'][nd][str(spin)])
            mask = np.where(np.any(bands > ymin - 0.05, axis=1) &
                            np.any(bands < ymax + 0.05, axis=1))
            distances = data['distances'][nd]
            bands = bands[mask]
            weights = [proj[nd][i][spin][mask] for i in range(len(selection))]
            # interpolate band structure to improve smoothness
            dx = (distances[1] - distances[0]) / interpolate_factor
            temp_dists = np.arange(distances[0], distances[-1], dx)
            bands = interp1d(distances, bands, axis=1)(temp_dists)
            weights = interp1d(distances, weights, axis=2)(temp_dists)
            distances = temp_dists
            # sometimes VASP produces very small negative weights
            weights[weights < 0] = 0
            if mode == 'rgb':
                # colours aren't used now but needed later for legend
                colours = ['#ff0000', '#00ff00', '#0000ff']
                # if only two orbitals then just use red and blue
                if len(weights) == 2:
                    weights = np.insert(weights, 1, np.zeros(weights[0].shape),
                                        axis=0)
                    colours = ['#ff0000', '#0000ff']
                # NOTE(review): weights[2] is indexed unconditionally below, so
                # a single-entry selection would raise IndexError in 'rgb'
                # mode — confirm callers always pass 2 or 3 selections here.
                # Solid lines for spin up, dashed for spin down.
                ls = '-' if spin == Spin.up else '--'
                lc = rgbline(distances, bands, weights[0], weights[1],
                             weights[2], alpha=1, linestyles=ls,
                             linewidth=(rcParams['lines.linewidth'] * 1.25))
                ax.add_collection(lc)
            elif mode == 'stacked':
                # TODO: Handle spin
                # use some nice custom colours first, then default colours
                colours = ['#3952A3', '#FAA41A', '#67BC47', '#6ECCDD',
                           '#ED2025']
                colour_series = rcParams['axes.prop_cycle'].by_key()['color']
                colours.extend(colour_series)
                # very small circles look crap
                weights[weights < projection_cutoff] = 0
                # Tile the x positions once per band so scatter receives
                # equal-length x/y sequences after flattening.
                distances = list(distances) * len(bands)
                bands = bands.flatten()
                # Negative z-orders draw earlier selections on top.
                zorders = range(-len(weights), 0)
                for w, c, z in zip(weights, colours, zorders):
                    ax.scatter(distances, bands, c=c, s=circle_size * w ** 2,
                               zorder=z, rasterized=True)
        # plot the legend
        # NOTE(review): ``colours`` is bound inside the branch loop above; with
        # zero branches this would raise NameError — verify upstream
        # guarantees at least one branch.
        for c, spec in zip(colours, selection):
            if isinstance(spec, str):
                label = spec
            else:
                label = '{} ({})'.format(spec[0], " + ".join(spec[1]))
            # Off-screen proxy points give the legend correctly coloured
            # handles without affecting the visible data.
            ax.scatter([-10000], [-10000], c=c, s=50, label=label,
                       edgecolors='none')
        if dos_plotter:
            loc = 1
            anchor_point = (-0.2, 1)
        else:
            loc = 2
            anchor_point = (0.95, 1)
        ax.legend(bbox_to_anchor=anchor_point, loc=loc, frameon=False,
                  handletextpad=0.1, scatterpoints=1)
        # finish and tidy plot
        self._maketicks(ax, ylabel=ylabel)
        self._makeplot(ax, plt.gcf(), data, zero_to_efermi=zero_to_efermi,
                       vbm_cbm_marker=vbm_cbm_marker, width=width,
                       height=height, ymin=ymin, ymax=ymax,
                       dos_plotter=dos_plotter, dos_options=dos_options,
                       dos_label=dos_label, aspect=aspect)
        return plt | Get a :obj:`matplotlib.pyplot` of the projected band structure.
If the system is spin polarised and ``mode = 'rgb'`` spin up and spin
down bands are differentiated by solid and dashed lines, respectively.
For the other modes, spin up and spin down are plotted separately.
Args:
selection (list): A list of :obj:`tuple` or :obj:`string`
identifying which elements and orbitals to project on to the
band structure. These can be specified by both element and
orbital, for example, the following will project the Bi s, p
and S p orbitals::
[('Bi', 's'), ('Bi', 'p'), ('S', 'p')]
If just the element is specified then all the orbitals of
that element are combined. For example, to sum all the S
orbitals::
[('Bi', 's'), ('Bi', 'p'), 'S']
You can also choose to sum particular orbitals by supplying a
:obj:`tuple` of orbitals. For example, to sum the S s, p, and
d orbitals into a single projection::
[('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]
If ``mode = 'rgb'``, a maximum of 3 orbital/element
combinations can be plotted simultaneously (one for red, green
and blue), otherwise an unlimited number of elements/orbitals
can be selected.
mode (:obj:`str`, optional): Type of projected band structure to
plot. Options are:
"rgb"
The band structure line color depends on the character
of the band. Each element/orbital contributes either
red, green or blue with the corresponding line colour a
mixture of all three colours. This mode only supports
up to 3 elements/orbitals combinations. The order of
the ``selection`` :obj:`tuple` determines which colour
is used for each selection.
"stacked"
The element/orbital contributions are drawn as a
series of stacked circles, with the colour depending on
the composition of the band. The size of the circles
can be scaled using the ``circle_size`` option.
interpolate_factor (:obj:`int`, optional): The factor by which to
interpolate the band structure (necessary to make smooth
lines). A larger number indicates greater interpolation.
circle_size (:obj:`float`, optional): The area of the circles used
when ``mode = 'stacked'``.
projection_cutoff (:obj:`float`): Don't plot projections with
intensities below this number. This option is useful for
stacked plots, where small projections clutter the plot.
zero_to_efermi (:obj:`bool`): Normalise the plot such that the
valence band maximum is set as 0 eV.
ymin (:obj:`float`, optional): The minimum energy on the y-axis.
ymax (:obj:`float`, optional): The maximum energy on the y-axis.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
vbm_cbm_marker (:obj:`bool`, optional): Plot markers to indicate
the VBM and CBM locations.
ylabel (:obj:`str`, optional): y-axis (i.e. energy) label/units
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
dos_plotter (:obj:`~sumo.plotting.dos_plotter.SDOSPlotter`, \
optional): Plot the density of states alongside the band
structure. This should be a
:obj:`~sumo.plotting.dos_plotter.SDOSPlotter` object
initialised with the data to plot.
dos_options (:obj:`dict`, optional): The options for density of
states plotting. This should be formatted as a :obj:`dict`
containing any of the following keys:
"yscale" (:obj:`float`)
Scaling factor for the y-axis.
"xmin" (:obj:`float`)
The minimum energy to mask the energy and density of
states data (reduces plotting load).
"xmax" (:obj:`float`)
The maximum energy to mask the energy and density of
states data (reduces plotting load).
"colours" (:obj:`dict`)
Use custom colours for specific element and orbital
combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or
any other format supported by matplotlib.
"plot_total" (:obj:`bool`)
Plot the total density of states. Defaults to ``True``.
"legend_cutoff" (:obj:`float`)
The cut-off (in % of the maximum density of states
within the plotting range) for an elemental orbital to
be labelled in the legend. This prevents the legend
from containing labels for orbitals that have very
little contribution in the plotting range.
"subplot" (:obj:`bool`)
Plot the density of states for each element on separate
subplots. Defaults to ``False``.
dos_label (:obj:`str`, optional): DOS axis label/units
dos_aspect (:obj:`float`, optional): Aspect ratio for the band
structure and density of states subplot. For example,
``dos_aspect = 3``, results in a ratio of 3:1, for the band
structure:dos plots.
aspect (:obj:`float`, optional): The aspect ratio of the band
structure plot. By default the dimensions of the figure size
are used to determine the aspect ratio. Set to ``1`` to force
the plot to be square.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The projected electronic band structure
            plot. | Below is the instruction that describes the task:
### Input:
Get a :obj:`matplotlib.pyplot` of the projected band structure.
If the system is spin polarised and ``mode = 'rgb'`` spin up and spin
down bands are differentiated by solid and dashed lines, respectively.
For the other modes, spin up and spin down are plotted separately.
Args:
selection (list): A list of :obj:`tuple` or :obj:`string`
identifying which elements and orbitals to project on to the
band structure. These can be specified by both element and
orbital, for example, the following will project the Bi s, p
and S p orbitals::
[('Bi', 's'), ('Bi', 'p'), ('S', 'p')]
If just the element is specified then all the orbitals of
that element are combined. For example, to sum all the S
orbitals::
[('Bi', 's'), ('Bi', 'p'), 'S']
You can also choose to sum particular orbitals by supplying a
:obj:`tuple` of orbitals. For example, to sum the S s, p, and
d orbitals into a single projection::
[('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]
If ``mode = 'rgb'``, a maximum of 3 orbital/element
combinations can be plotted simultaneously (one for red, green
and blue), otherwise an unlimited number of elements/orbitals
can be selected.
mode (:obj:`str`, optional): Type of projected band structure to
plot. Options are:
"rgb"
The band structure line color depends on the character
of the band. Each element/orbital contributes either
red, green or blue with the corresponding line colour a
mixture of all three colours. This mode only supports
up to 3 elements/orbitals combinations. The order of
the ``selection`` :obj:`tuple` determines which colour
is used for each selection.
"stacked"
The element/orbital contributions are drawn as a
series of stacked circles, with the colour depending on
the composition of the band. The size of the circles
can be scaled using the ``circle_size`` option.
interpolate_factor (:obj:`int`, optional): The factor by which to
interpolate the band structure (necessary to make smooth
lines). A larger number indicates greater interpolation.
circle_size (:obj:`float`, optional): The area of the circles used
when ``mode = 'stacked'``.
projection_cutoff (:obj:`float`): Don't plot projections with
intensities below this number. This option is useful for
stacked plots, where small projections clutter the plot.
zero_to_efermi (:obj:`bool`): Normalise the plot such that the
valence band maximum is set as 0 eV.
ymin (:obj:`float`, optional): The minimum energy on the y-axis.
ymax (:obj:`float`, optional): The maximum energy on the y-axis.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
vbm_cbm_marker (:obj:`bool`, optional): Plot markers to indicate
the VBM and CBM locations.
ylabel (:obj:`str`, optional): y-axis (i.e. energy) label/units
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
dos_plotter (:obj:`~sumo.plotting.dos_plotter.SDOSPlotter`, \
optional): Plot the density of states alongside the band
structure. This should be a
:obj:`~sumo.plotting.dos_plotter.SDOSPlotter` object
initialised with the data to plot.
dos_options (:obj:`dict`, optional): The options for density of
states plotting. This should be formatted as a :obj:`dict`
containing any of the following keys:
"yscale" (:obj:`float`)
Scaling factor for the y-axis.
"xmin" (:obj:`float`)
The minimum energy to mask the energy and density of
states data (reduces plotting load).
"xmax" (:obj:`float`)
The maximum energy to mask the energy and density of
states data (reduces plotting load).
"colours" (:obj:`dict`)
Use custom colours for specific element and orbital
combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or
any other format supported by matplotlib.
"plot_total" (:obj:`bool`)
Plot the total density of states. Defaults to ``True``.
"legend_cutoff" (:obj:`float`)
The cut-off (in % of the maximum density of states
within the plotting range) for an elemental orbital to
be labelled in the legend. This prevents the legend
from containing labels for orbitals that have very
little contribution in the plotting range.
"subplot" (:obj:`bool`)
Plot the density of states for each element on separate
subplots. Defaults to ``False``.
dos_label (:obj:`str`, optional): DOS axis label/units
dos_aspect (:obj:`float`, optional): Aspect ratio for the band
structure and density of states subplot. For example,
``dos_aspect = 3``, results in a ratio of 3:1, for the band
structure:dos plots.
aspect (:obj:`float`, optional): The aspect ratio of the band
structure plot. By default the dimensions of the figure size
are used to determine the aspect ratio. Set to ``1`` to force
the plot to be square.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The projected electronic band structure
plot.
### Response:
def get_projected_plot(self, selection, mode='rgb', interpolate_factor=4,
                       circle_size=150, projection_cutoff=0.001,
                       zero_to_efermi=True, ymin=-6., ymax=6., width=None,
                       height=None, vbm_cbm_marker=False,
                       ylabel='Energy (eV)',
                       dpi=400, plt=None,
                       dos_plotter=None, dos_options=None, dos_label=None,
                       dos_aspect=3, aspect=None, fonts=None, style=None,
                       no_base_style=False):
        """Get a :obj:`matplotlib.pyplot` of the projected band structure.
        If the system is spin polarised and ``mode = 'rgb'`` spin up and spin
        down bands are differentiated by solid and dashed lines, respectively.
        For the other modes, spin up and spin down are plotted separately.
        Args:
            selection (list): A list of :obj:`tuple` or :obj:`string`
                identifying which elements and orbitals to project on to the
                band structure. These can be specified by both element and
                orbital, for example, the following will project the Bi s, p
                and S p orbitals::
                    [('Bi', 's'), ('Bi', 'p'), ('S', 'p')]
                If just the element is specified then all the orbitals of
                that element are combined. For example, to sum all the S
                orbitals::
                    [('Bi', 's'), ('Bi', 'p'), 'S']
                You can also choose to sum particular orbitals by supplying a
                :obj:`tuple` of orbitals. For example, to sum the S s, p, and
                d orbitals into a single projection::
                    [('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]
                If ``mode = 'rgb'``, a maximum of 3 orbital/element
                combinations can be plotted simultaneously (one for red, green
                and blue), otherwise an unlimited number of elements/orbitals
                can be selected.
            mode (:obj:`str`, optional): Type of projected band structure to
                plot. Options are:
                    "rgb"
                        The band structure line color depends on the character
                        of the band. Each element/orbital contributes either
                        red, green or blue with the corresponding line colour a
                        mixture of all three colours. This mode only supports
                        up to 3 elements/orbitals combinations. The order of
                        the ``selection`` :obj:`tuple` determines which colour
                        is used for each selection.
                    "stacked"
                        The element/orbital contributions are drawn as a
                        series of stacked circles, with the colour depending on
                        the composition of the band. The size of the circles
                        can be scaled using the ``circle_size`` option.
            interpolate_factor (:obj:`int`, optional): The factor by which to
                interpolate the band structure (necessary to make smooth
                lines). A larger number indicates greater interpolation.
            circle_size (:obj:`float`, optional): The area of the circles used
                when ``mode = 'stacked'``.
            projection_cutoff (:obj:`float`): Don't plot projections with
                intensities below this number. This option is useful for
                stacked plots, where small projections clutter the plot.
            zero_to_efermi (:obj:`bool`): Normalise the plot such that the
                valence band maximum is set as 0 eV.
            ymin (:obj:`float`, optional): The minimum energy on the y-axis.
            ymax (:obj:`float`, optional): The maximum energy on the y-axis.
            width (:obj:`float`, optional): The width of the plot.
            height (:obj:`float`, optional): The height of the plot.
            vbm_cbm_marker (:obj:`bool`, optional): Plot markers to indicate
                the VBM and CBM locations.
            ylabel (:obj:`str`, optional): y-axis (i.e. energy) label/units
            dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
                the image.
            plt (:obj:`matplotlib.pyplot`, optional): A
                :obj:`matplotlib.pyplot` object to use for plotting.
            dos_plotter (:obj:`~sumo.plotting.dos_plotter.SDOSPlotter`, \
                optional): Plot the density of states alongside the band
                structure. This should be a
                :obj:`~sumo.plotting.dos_plotter.SDOSPlotter` object
                initialised with the data to plot.
            dos_options (:obj:`dict`, optional): The options for density of
                states plotting. This should be formatted as a :obj:`dict`
                containing any of the following keys:
                    "yscale" (:obj:`float`)
                        Scaling factor for the y-axis.
                    "xmin" (:obj:`float`)
                        The minimum energy to mask the energy and density of
                        states data (reduces plotting load).
                    "xmax" (:obj:`float`)
                        The maximum energy to mask the energy and density of
                        states data (reduces plotting load).
                    "colours" (:obj:`dict`)
                        Use custom colours for specific element and orbital
                        combinations. Specified as a :obj:`dict` of
                        :obj:`dict` of the colours. For example::
                            {
                                'Sn': {'s': 'r', 'p': 'b'},
                                'O': {'s': '#000000'}
                            }
                        The colour can be a hex code, series of rgb value, or
                        any other format supported by matplotlib.
                    "plot_total" (:obj:`bool`)
                        Plot the total density of states. Defaults to ``True``.
                    "legend_cutoff" (:obj:`float`)
                        The cut-off (in % of the maximum density of states
                        within the plotting range) for an elemental orbital to
                        be labelled in the legend. This prevents the legend
                        from containing labels for orbitals that have very
                        little contribution in the plotting range.
                    "subplot" (:obj:`bool`)
                        Plot the density of states for each element on separate
                        subplots. Defaults to ``False``.
            dos_label (:obj:`str`, optional): DOS axis label/units
            dos_aspect (:obj:`float`, optional): Aspect ratio for the band
                structure and density of states subplot. For example,
                ``dos_aspect = 3``, results in a ratio of 3:1, for the band
                structure:dos plots.
            aspect (:obj:`float`, optional): The aspect ratio of the band
                structure plot. By default the dimensions of the figure size
                are used to determine the aspect ratio. Set to ``1`` to force
                the plot to be square.
            fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
                a single font, specified as a :obj:`str`, or several fonts,
                specified as a :obj:`list` of :obj:`str`.
            style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
                style specifications, to be composed on top of Sumo base
                style.
            no_base_style (:obj:`bool`, optional): Prevent use of sumo base
                style. This can make alternative styles behave more
                predictably.
        Returns:
            :obj:`matplotlib.pyplot`: The projected electronic band structure
            plot.
        """
        # 'rgb' maps each selection entry to one of the R/G/B channels, so at
        # most three entries are allowed in that mode.
        if mode == 'rgb' and len(selection) > 3:
            raise ValueError('Too many elements/orbitals specified (max 3)')
        elif mode == 'solo' and dos_plotter:
            raise ValueError('Solo mode plotting with DOS not supported')
        if dos_plotter:
            # Band structure on the left, DOS on the right, width ratio
            # dos_aspect:1 with no horizontal gap between the two axes.
            plt = pretty_subplot(1, 2, width, height, sharex=False, dpi=dpi,
                                 plt=plt,
                                 gridspec_kw={'width_ratios': [dos_aspect, 1],
                                              'wspace': 0})
            ax = plt.gcf().axes[0]
        else:
            plt = pretty_plot(width, height, dpi=dpi, plt=plt)
            ax = plt.gca()
        data = self.bs_plot_data(zero_to_efermi)
        nbranches = len(data['distances'])
        # Ensure we do spin up first, then spin down
        spins = sorted(self._bs.bands.keys(), key=lambda s: -s.value)
        proj = get_projections_by_branches(self._bs, selection,
                                           normalise='select')
        # NOTE(review): ``fonts``, ``style`` and ``no_base_style`` are accepted
        # but never referenced in this body — presumably consumed by a styling
        # decorator in the original project; confirm before removing.
        # nd is branch index
        for spin, nd in it.product(spins, range(nbranches)):
            # mask data to reduce plotting load: keep only bands that enter
            # the [ymin, ymax] window (with a small 0.05 eV margin)
            bands = np.array(data['energy'][nd][str(spin)])
            mask = np.where(np.any(bands > ymin - 0.05, axis=1) &
                            np.any(bands < ymax + 0.05, axis=1))
            distances = data['distances'][nd]
            bands = bands[mask]
            weights = [proj[nd][i][spin][mask] for i in range(len(selection))]
            # interpolate band structure to improve smoothness
            dx = (distances[1] - distances[0]) / interpolate_factor
            temp_dists = np.arange(distances[0], distances[-1], dx)
            bands = interp1d(distances, bands, axis=1)(temp_dists)
            weights = interp1d(distances, weights, axis=2)(temp_dists)
            distances = temp_dists
            # sometimes VASP produces very small negative weights
            weights[weights < 0] = 0
            if mode == 'rgb':
                # colours aren't used now but needed later for legend
                colours = ['#ff0000', '#00ff00', '#0000ff']
                # if only two orbitals then just use red and blue
                if len(weights) == 2:
                    weights = np.insert(weights, 1, np.zeros(weights[0].shape),
                                        axis=0)
                    colours = ['#ff0000', '#0000ff']
                # NOTE(review): weights[2] is indexed unconditionally below, so
                # a single-entry selection would raise IndexError in 'rgb'
                # mode — confirm callers always pass 2 or 3 selections here.
                # Solid lines for spin up, dashed for spin down.
                ls = '-' if spin == Spin.up else '--'
                lc = rgbline(distances, bands, weights[0], weights[1],
                             weights[2], alpha=1, linestyles=ls,
                             linewidth=(rcParams['lines.linewidth'] * 1.25))
                ax.add_collection(lc)
            elif mode == 'stacked':
                # TODO: Handle spin
                # use some nice custom colours first, then default colours
                colours = ['#3952A3', '#FAA41A', '#67BC47', '#6ECCDD',
                           '#ED2025']
                colour_series = rcParams['axes.prop_cycle'].by_key()['color']
                colours.extend(colour_series)
                # very small circles look crap
                weights[weights < projection_cutoff] = 0
                # Tile the x positions once per band so scatter receives
                # equal-length x/y sequences after flattening.
                distances = list(distances) * len(bands)
                bands = bands.flatten()
                # Negative z-orders draw earlier selections on top.
                zorders = range(-len(weights), 0)
                for w, c, z in zip(weights, colours, zorders):
                    ax.scatter(distances, bands, c=c, s=circle_size * w ** 2,
                               zorder=z, rasterized=True)
        # plot the legend
        # NOTE(review): ``colours`` is bound inside the branch loop above; with
        # zero branches this would raise NameError — verify upstream
        # guarantees at least one branch.
        for c, spec in zip(colours, selection):
            if isinstance(spec, str):
                label = spec
            else:
                label = '{} ({})'.format(spec[0], " + ".join(spec[1]))
            # Off-screen proxy points give the legend correctly coloured
            # handles without affecting the visible data.
            ax.scatter([-10000], [-10000], c=c, s=50, label=label,
                       edgecolors='none')
        if dos_plotter:
            loc = 1
            anchor_point = (-0.2, 1)
        else:
            loc = 2
            anchor_point = (0.95, 1)
        ax.legend(bbox_to_anchor=anchor_point, loc=loc, frameon=False,
                  handletextpad=0.1, scatterpoints=1)
        # finish and tidy plot
        self._maketicks(ax, ylabel=ylabel)
        self._makeplot(ax, plt.gcf(), data, zero_to_efermi=zero_to_efermi,
                       vbm_cbm_marker=vbm_cbm_marker, width=width,
                       height=height, ymin=ymin, ymax=ymax,
                       dos_plotter=dos_plotter, dos_options=dos_options,
                       dos_label=dos_label, aspect=aspect)
        return plt
def get_converted(self, symbol, units='CAD', system=None, tag=None):
"""
Uses a Symbol's Dataframe, to build a new Dataframe,
with the data converted to the new units
Parameters
----------
symbol : str or tuple of the form (Dataframe, str)
String representing a symbol's name, or a dataframe
with the data required to be converted. If supplying a
dataframe, units must be passed.
units : str, optional
Specify the units to convert the symbol to, default to CAD
system : str, optional
If None, the default system specified at instantiation
is used. System defines which conversion approach to take.
tag : str, optional
Tags define which set of conversion data is used. If None, the
default tag specified at instantiation is used.
"""
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
df = sym.df
curu = sym.units
requ = units
elif isinstance(symbol, tuple):
df = symbol[0]
curu = symbol[1]
requ = units
else:
raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol)))
system = system or self.default_system
tag = tag or self.default_tag
conv = self.converters[system][tag]
newdf = conv.convert(df, curu, requ)
newdf = pd.merge(df, newdf, left_index=True, right_index=True)
newdf = newdf[df.columns[0] + "_y"].to_frame()
newdf.columns = df.columns
return newdf | Uses a Symbol's Dataframe, to build a new Dataframe,
with the data converted to the new units
Parameters
----------
symbol : str or tuple of the form (Dataframe, str)
String representing a symbol's name, or a dataframe
with the data required to be converted. If supplying a
dataframe, units must be passed.
units : str, optional
Specify the units to convert the symbol to, default to CAD
system : str, optional
If None, the default system specified at instantiation
is used. System defines which conversion approach to take.
tag : str, optional
Tags define which set of conversion data is used. If None, the
default tag specified at instantiation is used. | Below is the the instruction that describes the task:
### Input:
Uses a Symbol's Dataframe, to build a new Dataframe,
with the data converted to the new units
Parameters
----------
symbol : str or tuple of the form (Dataframe, str)
String representing a symbol's name, or a dataframe
with the data required to be converted. If supplying a
dataframe, units must be passed.
units : str, optional
Specify the units to convert the symbol to, default to CAD
system : str, optional
If None, the default system specified at instantiation
is used. System defines which conversion approach to take.
tag : str, optional
Tags define which set of conversion data is used. If None, the
default tag specified at instantiation is used.
### Response:
def get_converted(self, symbol, units='CAD', system=None, tag=None):
    """
    Uses a Symbol's Dataframe to build a new Dataframe,
    with the data converted to the new units.
    Parameters
    ----------
    symbol : str or tuple of the form (Dataframe, str)
        String representing a symbol's name, or a dataframe
        with the data required to be converted. If supplying a
        dataframe, the current units must be passed as the second
        tuple element.
    units : str, optional
        Specify the units to convert the symbol to, defaults to CAD.
    system : str, optional
        If None, the default system specified at instantiation
        is used. System defines which conversion approach to take.
    tag : str, optional
        Tags define which set of conversion data is used. If None, the
        default tag specified at instantiation is used.
    Returns
    -------
    pandas.DataFrame
        A new single-column frame with the same index and column name
        as the input data, holding the converted values.
    """
    # NOTE(review): `unicode` is Python 2 only -- this module appears to
    # target Py2; confirm before running under Py3.
    if isinstance(symbol, (str, unicode)):
        # look the symbol up to get both its data and its current units
        sym = self.get(symbol)
        df = sym.df
        curu = sym.units
        requ = units
    elif isinstance(symbol, tuple):
        # caller supplied the data directly as (DataFrame, current_units)
        df = symbol[0]
        curu = symbol[1]
        requ = units
    else:
        raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol)))
    # fall back to the defaults chosen at instantiation
    system = system or self.default_system
    tag = tag or self.default_tag
    conv = self.converters[system][tag]
    newdf = conv.convert(df, curu, requ)
    # align converted values with the original index; pandas suffixes the
    # overlapping column names with _x (original) and _y (converted)
    newdf = pd.merge(df, newdf, left_index=True, right_index=True)
    newdf = newdf[df.columns[0] + "_y"].to_frame()
    # restore the original column name on the converted data
    newdf.columns = df.columns
    return newdf
def factorize(self):
""" Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R.
"""
[prow, pcol] = self.sample_probability()
self._rid = self.sample(self._rrank, prow)
self._cid = self.sample(self._crank, pcol)
self._cmdinit()
self.computeUCR() | Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R. | Below is the the instruction that describes the task:
### Input:
Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R.
### Response:
def factorize(self):
    """ Factorize s.t. CUR = data
    Updated Values
    --------------
    .C : updated values for C.
    .U : updated values for U.
    .R : updated values for R.
    """
    # sampling probabilities for rows and columns (presumably derived
    # from the data matrix by sample_probability -- see that method)
    [prow, pcol] = self.sample_probability()
    # choose which row/column indices will form R and C, using the
    # configured row/column ranks
    self._rid = self.sample(self._rrank, prow)
    self._cid = self.sample(self._crank, pcol)
    # initialize internal matrices before computing the decomposition
    self._cmdinit()
    # compute U (and fill in C and R) from the sampled rows/columns
    self.computeUCR()
def guess_process(query_str, process_map):
"""
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
"""
save_list = []
# loops between the processes available in process_map
for process in process_map:
similarity = SequenceMatcher(None, process, query_str)
# checks if similarity between the process and the query string is
# higher than 50%
if similarity.ratio() > 0.5:
save_list.append(process)
# checks if any process is stored in save_list
if save_list:
logger.info(colored_print(
"Maybe you meant:\n\t{}".format("\n\t".join(save_list)), "white"))
logger.info(colored_print("Hint: check the available processes by using "
"the '-l' or '-L' flag.", "white")) | Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes | Below is the the instruction that describes the task:
### Input:
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
### Response:
def guess_process(query_str, process_map):
    """
    Suggest processes similar to a string not present in process_map.
    When the query string has typos but is at least 50% similar to a
    process available in flowcraft, the most similar process names are
    printed to the terminal as suggestions.
    Parameters
    ----------
    query_str: str
        The string of the process with potential typos
    process_map:
        The dictionary that contains all the available processes
    """
    # keep every known process whose name is more than 50% similar
    # to the queried string
    candidates = [
        proc for proc in process_map
        if SequenceMatcher(None, proc, query_str).ratio() > 0.5
    ]
    if candidates:
        logger.info(colored_print(
            "Maybe you meant:\n\t{}".format("\n\t".join(candidates)), "white"))
    # the hint is always shown, whether or not suggestions were found
    logger.info(colored_print("Hint: check the available processes by using "
                              "the '-l' or '-L' flag.", "white"))
def create_table(cls):
"""
create_table
Manually create a temporary table for model in test data base.
:return:
"""
schema_editor = getattr(connection, 'schema_editor', None)
if schema_editor:
with schema_editor() as schema_editor:
schema_editor.create_model(cls)
else:
raw_sql, _ = connection.creation.sql_create_model(
cls,
no_style(),
[])
cls.delete_table()
cursor = connection.cursor()
try:
cursor.execute(*raw_sql)
finally:
cursor.close() | create_table
Manually create a temporary table for model in test data base.
:return: | Below is the the instruction that describes the task:
### Input:
create_table
Manually create a temporary table for model in test data base.
:return:
### Response:
def create_table(cls):
    """
    create_table
    Manually create a temporary table for model in test data base.
    :return:
    """
    # Newer Django versions expose connection.schema_editor; older ones
    # fall back to the legacy sql_create_model API below.
    schema_editor = getattr(connection, 'schema_editor', None)
    if schema_editor:
        with schema_editor() as schema_editor:
            schema_editor.create_model(cls)
    else:
        # NOTE(review): sql_create_model belongs to old Django releases
        # (removed in later versions) -- confirm the supported range.
        raw_sql, _ = connection.creation.sql_create_model(
            cls,
            no_style(),
            [])
        # drop any stale table before recreating it
        cls.delete_table()
        cursor = connection.cursor()
        try:
            # raw_sql is unpacked as positional execute() arguments
            cursor.execute(*raw_sql)
        finally:
            cursor.close()
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read() | Decompresses data for Content-Encoding: gzip. | Below is the the instruction that describes the task:
### Input:
Decompresses data for Content-Encoding: gzip.
### Response:
def ungzip(data):
    """Decompresses data for Content-Encoding: gzip.

    :param data: gzip-compressed bytes
    :return: the decompressed bytes
    """
    from io import BytesIO
    import gzip
    # use a context manager so the GzipFile is always closed
    # (the original left the file object open)
    with gzip.GzipFile(fileobj=BytesIO(data)) as f:
        return f.read()
def from_geometry(cls, molecule, do_orders=False, scaling=1.0):
"""Construct a MolecularGraph object based on interatomic distances
All short distances are computed with the binning module and compared
with a database of bond lengths. Based on this comparison, bonded
atoms are detected.
Before marking a pair of atoms A and B as bonded, it is also checked
that there is no third atom C somewhat between A and B.
When an atom C exists that is closer to B (than A) and the angle
A-B-C is less than 45 degrees, atoms A and B are not bonded.
Similarly if C is closer to A (than B) and the angle B-A-C is less
then 45 degrees, A and B are not connected.
Argument:
| ``molecule`` -- The molecule to derive the graph from
Optional arguments:
| ``do_orders`` -- set to True to estimate the bond order
| ``scaling`` -- scale the threshold for the connectivity. increase
this to 1.5 in case of transition states when a
fully connected topology is required.
"""
from molmod.bonds import bonds
unit_cell = molecule.unit_cell
pair_search = PairSearchIntra(
molecule.coordinates,
bonds.max_length*bonds.bond_tolerance*scaling,
unit_cell
)
orders = []
lengths = []
edges = []
for i0, i1, delta, distance in pair_search:
bond_order = bonds.bonded(molecule.numbers[i0], molecule.numbers[i1], distance/scaling)
if bond_order is not None:
if do_orders:
orders.append(bond_order)
lengths.append(distance)
edges.append((i0,i1))
if do_orders:
result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
else:
result = cls(edges, molecule.numbers, symbols=molecule.symbols)
# run a check on all neighbors. if two bonds point in a direction that
# differs only by 45 deg. the longest of the two is discarded. the
# double loop over the neighbors is done such that the longest bonds
# are eliminated first
slated_for_removal = set([])
threshold = 0.5**0.5
for c, ns in result.neighbors.items():
lengths_ns = []
for n in ns:
delta = molecule.coordinates[n] - molecule.coordinates[c]
if unit_cell is not None:
delta = unit_cell.shortest_vector(delta)
length = np.linalg.norm(delta)
lengths_ns.append([length, delta, n])
lengths_ns.sort(reverse=True, key=(lambda r: r[0]))
for i0, (length0, delta0, n0) in enumerate(lengths_ns):
for i1, (length1, delta1, n1) in enumerate(lengths_ns[:i0]):
if length1 == 0.0:
continue
cosine = np.dot(delta0, delta1)/length0/length1
if cosine > threshold:
# length1 > length0
slated_for_removal.add((c,n1))
lengths_ns[i1][0] = 0.0
# construct a mask
mask = np.ones(len(edges), bool)
for i0, i1 in slated_for_removal:
edge_index = result.edge_index.get(frozenset([i0,i1]))
if edge_index is None:
raise ValueError('Could not find edge that has to be removed: %i %i' % (i0, i1))
mask[edge_index] = False
# actual removal
edges = [edges[i] for i in range(len(edges)) if mask[i]]
if do_orders:
bond_order = [bond_order[i] for i in range(len(bond_order)) if mask[i]]
result = cls(edges, molecule.numbers, orders)
else:
result = cls(edges, molecule.numbers)
lengths = [lengths[i] for i in range(len(lengths)) if mask[i]]
result.bond_lengths = np.array(lengths)
return result | Construct a MolecularGraph object based on interatomic distances
All short distances are computed with the binning module and compared
with a database of bond lengths. Based on this comparison, bonded
atoms are detected.
Before marking a pair of atoms A and B as bonded, it is also checked
that there is no third atom C somewhat between A and B.
When an atom C exists that is closer to B (than A) and the angle
A-B-C is less than 45 degrees, atoms A and B are not bonded.
Similarly if C is closer to A (than B) and the angle B-A-C is less
then 45 degrees, A and B are not connected.
Argument:
| ``molecule`` -- The molecule to derive the graph from
Optional arguments:
| ``do_orders`` -- set to True to estimate the bond order
| ``scaling`` -- scale the threshold for the connectivity. increase
this to 1.5 in case of transition states when a
fully connected topology is required. | Below is the the instruction that describes the task:
### Input:
Construct a MolecularGraph object based on interatomic distances
All short distances are computed with the binning module and compared
with a database of bond lengths. Based on this comparison, bonded
atoms are detected.
Before marking a pair of atoms A and B as bonded, it is also checked
that there is no third atom C somewhat between A and B.
When an atom C exists that is closer to B (than A) and the angle
A-B-C is less than 45 degrees, atoms A and B are not bonded.
Similarly if C is closer to A (than B) and the angle B-A-C is less
then 45 degrees, A and B are not connected.
Argument:
| ``molecule`` -- The molecule to derive the graph from
Optional arguments:
| ``do_orders`` -- set to True to estimate the bond order
| ``scaling`` -- scale the threshold for the connectivity. increase
this to 1.5 in case of transition states when a
fully connected topology is required.
### Response:
def from_geometry(cls, molecule, do_orders=False, scaling=1.0):
    """Construct a MolecularGraph object based on interatomic distances
    All short distances are computed with the binning module and compared
    with a database of bond lengths. Based on this comparison, bonded
    atoms are detected.
    Before marking a pair of atoms A and B as bonded, it is also checked
    that there is no third atom C somewhat between A and B.
    When an atom C exists that is closer to B (than A) and the angle
    A-B-C is less than 45 degrees, atoms A and B are not bonded.
    Similarly if C is closer to A (than B) and the angle B-A-C is less
    then 45 degrees, A and B are not connected.
    Argument:
        | ``molecule`` -- The molecule to derive the graph from
    Optional arguments:
        | ``do_orders`` -- set to True to estimate the bond order
        | ``scaling`` -- scale the threshold for the connectivity. increase
                         this to 1.5 in case of transition states when a
                         fully connected topology is required.
    """
    from molmod.bonds import bonds
    unit_cell = molecule.unit_cell
    # candidate pairs within the largest plausible (scaled) bond length
    pair_search = PairSearchIntra(
        molecule.coordinates,
        bonds.max_length*bonds.bond_tolerance*scaling,
        unit_cell
    )
    orders = []
    lengths = []
    edges = []
    for i0, i1, delta, distance in pair_search:
        # divide by scaling so the bond database sees the unscaled distance
        bond_order = bonds.bonded(molecule.numbers[i0], molecule.numbers[i1], distance/scaling)
        if bond_order is not None:
            if do_orders:
                orders.append(bond_order)
            lengths.append(distance)
            edges.append((i0,i1))
    if do_orders:
        result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
    else:
        result = cls(edges, molecule.numbers, symbols=molecule.symbols)
    # run a check on all neighbors. if two bonds point in a direction that
    # differs only by 45 deg. the longest of the two is discarded. the
    # double loop over the neighbors is done such that the longest bonds
    # are eliminated first
    slated_for_removal = set([])
    threshold = 0.5**0.5  # cos(45 degrees)
    for c, ns in result.neighbors.items():
        lengths_ns = []
        for n in ns:
            delta = molecule.coordinates[n] - molecule.coordinates[c]
            if unit_cell is not None:
                delta = unit_cell.shortest_vector(delta)
            length = np.linalg.norm(delta)
            lengths_ns.append([length, delta, n])
        # longest bonds first, so they get discarded first
        lengths_ns.sort(reverse=True, key=(lambda r: r[0]))
        for i0, (length0, delta0, n0) in enumerate(lengths_ns):
            for i1, (length1, delta1, n1) in enumerate(lengths_ns[:i0]):
                if length1 == 0.0:
                    # this bond was already slated for removal
                    continue
                cosine = np.dot(delta0, delta1)/length0/length1
                if cosine > threshold:
                    # length1 >= length0: discard the longer bond c-n1
                    slated_for_removal.add((c,n1))
                    lengths_ns[i1][0] = 0.0
    # construct a mask over the original edge list
    mask = np.ones(len(edges), bool)
    for i0, i1 in slated_for_removal:
        edge_index = result.edge_index.get(frozenset([i0,i1]))
        if edge_index is None:
            raise ValueError('Could not find edge that has to be removed: %i %i' % (i0, i1))
        mask[edge_index] = False
    # actual removal: keep edges, orders and lengths in sync with the mask
    edges = [edges[i] for i in range(len(edges)) if mask[i]]
    lengths = [lengths[i] for i in range(len(lengths)) if mask[i]]
    if do_orders:
        # BUGFIX: the original filtered the loop-leftover scalar
        # `bond_order` instead of the `orders` list, passed the
        # unfiltered `orders` to the constructor, and dropped the
        # symbols; filter `orders` and keep symbols, consistent with
        # the first construction above.
        orders = [orders[i] for i in range(len(orders)) if mask[i]]
        result = cls(edges, molecule.numbers, orders, symbols=molecule.symbols)
    else:
        result = cls(edges, molecule.numbers, symbols=molecule.symbols)
    result.bond_lengths = np.array(lengths)
    return result
def request_info(self, request_id):
"""
Get information of a single pull request.
:param request_id: the id of the request
:return:
"""
request_url = "{}pull-request/{}".format(self.create_basic_url(),
request_id)
return_value = self._call_api(request_url)
return return_value | Get information of a single pull request.
:param request_id: the id of the request
:return: | Below is the the instruction that describes the task:
### Input:
Get information of a single pull request.
:param request_id: the id of the request
:return:
### Response:
def request_info(self, request_id):
    """
    Fetch information about a single pull request.
    :param request_id: the id of the request
    :return: the API response for that pull request
    """
    url = "{}pull-request/{}".format(self.create_basic_url(), request_id)
    return self._call_api(url)
def get(self, names_to_get, convert_to_numpy=True):
""" Loads the requested variables from the matlab com client.
names_to_get can be either a variable name or a list of variable names.
If it is a variable name, the values is returned.
If it is a list, a dictionary of variable_name -> value is returned.
If convert_to_numpy is true, the method will all array values to numpy
arrays. Scalars are left as regular python objects.
"""
self._check_open()
single_itme = isinstance(names_to_get, (unicode, str))
if single_itme:
names_to_get = [names_to_get]
ret = {}
for name in names_to_get:
ret[name] = self.client.GetWorkspaceData(name, 'base')
# TODO(daniv): Do we really want to reduce dimensions like that? what if this a row vector?
while isinstance(ret[name], (tuple, list)) and len(ret[name]) == 1:
ret[name] = ret[name][0]
if convert_to_numpy and isinstance(ret[name], (tuple, list)):
ret[name] = np.array(ret[name])
if single_itme:
return ret.values()[0]
return ret | Loads the requested variables from the matlab com client.
names_to_get can be either a variable name or a list of variable names.
If it is a variable name, the values is returned.
If it is a list, a dictionary of variable_name -> value is returned.
If convert_to_numpy is true, the method will all array values to numpy
arrays. Scalars are left as regular python objects. | Below is the the instruction that describes the task:
### Input:
Loads the requested variables from the matlab com client.
names_to_get can be either a variable name or a list of variable names.
If it is a variable name, the values is returned.
If it is a list, a dictionary of variable_name -> value is returned.
If convert_to_numpy is true, the method will all array values to numpy
arrays. Scalars are left as regular python objects.
### Response:
def get(self, names_to_get, convert_to_numpy=True):
    """ Loads the requested variables from the matlab com client.
    names_to_get can be either a variable name or a list of variable names.
    If it is a variable name, the value is returned.
    If it is a list, a dictionary of variable_name -> value is returned.
    If convert_to_numpy is true, the method will convert all array values
    to numpy arrays. Scalars are left as regular python objects.
    """
    # raises if the COM connection has been closed
    self._check_open()
    # NOTE(review): `unicode` and the subscript on dict.values() below
    # are Python 2 only -- this module appears to target Py2; confirm
    # before running under Py3.
    single_itme = isinstance(names_to_get, (unicode, str))
    if single_itme:
        # normalize to a list so a single loop handles both cases
        names_to_get = [names_to_get]
    ret = {}
    for name in names_to_get:
        ret[name] = self.client.GetWorkspaceData(name, 'base')
        # unwrap nested 1-element sequences returned by the COM layer
        # TODO(daniv): Do we really want to reduce dimensions like that? what if this a row vector?
        while isinstance(ret[name], (tuple, list)) and len(ret[name]) == 1:
            ret[name] = ret[name][0]
        if convert_to_numpy and isinstance(ret[name], (tuple, list)):
            ret[name] = np.array(ret[name])
    if single_itme:
        # only one entry exists, so return its value directly
        return ret.values()[0]
    return ret
async def _start_server(python, port,
venv_site_pkgs=None, cwd=None) -> sp.Popen:
"""
Starts an update server sandboxed in the virtual env, and attempts to read
the health endpoint with retries to determine when the server is available.
If the number of retries is exceeded, the returned server process will
already be terminated.
:return: the server process
"""
log.info("Starting sandboxed update server on port {}".format(port))
if venv_site_pkgs:
python = 'PYTHONPATH={} {}'.format(venv_site_pkgs, python)
cmd = [python, '-m', 'otupdate', '--debug', '--test', '--port', str(port)]
log.debug('cmd: {}'.format(' '.join(cmd)))
proc = sp.Popen(' '.join(cmd), shell=True, cwd=cwd)
atexit.register(lambda: _stop_server(proc))
n_retries = 3
async with aiohttp.ClientSession() as session:
test_status, detail = await selftest.health_check(
session=session, port=port, retries=n_retries)
if test_status == 'failure':
log.debug(
"Test server failed to start after {} retries. Stopping.".format(
n_retries))
_stop_server(proc)
return proc | Starts an update server sandboxed in the virtual env, and attempts to read
the health endpoint with retries to determine when the server is available.
If the number of retries is exceeded, the returned server process will
already be terminated.
:return: the server process | Below is the the instruction that describes the task:
### Input:
Starts an update server sandboxed in the virtual env, and attempts to read
the health endpoint with retries to determine when the server is available.
If the number of retries is exceeded, the returned server process will
already be terminated.
:return: the server process
### Response:
async def _start_server(python, port,
                        venv_site_pkgs=None, cwd=None) -> sp.Popen:
    """
    Starts an update server sandboxed in the virtual env, and attempts to read
    the health endpoint with retries to determine when the server is available.
    If the number of retries is exceeded, the returned server process will
    already be terminated.
    :param python: interpreter invocation used to launch the server
    :param port: TCP port the test server should listen on
    :param venv_site_pkgs: optional site-packages path, prepended via
        PYTHONPATH so the sandbox packages win -- TODO confirm intent
    :param cwd: optional working directory for the server process
    :return: the server process
    """
    log.info("Starting sandboxed update server on port {}".format(port))
    if venv_site_pkgs:
        # prefix the interpreter with a PYTHONPATH assignment; this turns
        # `python` into a shell expression rather than an executable path
        python = 'PYTHONPATH={} {}'.format(venv_site_pkgs, python)
    cmd = [python, '-m', 'otupdate', '--debug', '--test', '--port', str(port)]
    log.debug('cmd: {}'.format(' '.join(cmd)))
    # shell=True is needed because the command may include the env-var
    # prefix built above
    proc = sp.Popen(' '.join(cmd), shell=True, cwd=cwd)
    # make sure the child is stopped even if the caller never cleans up
    atexit.register(lambda: _stop_server(proc))
    n_retries = 3
    async with aiohttp.ClientSession() as session:
        # poll the health endpoint until the server answers or retries
        # are exhausted
        test_status, detail = await selftest.health_check(
            session=session, port=port, retries=n_retries)
    if test_status == 'failure':
        # server never came up: stop it, but still return the (dead) proc
        log.debug(
            "Test server failed to start after {} retries. Stopping.".format(
                n_retries))
        _stop_server(proc)
    return proc
def created_at(self):
"""
:return: When the document was created
:rtype: :py:class:`datetime.datetime`
"""
if self._created_at:
return self._created_at
elif not self.abstract:
return self.read_meta()._created_at
raise EmptyDocumentException() | :return: When the document was created
:rtype: :py:class:`datetime.datetime` | Below is the the instruction that describes the task:
### Input:
:return: When the document was created
:rtype: :py:class:`datetime.datetime`
### Response:
def created_at(self):
    """
    :return: When the document was created
    :rtype: :py:class:`datetime.datetime`
    """
    # prefer the cached timestamp when one is already set
    cached = self._created_at
    if cached:
        return cached
    # non-abstract documents can load the timestamp from their metadata
    if not self.abstract:
        return self.read_meta()._created_at
    raise EmptyDocumentException()
def create_url(self, path, params={}, opts={}):
"""
Create URL with supplied path and `opts` parameters dict.
Parameters
----------
path : str
opts : dict
Dictionary specifying URL parameters. Non-imgix parameters are
added to the URL unprocessed. For a complete list of imgix
supported parameters, visit https://docs.imgix.com/apis/url .
(default {})
Returns
-------
str
imgix URL
"""
if opts:
warnings.warn('`opts` has been deprecated. Use `params` instead.',
DeprecationWarning, stacklevel=2)
params = params or opts
if self._shard_strategy == SHARD_STRATEGY_CRC:
crc = zlib.crc32(path.encode('utf-8')) & 0xffffffff
index = crc % len(self._domains) # Deterministically choose domain
domain = self._domains[index]
elif self._shard_strategy == SHARD_STRATEGY_CYCLE:
domain = self._domains[self._shard_next_index]
self._shard_next_index = (
self._shard_next_index + 1) % len(self._domains)
else:
domain = self._domains[0]
scheme = "https" if self._use_https else "http"
url_obj = UrlHelper(
domain,
path,
scheme,
sign_key=self._sign_key,
include_library_param=self._include_library_param,
params=params)
return str(url_obj) | Create URL with supplied path and `opts` parameters dict.
Parameters
----------
path : str
opts : dict
Dictionary specifying URL parameters. Non-imgix parameters are
added to the URL unprocessed. For a complete list of imgix
supported parameters, visit https://docs.imgix.com/apis/url .
(default {})
Returns
-------
str
imgix URL | Below is the the instruction that describes the task:
### Input:
Create URL with supplied path and `opts` parameters dict.
Parameters
----------
path : str
opts : dict
Dictionary specifying URL parameters. Non-imgix parameters are
added to the URL unprocessed. For a complete list of imgix
supported parameters, visit https://docs.imgix.com/apis/url .
(default {})
Returns
-------
str
imgix URL
### Response:
def create_url(self, path, params=None, opts=None):
    """
    Create URL with supplied path and `params` parameters dict.
    Parameters
    ----------
    path : str
    params : dict, optional
        Dictionary specifying URL parameters. Non-imgix parameters are
        added to the URL unprocessed. For a complete list of imgix
        supported parameters, visit https://docs.imgix.com/apis/url .
        (default None, treated as {})
    opts : dict, optional
        Deprecated alias for `params`.
    Returns
    -------
    str
        imgix URL
    """
    # BUGFIX: the defaults used to be mutable dicts ({}), shared across
    # all calls to this method; use None sentinels instead.
    if opts:
        warnings.warn('`opts` has been deprecated. Use `params` instead.',
                      DeprecationWarning, stacklevel=2)
    # `params` wins over the deprecated `opts`; guarantee a dict
    params = params or opts or {}
    if self._shard_strategy == SHARD_STRATEGY_CRC:
        # deterministic choice: the same path always maps to the same
        # shard, which maximizes cache hits
        crc = zlib.crc32(path.encode('utf-8')) & 0xffffffff
        index = crc % len(self._domains)  # Deterministically choose domain
        domain = self._domains[index]
    elif self._shard_strategy == SHARD_STRATEGY_CYCLE:
        # round-robin over the configured domains (mutates instance state)
        domain = self._domains[self._shard_next_index]
        self._shard_next_index = (
            self._shard_next_index + 1) % len(self._domains)
    else:
        domain = self._domains[0]
    scheme = "https" if self._use_https else "http"
    url_obj = UrlHelper(
        domain,
        path,
        scheme,
        sign_key=self._sign_key,
        include_library_param=self._include_library_param,
        params=params)
    return str(url_obj)
def fit_plots(ax,xfit,xraw,yfit,yraw):
'''
#====================================================================
plot the fitted results for data fit and refit
#====================================================================
'''
global _yfits_
_yfits_ = yfit
ax.plot(xfit, yfit)
ax.plot(xfit, np.sum(yfit,axis=1))
ax.scatter(xraw, yraw)
ax.set_xlabel('Field (log10(mT))')
ax.set_ylabel('IRM normalization') | #====================================================================
plot the fitted results for data fit and refit
#==================================================================== | Below is the the instruction that describes the task:
### Input:
#====================================================================
plot the fitted results for data fit and refit
#====================================================================
### Response:
def fit_plots(ax, xfit, xraw, yfit, yraw):
    '''
    #====================================================================
    plot the fitted results for data fit and refit
    #====================================================================
    '''
    # stash the fitted curves in a module-level global so other plotting
    # code in this module can reuse them
    global _yfits_
    _yfits_ = yfit
    # individual fitted components, then their sum, then the raw points
    total = np.sum(yfit, axis=1)
    ax.plot(xfit, yfit)
    ax.plot(xfit, total)
    ax.scatter(xraw, yraw)
    ax.set_xlabel('Field (log10(mT))')
    ax.set_ylabel('IRM normalization')
def query(cls, automation=None, offset=None, limit=None, api=None):
"""
Query (List) apps.
:param automation: Automation id.
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: Api instance.
:return: collection object
"""
automation_id = Transform.to_automation(automation)
api = api or cls._API
return super(AutomationMember, cls)._query(
url=cls._URL['query'].format(automation_id=automation_id),
automation_id=automation_id,
offset=offset,
limit=limit,
api=api,
) | Query (List) apps.
:param automation: Automation id.
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: Api instance.
:return: collection object | Below is the the instruction that describes the task:
### Input:
Query (List) apps.
:param automation: Automation id.
:param offset: Pagination offset.
:param limit: Pagination limit.
:param api: Api instance.
:return: collection object
### Response:
def query(cls, automation=None, offset=None, limit=None, api=None):
    """
    Query (List) apps.
    :param automation: Automation id.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: collection object
    """
    automation_id = Transform.to_automation(automation)
    # fall back to the class-level API instance when none is supplied
    api = api or cls._API
    query_url = cls._URL['query'].format(automation_id=automation_id)
    return super(AutomationMember, cls)._query(
        url=query_url,
        automation_id=automation_id,
        offset=offset,
        limit=limit,
        api=api,
    )
def get_text_alignment(pos):
"""Return 'verticalalignment'('va') and 'horizontalalignment'('ha') for given 'pos' (position)"""
pos = pos.lower() # to make it case insensitive
va, ha = None, None
if pos == 'nw': va, ha = 'top', 'left'
elif pos == 'ne': va, ha = 'top', 'right'
elif pos == 'sw': va, ha = 'bottom', 'left'
elif pos == 'se': va, ha = 'bottom', 'right'
else: raise ValueError("Unknown value for 'pos': %s" % (str(pos)))
return {'va':va, 'ha':ha} | Return 'verticalalignment'('va') and 'horizontalalignment'('ha') for given 'pos' (position) | Below is the the instruction that describes the task:
### Input:
Return 'verticalalignment'('va') and 'horizontalalignment'('ha') for given 'pos' (position)
### Response:
def get_text_alignment(pos):
    """Return 'verticalalignment'('va') and 'horizontalalignment'('ha') for given 'pos' (position)"""
    # map each compass corner to its (vertical, horizontal) alignment pair
    alignments = {
        'nw': ('top', 'left'),
        'ne': ('top', 'right'),
        'sw': ('bottom', 'left'),
        'se': ('bottom', 'right'),
    }
    key = pos.lower()  # to make it case insensitive
    if key not in alignments:
        raise ValueError("Unknown value for 'pos': %s" % (str(key)))
    va, ha = alignments[key]
    return {'va': va, 'ha': ha}
def _build_sliced_filepath(filename, slice_count):
""" append slice_count to the end of a filename """
root = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1]
new_filepath = ''.join((root, str(slice_count), ext))
return _build_filepath_for_phantomcss(new_filepath) | append slice_count to the end of a filename | Below is the the instruction that describes the task:
### Input:
append slice_count to the end of a filename
### Response:
def _build_sliced_filepath(filename, slice_count):
""" append slice_count to the end of a filename """
root = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1]
new_filepath = ''.join((root, str(slice_count), ext))
return _build_filepath_for_phantomcss(new_filepath) |
def reset_password_view(self, token):
    """ Verify the password reset token, Prompt for new password, and set the user's password.

    GET renders the reset-password form; a valid POST stores the new
    password hash, optionally sends a 'password changed' email, and either
    auto-logs the user in or redirects to the login page.
    """
    # Verify token
    if self.call_or_get(current_user.is_authenticated):
        # Log out any current session first, so the reset applies to the
        # account identified by the token, not to whoever is logged in.
        logout_user()
    data_items = self.token_manager.verify_token(
        token,
        self.USER_RESET_PASSWORD_EXPIRATION)
    user = None
    if data_items:
        # Get User by user ID
        user_id = data_items[0]
        user = self.db_manager.get_user_by_id(user_id)
        # Mark email as confirmed — receiving the reset link proves the
        # user controls the mailbox.
        user_or_user_email_object = self.db_manager.get_primary_user_email_object(user)
        user_or_user_email_object.email_confirmed_at = datetime.utcnow()
        self.db_manager.save_object(user_or_user_email_object)
        self.db_manager.commit()
    if not user:
        # Token failed verification or referenced an unknown user.
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(self._endpoint_url('user.login'))
    # Initialize form
    form = self.ResetPasswordFormClass(request.form)
    # Process valid POST
    if request.method == 'POST' and form.validate():
        # Change password
        password_hash = self.hash_password(form.new_password.data)
        user.password=password_hash
        self.db_manager.save_object(user)
        self.db_manager.commit()
        # Send 'password_changed' email
        if self.USER_ENABLE_EMAIL and self.USER_SEND_PASSWORD_CHANGED_EMAIL:
            self.email_manager.send_password_changed_email(user)
        # Send reset_password signal
        signals.user_reset_password.send(current_app._get_current_object(), user=user)
        # Flash a system message
        flash(_("Your password has been reset successfully."), 'success')
        # Auto-login after reset password or redirect to login page
        safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_RESET_PASSWORD_ENDPOINT)
        if self.USER_AUTO_LOGIN_AFTER_RESET_PASSWORD:
            return self._do_login_user(user, safe_next_url)  # auto-login
        else:
            return redirect(url_for('user.login') + '?next=' + quote(safe_next_url))  # redirect to login page
    # Render form
    self.prepare_domain_translations()
    return render_template(self.USER_RESET_PASSWORD_TEMPLATE, form=form)
### Input:
Verify the password reset token, Prompt for new password, and set the user's password.
### Response:
def reset_password_view(self, token):
""" Verify the password reset token, Prompt for new password, and set the user's password."""
# Verify token
if self.call_or_get(current_user.is_authenticated):
logout_user()
data_items = self.token_manager.verify_token(
token,
self.USER_RESET_PASSWORD_EXPIRATION)
user = None
if data_items:
# Get User by user ID
user_id = data_items[0]
user = self.db_manager.get_user_by_id(user_id)
# Mark email as confirmed
user_or_user_email_object = self.db_manager.get_primary_user_email_object(user)
user_or_user_email_object.email_confirmed_at = datetime.utcnow()
self.db_manager.save_object(user_or_user_email_object)
self.db_manager.commit()
if not user:
flash(_('Your reset password token is invalid.'), 'error')
return redirect(self._endpoint_url('user.login'))
# Initialize form
form = self.ResetPasswordFormClass(request.form)
# Process valid POST
if request.method == 'POST' and form.validate():
# Change password
password_hash = self.hash_password(form.new_password.data)
user.password=password_hash
self.db_manager.save_object(user)
self.db_manager.commit()
# Send 'password_changed' email
if self.USER_ENABLE_EMAIL and self.USER_SEND_PASSWORD_CHANGED_EMAIL:
self.email_manager.send_password_changed_email(user)
# Send reset_password signal
signals.user_reset_password.send(current_app._get_current_object(), user=user)
# Flash a system message
flash(_("Your password has been reset successfully."), 'success')
# Auto-login after reset password or redirect to login page
safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_RESET_PASSWORD_ENDPOINT)
if self.USER_AUTO_LOGIN_AFTER_RESET_PASSWORD:
return self._do_login_user(user, safe_next_url) # auto-login
else:
return redirect(url_for('user.login') + '?next=' + quote(safe_next_url)) # redirect to login page
# Render form
self.prepare_domain_translations()
return render_template(self.USER_RESET_PASSWORD_TEMPLATE, form=form) |
def is_generator(self, node):
    """Traverse the tree below node looking for 'yield [expr]'."""
    # A direct match on this node means a yield expression was found.
    if self.yield_expr.match(node, {}):
        return True
    # Recurse into children, skipping nested function and class
    # definitions: a yield inside those belongs to that scope, not ours.
    return any(
        self.is_generator(child)
        for child in node.children
        if child.type not in (syms.funcdef, syms.classdef)
    )
### Input:
Traverse the tree below node looking for 'yield [expr]'.
### Response:
def is_generator(self, node):
"""Traverse the tree below node looking for 'yield [expr]'."""
results = {}
if self.yield_expr.match(node, results):
return True
for child in node.children:
if child.type not in (syms.funcdef, syms.classdef):
if self.is_generator(child):
return True
return False |
def _get_bonds(self, mol):
    """
    Find all the bonds in a molecule.
    Args:
        mol: the molecule. pymatgen Molecule object
    Returns:
        List of tuple. Each tuple correspond to a bond represented by the
        id of the two end atoms.
    """
    num_atoms = len(mol)
    # index starting from 0
    if self.ignore_ionic_bond:
        # Restrict candidates to atoms that are not in the configured
        # ionic-element list.
        covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list]
    else:
        covalent_atoms = list(range(num_atoms))
    # Every unordered pair of candidate atoms is a potential bond.
    all_pairs = list(itertools.combinations(covalent_atoms, 2))
    pair_dists = [mol.get_distance(*p) for p in all_pairs]
    # Bail out if any element lacks a tabulated covalent radius.
    elements = mol.composition.as_dict().keys()
    unavailable_elements = list(set(elements) -
                                set(self.covalent_radius.keys()))
    if len(unavailable_elements) > 0:
        raise ValueError("The covalent radius for element {} is not "
                         "available".format(unavailable_elements))
    # Pairs in a 1-3 relationship (derived from the priority bonds) use a
    # separate tolerance cap below.
    bond_13 = self.get_13_bonds(self.priority_bonds)
    # Maximum allowed length per pair: sum of the two covalent radii,
    # stretched by a per-category tolerance (priority / normal / 1-3), and
    # multiplied by 0.1 to effectively suppress halogen-halogen self bonds
    # when that option is enabled.
    max_length = [(self.covalent_radius[mol.sites[p[0]].specie.symbol] +
                   self.covalent_radius[mol.sites[p[1]].specie.symbol]) *
                  (1 + (self.priority_cap if p in self.priority_bonds
                        else (self.bond_length_cap if p not in bond_13
                              else self.bond_13_cap))) *
                  (0.1 if (self.ignore_halogen_self_bond and p not in self.priority_bonds and
                           mol.sites[p[0]].specie.symbol in self.halogen_list and
                           mol.sites[p[1]].specie.symbol in self.halogen_list)
                   else 1.0)
                  for p in all_pairs]
    # Keep pairs whose actual distance is within the allowed maximum.
    bonds = [bond
             for bond, dist, cap in zip(all_pairs, pair_dists, max_length)
             if dist <= cap]
    return bonds
Args:
mol: the molecule. pymatgen Molecule object
Returns:
List of tuple. Each tuple correspond to a bond represented by the
id of the two end atoms. | Below is the the instruction that describes the task:
### Input:
Find all the bond in a molcule
Args:
mol: the molecule. pymatgen Molecule object
Returns:
List of tuple. Each tuple correspond to a bond represented by the
id of the two end atoms.
### Response:
def _get_bonds(self, mol):
"""
Find all the bond in a molcule
Args:
mol: the molecule. pymatgen Molecule object
Returns:
List of tuple. Each tuple correspond to a bond represented by the
id of the two end atoms.
"""
num_atoms = len(mol)
# index starting from 0
if self.ignore_ionic_bond:
covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list]
else:
covalent_atoms = list(range(num_atoms))
all_pairs = list(itertools.combinations(covalent_atoms, 2))
pair_dists = [mol.get_distance(*p) for p in all_pairs]
elements = mol.composition.as_dict().keys()
unavailable_elements = list(set(elements) -
set(self.covalent_radius.keys()))
if len(unavailable_elements) > 0:
raise ValueError("The covalent radius for element {} is not "
"available".format(unavailable_elements))
bond_13 = self.get_13_bonds(self.priority_bonds)
max_length = [(self.covalent_radius[mol.sites[p[0]].specie.symbol] +
self.covalent_radius[mol.sites[p[1]].specie.symbol]) *
(1 + (self.priority_cap if p in self.priority_bonds
else (self.bond_length_cap if p not in bond_13
else self.bond_13_cap))) *
(0.1 if (self.ignore_halogen_self_bond and p not in self.priority_bonds and
mol.sites[p[0]].specie.symbol in self.halogen_list and
mol.sites[p[1]].specie.symbol in self.halogen_list)
else 1.0)
for p in all_pairs]
bonds = [bond
for bond, dist, cap in zip(all_pairs, pair_dists, max_length)
if dist <= cap]
return bonds |
def cli_decrypt(context, key):
    """
    Decrypts context.io_manager's stdin and sends that to
    context.io_manager's stdout.
    See :py:mod:`swiftly.cli.decrypt` for context usage information.
    See :py:class:`CLIDecrypt` for more information.
    """
    io_manager = context.io_manager
    with io_manager.with_stdout() as stdout:
        with io_manager.with_stdin() as stdin:
            # The first byte of the stream identifies the encryption scheme.
            crypt_type = stdin.read(1)
            if crypt_type != AES256CBC:
                raise ReturnCode(
                    'contents encrypted with unsupported type %r' % crypt_type)
            for chunk in aes_decrypt(key, stdin):
                stdout.write(chunk)
            stdout.flush()
context.io_manager's stdout.
See :py:mod:`swiftly.cli.decrypt` for context usage information.
See :py:class:`CLIDecrypt` for more information. | Below is the the instruction that describes the task:
### Input:
Decrypts context.io_manager's stdin and sends that to
context.io_manager's stdout.
See :py:mod:`swiftly.cli.decrypt` for context usage information.
See :py:class:`CLIDecrypt` for more information.
### Response:
def cli_decrypt(context, key):
"""
Decrypts context.io_manager's stdin and sends that to
context.io_manager's stdout.
See :py:mod:`swiftly.cli.decrypt` for context usage information.
See :py:class:`CLIDecrypt` for more information.
"""
with context.io_manager.with_stdout() as stdout:
with context.io_manager.with_stdin() as stdin:
crypt_type = stdin.read(1)
if crypt_type == AES256CBC:
for chunk in aes_decrypt(key, stdin):
stdout.write(chunk)
stdout.flush()
else:
raise ReturnCode(
'contents encrypted with unsupported type %r' % crypt_type) |
def get_django_user(self, username, password=None):
    """
    Get the Django user with the given username, or create one if it
    doesn't already exist. If `password` is given, then set the user's
    password to that (regardless of whether the user was created or not).
    """
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        # No such user yet: build a fresh, unsaved record.
        user = User(username=username)
    if password is not None:
        user.set_password(password)
    # Save unconditionally so newly created users are persisted and any
    # password change is written back.
    user.save()
    return user
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not). | Below is the the instruction that describes the task:
### Input:
Get the Django user with the given username, or create one if it
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not).
### Response:
def get_django_user(self, username, password=None):
"""
Get the Django user with the given username, or create one if it
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not).
"""
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User(username=username)
if password is not None:
user.set_password(password)
user.save()
return user |
def add_document(self, doc_url, data):
    """ Add the given document to the cache, updating
    the existing content data if the document is already present
    :type doc_url: String or Document
    :param doc_url: the URL of the document, or a Document object
    :type data: String
    :param data: the document's content data
    """
    # Write the new content to a freshly generated cache file first.
    file_path = self.__generate_filepath()
    with open(file_path, 'wb') as f:
        f.write(data)
    c = self.conn.cursor()
    # Remove any previous entry for this URL, deleting its on-disk file
    # so stale content does not accumulate.
    c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
    for row in c.fetchall():
        old_file_path = row[1]
        if os.path.isfile(old_file_path):
            os.unlink(old_file_path)
    c.execute("DELETE FROM documents WHERE url=?", (str(doc_url),))
    self.conn.commit()
    # Record the new entry with the current timestamp.
    c.execute("INSERT INTO documents VALUES (?, ?, ?)",
              (str(doc_url), file_path, self.__now_iso_8601()))
    self.conn.commit()
    c.close()
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data | Below is the the instruction that describes the task:
### Input:
Add the given document to the cache, updating
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data
### Response:
def add_document(self, doc_url, data):
""" Add the given document to the cache, updating
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data
"""
file_path = self.__generate_filepath()
with open(file_path, 'wb') as f:
f.write(data)
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
for row in c.fetchall():
old_file_path = row[1]
if os.path.isfile(old_file_path):
os.unlink(old_file_path)
c.execute("DELETE FROM documents WHERE url=?", (str(doc_url),))
self.conn.commit()
c.execute("INSERT INTO documents VALUES (?, ?, ?)",
(str(doc_url), file_path, self.__now_iso_8601()))
self.conn.commit()
c.close() |
def growth_curve(self, method='best', **method_options):
    """
    Return QMED estimate using best available methodology depending on what catchment attributes are available.
    ====================== ====================== ==================================================================
    `method`               `method_options`       notes
    ====================== ====================== ==================================================================
    `enhanced_single_site` `distr='glo'`          Preferred method for gauged catchments (i.e. with
                           `as_rural=False`       `Catchment.amax_record`).
    `single_site`          `distr='glo'`          Alternative method for gauged catchments. Uses AMAX data from
                                                  subject station only.
    `pooling_group`        `distr='glo'`          Only possible method for ungauged catchments.
                           `as_rural=False`
    ====================== ====================== ==================================================================
    :param method: methodology to use to estimate the growth curve. Default: automatically choose best method.
    :type method: str
    :param method_options: any optional parameters for the growth curve method function
    :type method_options: kwargs
    :return: Inverse cumulative distribution function, callable class with one parameter `aep` (annual exceedance
             probability)
    :type: :class:`.GrowthCurve`
    """
    if method == 'best':
        if self.catchment.amax_records:
            # Gauged catchment, use enhanced single site
            self.results_log['method'] = 'enhanced_single_site'
            return self._growth_curve_enhanced_single_site()
        else:
            # Ungauged catchment, standard pooling group
            self.results_log['method'] = 'pooling_group'
            return self._growth_curve_pooling_group()
    else:
        try:
            # Record the actual method name (previously this stored the
            # literal string 'method', unlike the branches above).
            self.results_log['method'] = method
            return getattr(self, '_growth_curve_' + method)(**method_options)
        except AttributeError:
            raise AttributeError("Method `{}` to estimate the growth curve does not exist.".format(method))
====================== ====================== ==================================================================
`method` `method_options` notes
====================== ====================== ==================================================================
`enhanced_single_site` `distr='glo'` Preferred method for gauged catchments (i.e. with
`as_rural=False` `Catchment.amax_record`).
`single_site` `distr='glo'` Alternative method for gauged catchments. Uses AMAX data from
subject station only.
`pooling_group` `distr='glo'` Only possible method for ungauged catchments.
`as_rural=False`
====================== ====================== ==================================================================
:param method: methodology to use to estimate the growth curve. Default: automatically choose best method.
:type method: str
:param method_options: any optional parameters for the growth curve method function
:type method_options: kwargs
:return: Inverse cumulative distribution function, callable class with one parameter `aep` (annual exceedance
probability)
:type: :class:`.GrowthCurve` | Below is the the instruction that describes the task:
### Input:
Return QMED estimate using best available methodology depending on what catchment attributes are available.
====================== ====================== ==================================================================
`method` `method_options` notes
====================== ====================== ==================================================================
`enhanced_single_site` `distr='glo'` Preferred method for gauged catchments (i.e. with
`as_rural=False` `Catchment.amax_record`).
`single_site` `distr='glo'` Alternative method for gauged catchments. Uses AMAX data from
subject station only.
`pooling_group` `distr='glo'` Only possible method for ungauged catchments.
`as_rural=False`
====================== ====================== ==================================================================
:param method: methodology to use to estimate the growth curve. Default: automatically choose best method.
:type method: str
:param method_options: any optional parameters for the growth curve method function
:type method_options: kwargs
:return: Inverse cumulative distribution function, callable class with one parameter `aep` (annual exceedance
probability)
:type: :class:`.GrowthCurve`
### Response:
def growth_curve(self, method='best', **method_options):
"""
Return QMED estimate using best available methodology depending on what catchment attributes are available.
====================== ====================== ==================================================================
`method` `method_options` notes
====================== ====================== ==================================================================
`enhanced_single_site` `distr='glo'` Preferred method for gauged catchments (i.e. with
`as_rural=False` `Catchment.amax_record`).
`single_site` `distr='glo'` Alternative method for gauged catchments. Uses AMAX data from
subject station only.
`pooling_group` `distr='glo'` Only possible method for ungauged catchments.
`as_rural=False`
====================== ====================== ==================================================================
:param method: methodology to use to estimate the growth curve. Default: automatically choose best method.
:type method: str
:param method_options: any optional parameters for the growth curve method function
:type method_options: kwargs
:return: Inverse cumulative distribution function, callable class with one parameter `aep` (annual exceedance
probability)
:type: :class:`.GrowthCurve`
"""
if method == 'best':
if self.catchment.amax_records:
# Gauged catchment, use enhanced single site
self.results_log['method'] = 'enhanced_single_site'
return self._growth_curve_enhanced_single_site()
else:
# Ungauged catchment, standard pooling group
self.results_log['method'] = 'pooling_group'
return self._growth_curve_pooling_group()
else:
try:
self.results_log['method'] = 'method'
return getattr(self, '_growth_curve_' + method)(**method_options)
except AttributeError:
raise AttributeError("Method `{}` to estimate the growth curve does not exist.".format(method)) |
def obj_box_coords_rescale(coords=None, shape=None):
    """Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].

    Parameters
    ------------
    coords : list of list of 4 ints or None
        For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].
    shape : list of 2 int or None
        [height, width].

    Returns
    -------
    list of list of 4 numbers
        A list of new bounding boxes.

    Examples
    ---------
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
    >>> print(coords)
    [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
    >>> print(coords)
    [[0.3, 0.8, 0.5, 1.0]]
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
    >>> print(coords)
    [[0.15, 0.4, 0.25, 0.5]]
    """
    if coords is None:
        coords = []
    if shape is None:
        shape = [100, 200]
    # * 1.0 forces float division under python2.
    imh = shape[0] * 1.0
    imw = shape[1] * 1.0
    coords_new = list()
    for coord in coords:
        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
        # Width-like values (x, w) scale by image width, height-like (y, h)
        # by image height.
        x, y, w, h = coord
        coords_new.append([x / imw, y / imh, w / imw, h / imh])
    return coords_new
Parameters
------------
coords : list of list of 4 ints or None
For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].
shape : list of 2 int or None
[height, width].
Returns
-------
list of list of 4 numbers
A list of new bounding boxes.
Examples
---------
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
>>> print(coords)
[[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
>>> print(coords)
[[0.3, 0.8, 0.5, 1.0]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
>>> print(coords)
[[0.15, 0.4, 0.25, 0.5]]
Returns
-------
list of 4 numbers
New coordinates. | Below is the the instruction that describes the task:
### Input:
Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
Parameters
------------
coords : list of list of 4 ints or None
For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].
shape : list of 2 int or None
[height, width].
Returns
-------
list of list of 4 numbers
A list of new bounding boxes.
Examples
---------
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
>>> print(coords)
[[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
>>> print(coords)
[[0.3, 0.8, 0.5, 1.0]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
>>> print(coords)
[[0.15, 0.4, 0.25, 0.5]]
Returns
-------
list of 4 numbers
New coordinates.
### Response:
def obj_box_coords_rescale(coords=None, shape=None):
"""Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
Parameters
------------
coords : list of list of 4 ints or None
For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].
shape : list of 2 int or None
[height, width].
Returns
-------
list of list of 4 numbers
A list of new bounding boxes.
Examples
---------
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
>>> print(coords)
[[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
>>> print(coords)
[[0.3, 0.8, 0.5, 1.0]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
>>> print(coords)
[[0.15, 0.4, 0.25, 0.5]]
Returns
-------
list of 4 numbers
New coordinates.
"""
if coords is None:
coords = []
if shape is None:
shape = [100, 200]
imh, imw = shape[0], shape[1]
imh = imh * 1.0 # * 1.0 for python2 : force division to be float point
imw = imw * 1.0
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x = coord[0] / imw
y = coord[1] / imh
w = coord[2] / imw
h = coord[3] / imh
coords_new.append([x, y, w, h])
return coords_new |
def build_request_signature(self, saml_request, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
    """
    Builds the Signature of the SAML Request.
    :param saml_request: The SAML Request
    :type saml_request: string
    :param relay_state: The target URL the user should be redirected to
    :type relay_state: string
    :param sign_algorithm: Signature algorithm method
    :type sign_algorithm: string
    :return: whatever the shared ``__build_signature`` helper produces
    """
    # 'SAMLRequest' identifies which query-string parameter is being signed.
    return self.__build_signature(saml_request, relay_state, 'SAMLRequest', sign_algorithm)
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string | Below is the the instruction that describes the task:
### Input:
Builds the Signature of the SAML Request.
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
### Response:
def build_request_signature(self, saml_request, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
"""
Builds the Signature of the SAML Request.
:param saml_request: The SAML Request
:type saml_request: string
:param relay_state: The target URL the user should be redirected to
:type relay_state: string
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
"""
return self.__build_signature(saml_request, relay_state, 'SAMLRequest', sign_algorithm) |
def cli(obj):
    """List API keys."""
    client = obj['client']
    if obj['output'] != 'json':
        # Tabular output: render one row per key in the requested format.
        timezone = obj['timezone']
        headers = {
            'id': 'ID', 'key': 'API KEY', 'user': 'USER', 'scopes': 'SCOPES', 'text': 'TEXT',
            'expireTime': 'EXPIRES', 'count': 'COUNT', 'lastUsedTime': 'LAST USED', 'customer': 'CUSTOMER'
        }
        rows = [k.tabular(timezone) for k in client.get_keys()]
        click.echo(tabulate(rows, headers=headers, tablefmt=obj['output']))
    else:
        # JSON output: dump the raw API payload.
        r = client.http.get('/keys')
        click.echo(json.dumps(r['keys'], sort_keys=True, indent=4, ensure_ascii=False))
### Input:
List API keys.
### Response:
def cli(obj):
"""List API keys."""
client = obj['client']
if obj['output'] == 'json':
r = client.http.get('/keys')
click.echo(json.dumps(r['keys'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {
'id': 'ID', 'key': 'API KEY', 'user': 'USER', 'scopes': 'SCOPES', 'text': 'TEXT',
'expireTime': 'EXPIRES', 'count': 'COUNT', 'lastUsedTime': 'LAST USED', 'customer': 'CUSTOMER'
}
click.echo(tabulate([k.tabular(timezone) for k in client.get_keys()], headers=headers, tablefmt=obj['output'])) |
def _get_request(self, url, headers, params=None):
    """
    Issue a GET request to the specified endpoint with the data provided.
    :param url: str
    :param headers: dict
    :param params: dict -- optional query-string parameters
    :return: the response object returned by the underlying session's ``get``
    """
    return self._session.get(url, headers=headers, params=params)
:param url: str
:pararm headers: dict
:param params: dict | Below is the the instruction that describes the task:
### Input:
Issue a GET request to the specified endpoint with the data provided.
:param url: str
:pararm headers: dict
:param params: dict
### Response:
def _get_request(self, url, headers, params=None):
"""
Issue a GET request to the specified endpoint with the data provided.
:param url: str
:pararm headers: dict
:param params: dict
"""
return self._session.get(url, headers=headers, params=params) |
def render(template_file, saltenv='base', sls='', argline='',
           context=None, tmplpath=None, **kws):
    '''
    Render the template_file, passing the functions and grains into the
    Jinja rendering system.
    :rtype: string
    '''
    # '-s' is the only supported renderer option; any other non-empty
    # argline is rejected.
    # NOTE(review): from_str is only used for this validation — confirm
    # whether it should also be forwarded to the template engine.
    from_str = argline == '-s'
    if not from_str and argline:
        raise SaltRenderError(
            'Unknown renderer option: {opt}'.format(opt=argline)
        )
    tmp_data = salt.utils.templates.JINJA(template_file,
                                          to_str=True,
                                          salt=_split_module_dicts(),
                                          grains=__grains__,
                                          opts=__opts__,
                                          pillar=__pillar__,
                                          saltenv=saltenv,
                                          sls=sls,
                                          context=context,
                                          tmplpath=tmplpath,
                                          proxy=__proxy__,
                                          **kws)
    if not tmp_data.get('result', False):
        # Rendering failed: surface the engine's error message if present.
        raise SaltRenderError(
            tmp_data.get('data', 'Unknown render error in jinja renderer')
        )
    if isinstance(tmp_data['data'], bytes):
        # Normalise rendered bytes to text using the system encoding.
        tmp_data['data'] = tmp_data['data'].decode(__salt_system_encoding__)
    return StringIO(tmp_data['data'])
Jinja rendering system.
:rtype: string | Below is the the instruction that describes the task:
### Input:
Render the template_file, passing the functions and grains into the
Jinja rendering system.
:rtype: string
### Response:
def render(template_file, saltenv='base', sls='', argline='',
context=None, tmplpath=None, **kws):
'''
Render the template_file, passing the functions and grains into the
Jinja rendering system.
:rtype: string
'''
from_str = argline == '-s'
if not from_str and argline:
raise SaltRenderError(
'Unknown renderer option: {opt}'.format(opt=argline)
)
tmp_data = salt.utils.templates.JINJA(template_file,
to_str=True,
salt=_split_module_dicts(),
grains=__grains__,
opts=__opts__,
pillar=__pillar__,
saltenv=saltenv,
sls=sls,
context=context,
tmplpath=tmplpath,
proxy=__proxy__,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(
tmp_data.get('data', 'Unknown render error in jinja renderer')
)
if isinstance(tmp_data['data'], bytes):
tmp_data['data'] = tmp_data['data'].decode(__salt_system_encoding__)
return StringIO(tmp_data['data']) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.