text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _remove_predicates(xast, node, context):
    '''Remove any constructible predicates specified in the xpath
    relative to the specified node.

    :param xast: parsed xpath (xpath abstract syntax tree) from
        :mod:`eulxml.xpath`
    :param node: lxml element which predicates will be removed from
    :param context: any context required for the xpath (e.g.,
        namespace definitions)
    :returns: updated a copy of the xast without the predicates that
        were successfully removed
    '''
    # work from a copy since it may be modified
    xast_c = deepcopy(xast)
    # check if predicates are constructable
    # iterate over a list() copy because predicates may be removed mid-loop
    for pred in list(xast_c.predicates):
        # ignore predicates that we can't construct
        if not _predicate_is_constructible(pred):
            continue
        if isinstance(pred, ast.BinaryExpression):
            # TODO: support any other predicate operators?
            # predicate construction supports op /
            # If the xml still matches the constructed value, remove it.
            # e.g., @type='text' or level='leaf'
            if pred.op == '=' and \
                    node.xpath(serialize(pred), **context) is True:
                # predicate xpath returns True if node=value
                if isinstance(pred.left, ast.Step):
                    if pred.left.axis in ('@', 'attribute'):
                        # attribute predicate, e.g. [@type='text']
                        if _remove_attribute_node(node, context, pred.left):
                            # remove from the xast
                            xast_c.predicates.remove(pred)
                    elif pred.left.axis in (None, 'child'):
                        # child-element predicate, e.g. [level='leaf'];
                        # node is only removed when left empty afterwards
                        if _remove_child_node(node, context, pred.left, if_empty=True):
                            xast_c.predicates.remove(pred)
                elif isinstance(pred.left, ast.BinaryExpression):
                    # e.g., level/@id='b' or level/deep='deeper'
                    # - value has already been checked by xpath above,
                    # so just remove the multipart path
                    # NOTE(review): unlike the branches above, this branch
                    # does not remove pred from xast_c.predicates -- confirm
                    # whether that is intentional.
                    _remove_xml(pred.left, node, context, if_empty=True)
    return xast_c
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pop(self, i=None):
    """Remove and return the item at position ``i``; when no index is
    given, remove and return the last item (mirrors :meth:`list.pop`)."""
    index = len(self) - 1 if i is None else i
    item = self[index]
    del self[index]
    return item
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_scc_from_tuples(constraints):
    """Given a set of equivalences, return a map of transitive
    equivalence classes.

    >> constraints = [(1,2), (2,3)]
    >> get_scc_from_tuples(constraints)
    { 1: (1, 2, 3), 2: (1, 2, 3), 3: (1, 2, 3), }
    """
    equivalence_classes = unionfind.classes(constraints)
    # every member maps to an immutable tuple of its whole class
    return {member: tuple(cls)
            for member, cls in equivalence_classes.iteritems()}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_field_list(fieldnames, include_parents=False):
    """Parse a list of field names, possibly including dot-separated
    subform fields, into an internal ParsedFieldList object representing
    the base fields and subforms listed.

    :param fieldnames: a list of field names as strings. dot-separated
        names are interpreted as subform fields.
    :param include_parents: optional boolean, defaults to False. if True,
        subform fields implicitly include their parent fields in the
        parsed list.
    """
    # lazily split each dotted name into its path components
    split_names = (fieldname.split('.') for fieldname in fieldnames)
    return _collect_fields(split_names, include_parents)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xmlobject_to_dict(instance, fields=None, exclude=None, prefix=''):
    """ Generate a dictionary based on the data in an XmlObject instance
    to pass as a Form's ``initial`` keyword argument.

    :param instance: instance of :class:`~eulxml.xmlmap.XmlObject`
    :param fields: optional list of fields - if specified, only the named
        fields will be included in the data returned
    :param exclude: optional list of fields to exclude from the data
    :param prefix: optional prefix prepended (with '-') to every key, used
        when recursing into subfields
    """
    data = {}
    # convert prefix to combining form for convenience
    if prefix:
        prefix = '%s-' % prefix
    else:
        prefix = ''
    for name, field in six.iteritems(instance._fields):
        # not editable?
        if fields and not name in fields:
            continue
        if exclude and name in exclude:
            continue
        if isinstance(field, xmlmap.fields.NodeField):
            # single subnode: recurse with a dashed prefix
            nodefield = getattr(instance, name)
            if nodefield is not None:
                subprefix = '%s%s' % (prefix, name)
                node_data = xmlobject_to_dict(nodefield, prefix=subprefix)
                data.update(node_data)  # FIXME: fields/exclude
        # NOTE(review): this is `if`, not `elif`, so a NodeField (which is
        # not a NodeListField) also falls through to the `else` below and
        # is additionally stored directly under its own key -- confirm
        # whether that is intentional.
        if isinstance(field, xmlmap.fields.NodeListField):
            # list of subnodes: recurse once per child with an indexed prefix
            for i, child in enumerate(getattr(instance, name)):
                subprefix = '%s%s-%d' % (prefix, name, i)
                node_data = xmlobject_to_dict(child, prefix=subprefix)
                data.update(node_data)  # FIXME: fields/exclude
        else:
            data[prefix + name] = getattr(instance, name)
    return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_instance(self):
    """Save bound form data into the XmlObject model instance and return
    the updated instance."""
    # NOTE: django model form has a save method - not applicable here,
    # since an XmlObject by itself is not expected to have a save method
    # (only likely to be saved in context of a fedora or exist object)
    if hasattr(self, 'cleaned_data'):  # possible to have an empty object/no data
        opts = self._meta
        # NOTE: _fields doesn't seem to order, which is
        # problematic for some xml (e.g., where order matters for validity)
        # use field order as declared in the form for update order
        # when possible.
        # (NOTE: this could be problematic also, since display order may
        # not always be the same as schema order)
        fields_in_order = []
        if hasattr(self.Meta, 'fields'):
            fields_in_order.extend(self.Meta.fields)
            # NOTE(review): this second extend re-adds instance fields that
            # are *already* in Meta.fields, so those fields appear twice
            # and are set twice below. If the intent was to append the
            # remaining instance fields instead, the condition should be
            # `not in` -- confirm against callers/tests.
            fields_in_order.extend([name for name in six.iterkeys(self.instance._fields)
                                    if name in self.Meta.fields])
        else:
            fields_in_order = self.instance._fields.keys()
        for name in fields_in_order:
            # for name in self.instance._fields.iterkeys():
            # for name in self.declared_fields.iterkeys():
            if opts.fields and name not in opts.parsed_fields.fields:
                continue
            if opts.exclude and name in opts.parsed_exclude.fields:
                continue
            if name in self.cleaned_data:
                # special case: we don't want empty attributes and elements
                # for fields which returned no data from the form
                # converting '' to None and letting XmlObject handle
                if self.cleaned_data[name] == '':
                    self.cleaned_data[name] = None
                setattr(self.instance, name, self.cleaned_data[name])
    # update sub-model portions via any subforms
    for name, subform in six.iteritems(self.subforms):
        self._update_subinstance(name, subform)
    for formset in six.itervalues(self.formsets):
        formset.update_instance()
    return self.instance
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_subinstance(self, name, subform):
"""Save bound data for a single subform into the XmlObject model instance."""
|
old_subinstance = getattr(self.instance, name)
new_subinstance = subform.update_instance()
# if our instance previously had no node for the subform AND the
# updated one has data, then attach the new node.
if old_subinstance is None and not new_subinstance.is_empty():
setattr(self.instance, name, new_subinstance)
# on the other hand, if the instance previously had a node for the
# subform AND the updated one is empty, then remove the node.
if old_subinstance is not None and new_subinstance.is_empty():
delattr(self.instance, name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_bitcoind_connection( rpc_username, rpc_password, server, port, use_https, timeout ):
    """ Creates an RPC client to a bitcoind instance. It will have ".opts"
    defined as a member, which will be a dict that stores the above
    connection options.

    :param rpc_username: JSON-RPC username
    :param rpc_password: JSON-RPC password
    :param server: bitcoind hostname or address (must be non-empty)
    :param port: bitcoind RPC port (must satisfy is_valid_int)
    :param use_https: whether to connect over TLS
    :param timeout: socket timeout, in seconds
    """
    from .bitcoin_blockchain import AuthServiceProxy
    global do_wrap_socket, create_ssl_authproxy
    log.debug("[%s] Connect to bitcoind at %s://%s@%s:%s, timeout=%s" % (os.getpid(), 'https' if use_https else 'http', rpc_username, server, port, timeout) )
    protocol = 'https' if use_https else 'http'
    if not server or len(server) < 1:
        raise Exception('Invalid bitcoind host address.')
    if not port or not is_valid_int(port):
        raise Exception('Invalid bitcoind port number.')
    authproxy_config_uri = '%s://%s:%s@%s:%s' % (protocol, rpc_username, rpc_password, server, port)
    if use_https:
        # TODO: ship with a cert
        if do_wrap_socket:
            # ssl._create_unverified_context and ssl.create_default_context are not supported.
            # wrap the socket directly
            connection = BitcoindConnection( server, int(port), timeout=timeout )
            ret = AuthServiceProxy(authproxy_config_uri, connection=connection)
        elif create_ssl_authproxy:
            # ssl has _create_unverified_context, so we're good to go
            ret = AuthServiceProxy(authproxy_config_uri, timeout=timeout)
        else:
            # have to set up an unverified context ourselves
            # NOTE: certificate verification is deliberately disabled here
            ssl_ctx = ssl.create_default_context()
            ssl_ctx.check_hostname = False
            ssl_ctx.verify_mode = ssl.CERT_NONE
            connection = httplib.HTTPSConnection( server, int(port), context=ssl_ctx, timeout=timeout )
            ret = AuthServiceProxy(authproxy_config_uri, connection=connection)
    else:
        # plain HTTP.
        # NOTE(review): no timeout is passed in this branch -- confirm
        # whether that is intentional.
        ret = AuthServiceProxy(authproxy_config_uri)
    # remember the options
    bitcoind_opts = {
        "bitcoind_user": rpc_username,
        "bitcoind_passwd": rpc_password,
        "bitcoind_server": server,
        "bitcoind_port": port,
        "bitcoind_use_https": use_https,
        "bitcoind_timeout": timeout
    }
    setattr( ret, "opts", bitcoind_opts )
    return ret
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect_bitcoind_impl( bitcoind_opts ):
    """ Create a connection to bitcoind, using a dict of config options.

    :param bitcoind_opts: dict with at least 'bitcoind_user',
        'bitcoind_passwd', 'bitcoind_server' and 'bitcoind_port';
        'bitcoind_timeout' (default 300) and 'bitcoind_use_https'
        (default False) are optional.
    :raises ValueError: if no RPC port is given, or if port/timeout are
        not numeric
    :return: result of create_bitcoind_connection()
    """
    # `.get(...) is None` covers both "key missing" and "explicitly None",
    # without materializing .keys()
    if bitcoind_opts.get('bitcoind_port') is None:
        log.error("No port given")
        raise ValueError("No RPC port given (bitcoind_port)")
    if bitcoind_opts.get('bitcoind_timeout') is None:
        # default
        bitcoind_opts['bitcoind_timeout'] = 300
    try:
        int(bitcoind_opts['bitcoind_port'])
    except (TypeError, ValueError):
        # narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) are not intercepted and mislogged
        log.error("Not an int: '%s'" % bitcoind_opts.get('bitcoind_port'))
        raise
    try:
        float(bitcoind_opts.get('bitcoind_timeout', 300))
    except (TypeError, ValueError):
        log.error("Not a float: '%s'" % bitcoind_opts.get('bitcoind_timeout', 300))
        raise
    return create_bitcoind_connection( bitcoind_opts['bitcoind_user'], bitcoind_opts['bitcoind_passwd'],
                                       bitcoind_opts['bitcoind_server'], int(bitcoind_opts['bitcoind_port']),
                                       bitcoind_opts.get('bitcoind_use_https', False),
                                       float(bitcoind_opts.get('bitcoind_timeout', 300)) )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bitcoind_client(config_path=None, bitcoind_opts=None):
    """ Connect to bitcoind.

    :param config_path: path to a config file with connection options
    :param bitcoind_opts: pre-built dict of connection options; when
        given, it is used as-is and the config file is not read
    :raises ValueError: if neither argument is given
    :return: a bitcoind RPC client
    """
    if bitcoind_opts is None and config_path is None:
        raise ValueError("Need bitcoind opts or config path")
    # BUG FIX: previously the config file was read unconditionally,
    # clobbering any caller-supplied bitcoind_opts; only fall back to the
    # config file when no opts were passed in.
    if bitcoind_opts is None:
        bitcoind_opts = get_bitcoind_config(config_file=config_path)
    log.debug("Connect to bitcoind at %s:%s (%s)" % (bitcoind_opts['bitcoind_server'], bitcoind_opts['bitcoind_port'], config_path))
    client = connect_bitcoind_impl( bitcoind_opts )
    return client
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_privkey_compressed(privkey, compressed=True):
    """ Ensure the given hex private key carries (or does not carry) the
    '01' compressed-key suffix.

    :param privkey: private key as a 64- or 66-character hex string
    :param compressed: desired form; True appends '01', False strips it
    :raises ValueError: on bad length, or when stripping a key that does
        not end in '01'
    :return: the adjusted hex string
    """
    if len(privkey) not in (64, 66):
        raise ValueError("expected 32-byte private key as a hex string")
    has_suffix = len(privkey) == 66
    if compressed and not has_suffix:
        return privkey + '01'
    if not compressed and has_suffix:
        if not privkey.endswith('01'):
            raise ValueError("private key does not end in '01'")
        return privkey[:-2]
    # already in the requested form
    return privkey
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pubkey_hex( privatekey_hex ):
    """ Get the uncompressed hex form of a private key

    :param privatekey_hex: private key as a hex string; a trailing '01'
        ("compressed" hint) is accepted and stripped
    :raises ValueError: if the key is not a string, or the >64-char form
        does not end in '01'
    :return: uncompressed SEC public key, hex-encoded
        ('04' + 64 hex chars of X + 64 hex chars of Y)
    """
    # NOTE: Python 2 only -- `unicode` is referenced below
    if not isinstance(privatekey_hex, (str, unicode)):
        raise ValueError("private key is not a hex string but {}".format(str(type(privatekey_hex))))
    # remove 'compressed' hint
    if len(privatekey_hex) > 64:
        if privatekey_hex[-2:] != '01':
            raise ValueError("private key does not end in 01")
        privatekey_hex = privatekey_hex[:64]
    # get hex public key
    privatekey_int = int(privatekey_hex, 16)
    # derive the secp256k1 public point via the `cryptography` library
    privk = ec.derive_private_key(privatekey_int, ec.SECP256K1(), default_backend())
    pubk = privk.public_key()
    x = pubk.public_numbers().x
    y = pubk.public_numbers().y
    # uncompressed SEC encoding: 0x04 || X (32 bytes) || Y (32 bytes)
    pubkey_hex = "04{:064x}{:064x}".format(x, y)
    return pubkey_hex
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_privkey_hex(privkey_hex):
    """ Decode a private key for ecdsa signature.

    :param privkey_hex: private key as a hex string, optionally carrying
        the trailing '01' compressed-key hint
    :raises ValueError: if not a string, or a long key lacks the '01' tail
    :return: the private key as an int
    """
    if not isinstance(privkey_hex, (str, unicode)):
        raise ValueError("private key is not a string")
    # force uncompressed: strip the '01' suffix when present
    key = str(privkey_hex)
    if len(key) > 64:
        if not key.endswith('01'):
            raise ValueError("private key does not end in '01'")
        key = key[:64]
    return int(key, 16)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_pubkey_hex(pubkey_hex):
    """ Decode a public key for ecdsa verification.

    :param pubkey_hex: public key as a hex string (compressed or not)
    :raises ValueError: if the key is not a string
    :return: (x, y) tuple of ints -- the public point coordinates
    """
    if not isinstance(pubkey_hex, (str, unicode)):
        raise ValueError("public key is not a string")
    uncompressed = keylib.key_formatting.decompress(str(pubkey_hex))
    # '04' prefix followed by two 64-character coordinates
    assert len(uncompressed) == 130
    coords = uncompressed[2:]
    return (int(coords[:64], 16), int(coords[64:], 16))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encode_signature(sig_r, sig_s):
    """ Encode an ECDSA signature, with low-s.

    :param sig_r: signature r value (int)
    :param sig_s: signature s value (int)
    :return: base64 encoding of the 64-byte (r || s) signature
    """
    import binascii
    # enforce low-s: if s is in the upper half of the curve order, flip it
    if sig_s * 2 >= SECP256k1_order:
        log.debug("High-S to low-S")
        sig_s = SECP256k1_order - sig_s
    # binascii.unhexlify works on both Python 2 and 3, unlike the previous
    # str.decode('hex'), which is Python 2 only
    sig_bin = binascii.unhexlify('{:064x}{:064x}'.format(sig_r, sig_s))
    assert len(sig_bin) == 64
    sig_b64 = base64.b64encode(sig_bin)
    return sig_b64
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_signature(sigb64):
    """ Decode a signature into r, s.

    :param sigb64: base64-encoded 64-byte (r || s) signature
    :raises ValueError: if the decoded signature is not exactly 64 bytes
    :return: (sig_r, sig_s) tuple of ints
    """
    import binascii
    sig_bin = base64.b64decode(sigb64)
    if len(sig_bin) != 64:
        raise ValueError("Invalid base64 signature")
    # binascii.hexlify works on both Python 2 and 3, unlike the previous
    # bytes.encode('hex'), which is Python 2 only
    sig_hex = binascii.hexlify(sig_bin)
    sig_r = int(sig_hex[:64], 16)
    sig_s = int(sig_hex[64:], 16)
    return sig_r, sig_s
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sign_raw_data(raw_data, privatekey_hex):
    """ Sign a string of data. Returns the signature as a base64 string.

    :param raw_data: the string to sign
    :param privatekey_hex: signing key as a hex string
    :raises ValueError: if raw_data is not a string
    """
    if not isinstance(raw_data, (str, unicode)):
        raise ValueError("Data is not a string")
    signer = ECSigner(privatekey_hex)
    signer.update(str(raw_data))
    return signer.finalize()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_raw_data(raw_data, pubkey_hex, sigb64):
    """ Verify the signature over a string, given the public key and
    base64-encoded signature.

    :param raw_data: the string that was signed
    :param pubkey_hex: public key as a hex string
    :param sigb64: base64-encoded signature
    :raises ValueError: if raw_data is not a string
    :return: True on success, False on error
    """
    if not isinstance(raw_data, (str, unicode)):
        raise ValueError("data is not a string")
    verifier = ECVerifier(pubkey_hex, sigb64)
    verifier.update(str(raw_data))
    return verifier.verify()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sign_digest(hash_hex, privkey_hex, hashfunc=hashlib.sha256):
    """ Given a digest and a private key, sign it. Return the
    base64-encoded signature.

    :param hash_hex: hex-encoded digest to sign (treated as an
        already-computed hash; it is signed as-is via Prehashed)
    :param privkey_hex: signing key as a hex string
    :param hashfunc: unused in the body below -- the ECDSA call
        hard-codes SHA256; kept for interface compatibility
    :raises ValueError: if hash_hex is not a string
    """
    if not isinstance(hash_hex, (str, unicode)):
        raise ValueError("hash hex is not a string")
    hash_hex = str(hash_hex)
    pk_i = decode_privkey_hex(privkey_hex)
    # derive the secp256k1 signing key via the `cryptography` library
    privk = ec.derive_private_key(pk_i, ec.SECP256K1(), default_backend())
    # NOTE: str.decode('hex') is Python 2 only
    sig = privk.sign(hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256())))
    sig_r, sig_s = decode_dss_signature(sig)
    sigb64 = encode_signature(sig_r, sig_s)
    return sigb64
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
    """ Get the base64-encoded signature itself. Can only be called once
    (the underlying signer is consumed). """
    raw_signature = self.signer.finalize()
    # DER signature -> (r, s) ints -> 64-byte base64 form
    r, s = decode_dss_signature(raw_signature)
    return encode_signature(r, s)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, data):
    """ Update the hash used to generate the signature.

    :param data: chunk of data fed into the underlying verifier's hash
    :raises TypeError: re-raised (after logging the offending value) when
        the underlying verifier rejects the data's type
    """
    try:
        self.verifier.update(data)
    except TypeError:
        # log what was actually passed before propagating the error
        log.error("Invalid data: {} ({})".format(type(data), data))
        raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def semiconvergents(x):
    """Generate the semiconvergents of the continued fraction expansion
    of the Fraction ``x`` -- the convergents plus every intermediate
    (mediant) fraction -- beginning with floor(x)."""
    # first Euclidean step: a = floor(x); (remainder, denom) drive the rest
    a, remainder = divmod(x.numerator, x.denominator)
    denom = x.denominator
    yield Fraction(a)
    # (prev_p/prev_q) and (cur_p/cur_q) are the last two convergents
    prev_p, prev_q, cur_p, cur_q = 1, 0, a, 1
    while remainder:
        a, new_remainder = divmod(denom, remainder)
        denom, remainder = remainder, new_remainder
        # each mediant step yields one semiconvergent; the a-th one is the
        # next true convergent
        for _ in range(a):
            prev_p += cur_p
            prev_q += cur_q
            yield Fraction(prev_p, prev_q)
        prev_p, prev_q, cur_p, cur_q = cur_p, cur_q, prev_p, prev_q
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad', axis=None):
    """Replace values given in 'to_replace' with 'value'.

    Wrapper around the :meth:`pandas.DataFrame.replace` method; all
    arguments are forwarded unchanged. When not in-place, returns a new
    instance carrying copies of this object's metadata.
    """
    forwarded = dict(to_replace=to_replace, value=value, inplace=inplace,
                     limit=limit, regex=regex, method=method, axis=axis)
    if inplace:
        self._frame.replace(**forwarded)
    else:
        replaced = self.__class__(self._frame.replace(**forwarded))
        replaced.metadata = self.metadata.copy()
        replaced._metadata = copy.deepcopy(self._metadata)
        return replaced
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(self, other, ignore_index=False):
    """Append rows of `other` to the end of this frame, returning a new
    object. Wrapper around the :meth:`pandas.DataFrame.append` method.

    Args:
        other (Cartesian): instance of the same class.
        ignore_index (sequence, bool, int): If a boolean, behaves as in
            :meth:`pandas.DataFrame.append`. If a sequence, it becomes
            the new index. If an integer,
            ``range(ignore_index, ignore_index + len(new))`` becomes the
            new index.

    Returns:
        Cartesian:
    """
    if not isinstance(other, self.__class__):
        raise ValueError('May only append instances of same type.')
    # `type(...) is` (not isinstance) is deliberate: True/False must not
    # be treated as the integer case
    if type(ignore_index) is bool:
        combined = self._frame.append(other._frame,
                                      ignore_index=ignore_index,
                                      verify_integrity=True)
    else:
        # sequence or int: append first, then install the requested index
        combined = self._frame.append(other._frame,
                                      ignore_index=True,
                                      verify_integrity=True)
        if type(ignore_index) is int:
            combined.index = range(ignore_index,
                                   ignore_index + len(combined))
        else:
            combined.index = ignore_index
    return self.__class__(combined)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self, *args, **kwargs):
    """Applies function along input axis of DataFrame.

    Wrapper around the :meth:`pandas.DataFrame.apply` method that wraps
    the result back into this class, preserving metadata.
    """
    applied = self._frame.apply(*args, **kwargs)
    return self.__class__(applied,
                          metadata=self.metadata,
                          _metadata=self._metadata)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def applymap(self, *args, **kwargs):
    """Applies function elementwise.

    Wrapper around the :meth:`pandas.DataFrame.applymap` method that
    wraps the result back into this class, preserving metadata.
    """
    mapped = self._frame.applymap(*args, **kwargs)
    return self.__class__(mapped,
                          metadata=self.metadata,
                          _metadata=self._metadata)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def marshal_with_model(model, excludes=None, only=None, extends=None):
    """Decorator: let a view function return an ORM model instance or an
    ORM query directly; the result is transformed to standard python data
    structures, like Flask-RESTful's `marshal_with`, without having to
    declare fields by hand.

    Columns can be selected with the `excludes` or `only` parameter
    (don't use the two parameters at the same time, otherwise only
    `excludes` is honored). Fields outside of the model, or overridden
    field types, go in `extends`.

    Notice: this function only supports `Flask-SQLAlchemy` models.
    """
    # normalize string arguments to single-element lists;
    # excludes takes precedence over only
    if isinstance(excludes, six.string_types):
        excludes = [excludes]
    if excludes and only:
        only = None
    elif isinstance(only, six.string_types):
        only = [only]
    # build the field definition from the model's columns
    field_definition = {}
    for column in model.__table__.columns:
        if only:
            if column.name not in only:
                continue
        elif excludes and column.name in excludes:
            continue
        field_definition[column.name] = _type_map[column.type.python_type.__name__]
    if extends is not None:
        field_definition.update(extends)
    def decorated(f):
        @wraps(f)
        @_marshal_with(field_definition)
        def wrapper(*args, **kwargs):
            result = f(*args, **kwargs)
            # queries (indexable, non-string) are materialized to a list
            if _fields.is_indexable_but_not_string(result):
                return [item for item in result]
            return result
        return wrapper
    return decorated
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quick_marshal(*args, **kwargs):
    """In some cases, one view function may return different models in
    different situations. Using `marshal_with_model` to handle this is
    tedious; this function simplifies the process.

    Usage:
        quick_marshal(args_to_marshal_with_model)(db_instance_or_query)
    """
    # build a pass-through function and decorate it on the fly
    @marshal_with_model(*args, **kwargs)
    def fn(value):
        return value
    return fn
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _wrap_field(field):
    """Improve Flask-RESTful's original field type: null values stay null
    instead of being replaced by the field's default."""
    class WrappedField(field):
        def output(self, key, obj):
            lookup_key = self.attribute if self.attribute is not None else key
            value = _fields.get_value(lookup_key, obj)
            # For all fields, a null (None) value is returned as null
            # directly instead of the field's default (e.g. 0 for int),
            # because sometimes the client **needs** to know whether a
            # field of the model was empty, to decide its behavior.
            if value is None:
                return None
            return self.format(value)
    return WrappedField
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_configuration_file(filepath=_give_default_file_path(), overwrite=False):
    """Create a configuration file.

    Writes the current state of settings into a configuration file.

    .. note:: Since a file is permanently written, this function is
        strictly speaking not side-effect free.

    Args:
        filepath (str): Where to write the file. The default is under
            both UNIX and Windows ``~/.chemcoordrc``.
            (NOTE(review): the default is evaluated once, at import time.)
        overwrite (bool): overwrite an existing file.

    Returns:
        None:
    """
    config = configparser.ConfigParser()
    config.read_dict(settings)
    if os.path.isfile(filepath) and not overwrite:
        try:
            # on python 3 FileExistsError exists and this propagates to the
            # caller; on python 2 the name lookup fails, and the NameError
            # branch below downgrades the condition to a warning
            raise FileExistsError
        except NameError:  # because of python2
            warn('File exists already and overwrite is False (default).')
    else:
        with open(filepath, 'w') as configfile:
            config.write(configfile)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hasSubseries(self):
    """Check if this component has subseries or not.

    Determined based on the level of the first subcomponent (series or
    subseries), or on whether the first subcomponent itself has
    subcomponents present.

    :rtype: boolean
    """
    children = self.c
    # no subcomponents at all -> no subseries
    if not (children and children[0]):
        return False
    first = children[0]
    if first.level in ('series', 'subseries'):
        return True
    # fall back: does the first subcomponent have children of its own?
    return bool(first.c and first.c[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize( self, M_c, M_r, T, seed, initialization=b'from_the_prior', row_initialization=-1, n_chains=1, ROW_CRP_ALPHA_GRID=(), COLUMN_CRP_ALPHA_GRID=(), S_GRID=(), MU_GRID=(), N_GRID=31,):
    """Sample a latent state from prior.

    T, list of lists: The data table in mapped representation (all
    floats, generated by data_utils.read_data_objects)

    :returns: X_L, X_D -- the latent state
    """
    # FIXME: why is M_r passed?
    arg_tuples = self.get_initialize_arg_tuples(
        M_c, M_r, T, initialization, row_initialization, n_chains,
        ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID, S_GRID, MU_GRID, N_GRID,
        make_get_next_seed(seed),)
    # one initialization per chain, possibly in parallel via self.mapper
    chain_tuples = self.mapper(self.do_initialize, arg_tuples)
    X_L_list, X_D_list = zip(*chain_tuples)
    if n_chains == 1:
        # single chain: unwrap the one-element tuples
        return X_L_list[0], X_D_list[0]
    return X_L_list, X_D_list
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert( self, M_c, T, X_L_list, X_D_list, new_rows=None, N_GRID=31, CT_KERNEL=0):
    """Insert mutates the data T.

    :param new_rows: non-empty list of rows (each itself a list) to add
    :raises ValueError: if new_rows is missing or empty
    :raises TypeError: if new_rows is not a list of lists
    :returns: (X_L_list, X_D_list, T), with T extended by new_rows
    """
    if new_rows is None:
        raise ValueError("new_rows must exist")
    if not isinstance(new_rows, list):
        raise TypeError('new_rows must be list of lists')
    # BUG FIX: guard against an empty list before indexing new_rows[0]
    # (previously raised an unhelpful IndexError)
    if not new_rows:
        raise ValueError("new_rows must not be empty")
    if not isinstance(new_rows[0], list):
        raise TypeError('new_rows must be list of lists')
    X_L_list, X_D_list, was_multistate = su.ensure_multistate(
        X_L_list, X_D_list)
    # get insert arg tuples
    arg_tuples = self.get_insert_arg_tuples(
        M_c, T, X_L_list, X_D_list, new_rows, N_GRID, CT_KERNEL)
    chain_tuples = self.mapper(self.do_insert, arg_tuples)
    X_L_list, X_D_list = zip(*chain_tuples)
    if not was_multistate:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    T.extend(new_rows)
    ret_tuple = X_L_list, X_D_list, T
    return ret_tuple
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, M_c, T, X_L, X_D, seed, kernel_list=(), n_steps=1, c=(), r=(), max_iterations=-1, max_time=-1, do_diagnostics=False, diagnostics_every_N=1, ROW_CRP_ALPHA_GRID=(), COLUMN_CRP_ALPHA_GRID=(), S_GRID=(), MU_GRID=(), N_GRID=31, do_timing=False, CT_KERNEL=0, progress=None, ):
    """Evolve the latent state by running MCMC transition kernels.

    :param seed: The random seed
    :type seed: int
    :param M_c: The column metadata
    :type M_c: dict
    :param T: The data table in mapped representation (all floats,
        generated by data_utils.read_data_objects)
    :param X_L: the latent variables associated with the latent state
    :type X_L: dict
    :param X_D: the particular cluster assignments of each row in each view
    :type X_D: list of lists
    :param kernel_list: names of the MCMC transition kernels to run
    :type kernel_list: list of strings
    :param n_steps: the number of times to run each MCMC transition kernel
    :type n_steps: int
    :param c: the (global) column indices to run MCMC transition kernels on
    :type c: list of ints
    :param r: the (global) row indices to run MCMC transition kernels on
    :type r: list of ints
    :param max_iterations: the maximum number of times to run each MCMC
        transition kernel. Applicable only if max_time != -1.
    :type max_iterations: int
    :param max_time: the maximum amount of time (seconds) to run MCMC
        transition kernels for before stopping to return progress
    :type max_time: float
    :param progress: a function accepting
        (n_steps, max_time, step_idx, elapsed_secs, end=None) where
        `n_steps` is the total number of transition steps, `max_time` is
        the timeout in seconds, `step_idx` is the number of transitions so
        far, `elapsed_secs` is the amount of time so far, and `end=None`
        is an optional kwarg for indicating the analysis has been
        completed. For example, `progress` may be used to print a
        progress bar to standard out.
    :type progress: function pointer
    :returns: X_L, X_D -- the evolved latent state
    """
    if n_steps <= 0:
        raise ValueError("You must do at least one analyze step.")
    if CT_KERNEL not in [0, 1]:
        raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
    if do_timing:
        # Diagnostics and timing are exclusive.
        do_diagnostics = False
    diagnostic_func_dict, reprocess_diagnostics_func = \
        do_diagnostics_to_func_dict(do_diagnostics)
    # normalize single-state input into lists of states
    X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
    arg_tuples = self.get_analyze_arg_tuples(
        M_c,
        T,
        X_L_list,
        X_D_list,
        kernel_list,
        n_steps,
        c,
        r,
        max_iterations,
        max_time,
        diagnostic_func_dict,
        diagnostics_every_N,
        ROW_CRP_ALPHA_GRID,
        COLUMN_CRP_ALPHA_GRID,
        S_GRID,
        MU_GRID,
        N_GRID,
        do_timing,
        CT_KERNEL,
        progress,
        make_get_next_seed(seed))
    # run the chains (possibly in parallel via self.mapper)
    chain_tuples = self.mapper(self.do_analyze, arg_tuples)
    X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
    if do_timing:
        timing_list = diagnostics_dict_list
    if not was_multistate:
        # unwrap the single chain
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    ret_tuple = X_L_list, X_D_list
    if diagnostic_func_dict is not None:
        # merge per-chain diagnostics, then apply any post-processing
        diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
        if reprocess_diagnostics_func is not None:
            diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
        ret_tuple = ret_tuple + (diagnostics_dict, )
    if do_timing:
        ret_tuple = ret_tuple + (timing_list, )
    return ret_tuple
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simple_predictive_sample(self, M_c, X_L, X_D, Y, Q, seed, n=1):
    """Sample values from the predictive distribution of the given latent
    state.

    :param Y: constraints to apply when sampling, as (r, d, v) triplets:
        r is the row index, d the column index, v the constrained value
    :type Y: list of lists
    :param Q: cells to sample, as (r, d) doublets
    :type Q: list of lists
    :param n: the number of samples to draw
    :type n: int
    :returns: list of floats, in the same order specified by Q
    """
    return _do_simple_predictive_sample(
        M_c, X_L, X_D, Y, Q, n, make_get_next_seed(seed))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mutual_information( self, M_c, X_L_list, X_D_list, Q, seed, n_samples=1000):
"""Estimate mutual information for each pair of columns on Q given the set of samples. :param Q: List of tuples where each tuple contains the two column indexes to compare :type Q: list of two-tuples of ints :param n_samples: the number of simple predictive samples to use :type n_samples: int :returns: list of list -- where each sublist is a set of MIs and Linfoots from each crosscat sample. """
|
# One seed generator is shared across all predictive samples drawn for
# the mutual-information estimate; delegates to the information utils.
get_next_seed = make_get_next_seed(seed)
return iu.mutual_information(
    M_c, X_L_list, X_D_list, Q, get_next_seed, n_samples)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def similarity( self, M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_columns=None):
"""Computes the similarity of the given row to the target row, averaged over all the column indexes given by target_columns. :param given_row_id: the id of one of the rows to measure similarity between :type given_row_id: int :param target_row_id: the id of the other row to measure similarity between :type target_row_id: int :param target_columns: the columns to average the similarity over. Defaults to all columns. :type target_columns: int, string, or list of ints :returns: float """
|
# Pure delegation to the sample-utils implementation; deterministic,
# no seed is required.
return su.similarity(
    M_c, X_L_list, X_D_list, given_row_id,
    target_row_id, target_columns)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def impute(self, M_c, X_L, X_D, Y, Q, seed, n):
"""Impute values from predictive distribution of the given latent state. :param Y: A list of constraints to apply when sampling. Each constraint is a triplet of (r,d,v):
r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to sample. Each value is doublet of (r, d):
r is the row index, d is the column index :type Q: list of lists :param n: the number of samples to use in the imputation :type n: int :returns: list of floats -- imputed values in the same order as specified by Q """
|
# Seed stream for the n predictive samples used by the imputation.
get_next_seed = make_get_next_seed(seed)
e = su.impute(M_c, X_L, X_D, Y, Q, n, get_next_seed)
return e
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def impute_and_confidence(self, M_c, X_L, X_D, Y, Q, seed, n):
"""Impute values and confidence of the value from the predictive distribution of the given latent state. :param Y: A list of constraints to apply when sampling. Each constraint is a triplet of (r, d, v):
r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to sample. Each value is doublet of (r, d):
r is the row index, d is the column index :type Q: list of lists :param n: the number of samples to use in the imputation :type n: int :returns: list of lists -- list of (value, confidence) tuples in the same order as specified by Q """
|
get_next_seed = make_get_next_seed(seed)
if isinstance(X_L, (list, tuple)):
    assert isinstance(X_D, (list, tuple))
    # TODO: multistate impute doesn't exist yet
    # e,confidence = su.impute_and_confidence_multistate(
    # M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
    # NOTE(review): until the multistate version exists, both branches
    # call the same single-state routine.
    e, confidence = su.impute_and_confidence(
        M_c, X_L, X_D, Y, Q, n, get_next_seed)
else:
    e, confidence = su.impute_and_confidence(
        M_c, X_L, X_D, Y, Q, n, get_next_seed)
return (e, confidence)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_col_dep_constraints( self, M_c, M_r, T, X_L, X_D, dep_constraints, seed, max_rejections=100):
"""Ensures dependencey or indepdendency between columns. `dep_constraints` is a list of where each entry is an (int, int, bool) tuple where the first two entries are column indices and the third entry describes whether the columns are to be dependent (True) or independent (False). Behavior Notes: `ensure_col_dep_constraints` will add `col_ensure` enforcement to the metadata (top level of `X_L`); unensure_col will remove it. Calling ensure_col_dep_constraints twice will replace the first ensure. This operation destroys the existing `X_L` and `X_D` metadata; the user should be aware that it will clobber any existing analyses. Implementation Notes: Initialization is implemented via rejection (by repeatedly initalizing states and throwing ones out that do not adhear to dep_constraints). This means that in the event the contraints in dep_constraints are complex, or impossible, that the rejection alogrithm may fail. The returned metadata looks like this: [(1, 2, True), (2, 5, True), (1, 3, False)] { "dependent" : { 1 : (1, 2, 5), 2 : (1, 2, 5), 5 : (1, 5, 2), }, "independent" : { 1 : [3], 3 : [1], } } """
|
# Normalize to lists of states so the rejection loop below is uniform.
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
if was_multistate:
    num_states = len(X_L_list)
else:
    num_states = 1
# Split the (col1, col2, bool) triples by the dependence flag.
dependencies = [(c[0], c[1]) for c in dep_constraints if c[2]]
independencies = [(c[0], c[1]) for c in dep_constraints if not c[2]]
# Metadata keyed True (dependent) / False (independent); each maps a
# stringified column index to its strongly-connected component.
col_ensure_md = dict()
col_ensure_md[True] = {
    str(key) : list(val) for
    key, val in gu.get_scc_from_tuples(dependencies).iteritems()
}
col_ensure_md[False] = {
    str(key) : list(val) for
    key, val in gu.get_scc_from_tuples(independencies).iteritems()
}
def assert_dep_constraints(X_L, X_D, dep_constraints):
    # True iff every constraint triple holds in this latent state.
    for col1, col2, dep in dep_constraints:
        if not self.assert_col_dep_constraints(
                X_L, X_D, col1, col2, dep, True):
            return False
    return True
X_L_out = []
X_D_out = []
get_next_seed = make_get_next_seed(seed)
for _ in range(num_states):
    counter = 0
    # Rejection sampling: re-initialize from scratch until all
    # constraints hold or the rejection budget is exhausted.
    X_L_i, X_D_i = self.initialize(M_c, M_r, T, get_next_seed())
    while not assert_dep_constraints(X_L_i, X_D_i, dep_constraints):
        if counter > max_rejections:
            # NOTE(review): 'ranomly' is a typo ('randomly') in this
            # runtime message.
            raise RuntimeError(
                'Could not ranomly generate a partition '
                'that satisfies the constraints in dep_constraints.')
        counter += 1
        X_L_i, X_D_i = self.initialize(M_c, M_r, T, get_next_seed())
    # Record the enforced constraints in the accepted state's metadata.
    X_L_i['col_ensure'] = dict()
    X_L_i['col_ensure']['dependent'] = col_ensure_md[True]
    X_L_i['col_ensure']['independent'] = col_ensure_md[False]
    X_D_out.append(X_D_i)
    X_L_out.append(X_L_i)
# Return in the same (single vs. multistate) shape the caller passed in.
if was_multistate:
    return X_L_out, X_D_out
else:
    return X_L_out[0], X_D_out[0]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_row_dep_constraint( self, M_c, T, X_L, X_D, row1, row2, dependent=True, wrt=None, max_iter=100, force=False):
"""Ensures dependencey or indepdendency between rows with respect to columns."""
|
# Normalize to lists of states so the loop below is uniform.
X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
if force:
    # Directly forcing a row partition is not implemented.
    raise NotImplementedError
else:
    kernel_list = ('row_partition_assignements',)
    for i, (X_L_i, X_D_i) in enumerate(zip(X_L_list, X_D_list)):
        iters = 0
        X_L_tmp = copy.deepcopy(X_L_i)
        X_D_tmp = copy.deepcopy(X_D_i)
        # Run single-step row-partition transitions on row1 until the
        # requested row (in)dependence holds, up to max_iter attempts.
        while not self.assert_row(
                X_L_tmp, X_D_tmp, row1, row2,
                dependent=dependent, wrt=wrt):
            if iters >= max_iter:
                raise RuntimeError(
                    'Maximum ensure iterations reached.')
            # XXX No seed?
            # NOTE(review): analyze is fed the original X_L_i/X_D_i,
            # not X_L_tmp/X_D_tmp, so each attempt restarts from the
            # same state -- confirm this is intended.
            res = self.analyze(
                M_c, T, X_L_i, X_D_i, kernel_list=kernel_list,
                n_steps=1, r=(row1,))
            X_L_tmp = res[0]
            X_D_tmp = res[1]
            iters += 1
        X_L_list[i] = X_L_tmp
        X_D_list[i] = X_D_tmp
# Return in the same (single vs. multistate) shape the caller passed in.
if was_multistate:
    return X_L_list, X_D_list
else:
    return X_L_list[0], X_D_list[0]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_format_specifier(specification):
""" Parse the given format specification and return a dictionary containing relevant values. """
|
# Match against the module-level compiled regex; its named groups become
# the fields of the returned dictionary.
m = _parse_format_specifier_regex.match(specification)
if m is None:
    raise ValueError(
        "Invalid format specifier: {!r}".format(specification))
# groupdict('') maps absent optional groups to '' so the defaulting
# logic below works uniformly.
format_dict = m.groupdict('')
# Convert zero-padding into fill and alignment.
zeropad = format_dict.pop('zeropad')
if zeropad:
    # If zero padding is requested, fill and align fields should be absent.
    if format_dict['align']:
        raise ValueError(
            "Invalid format specifier: {!r}".format(specification))
    # Impossible to have 'fill' without 'align'.
    assert not format_dict['fill']
    format_dict['align'] = '='
    format_dict['fill'] = '0'
# Default alignment is right-aligned.
if not format_dict['align']:
    format_dict['align'] = '>'
# Default fill character is space.
if not format_dict['fill']:
    format_dict['fill'] = ' '
# Default sign is '-'.
if not format_dict['sign']:
    format_dict['sign'] = '-'
# Convert minimum width to an int; default is zero.
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
# Convert precision to an int, or `None` if no precision given.
if format_dict['precision']:
    # Strip the leading '.' before converting.
    format_dict['precision'] = int(format_dict['precision'][1:])
else:
    format_dict['precision'] = None
# If no rounding mode is given, assume 'N'.
if not format_dict['rounding']:
    format_dict['rounding'] = 'N'
return format_dict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bonds(self, self_bonding_allowed=False, offset=3, modified_properties=None, use_lookup=False, set_lookup=True, atomic_radius_data=None ):
"""Return a dictionary representing the bonds. .. warning:: This function is **not sideeffect free**, since it assigns the output to a variable ``self._metadata['bond_dict']`` if ``set_lookup`` is ``True`` (which is the default). This is necessary for performance reasons. ``.get_bonds()`` will use or not use a lookup depending on ``use_lookup``. Greatly increases performance if True, but could introduce bugs in certain situations. Just imagine a situation where the :class:`~Cartesian` is changed manually. If you apply lateron a method e.g. :meth:`~get_zmat()` that makes use of :meth:`~get_bonds()` the dictionary of the bonds may not represent the actual situation anymore. You have two possibilities to cope with this problem. Either you just re-execute ``get_bonds`` on your specific instance, or you change the ``internally_use_lookup`` option in the settings. Please note that the internal use of the lookup variable greatly improves performance. Args: modified_properties (dic):
If you want to change the van der Vaals radius of one or more specific atoms, pass a dictionary that looks like:: modified_properties = {index1: 1.5} For global changes use the constants module. offset (float):
use_lookup (bool):
set_lookup (bool):
self_bonding_allowed (bool):
atomic_radius_data (str):
Defines which column of :attr:`constants.elements` is used. The default is ``atomic_radius_cc`` and can be changed with :attr:`settings['defaults']['atomic_radius_data']`. Compare with :func:`add_data`. Returns: dict: Dictionary mapping from an atom index to the set of indices of atoms bonded to. """
|
if atomic_radius_data is None:
    # Fall back to the project-wide default radius column.
    atomic_radius_data = settings['defaults']['atomic_radius_data']
# Inner helper that computes the bond dictionary from scratch.
def complete_calculation():
    # Temporarily switch to a positional 0..n-1 index; restored below.
    old_index = self.index
    self.index = range(len(self))
    fragments = self._divide_et_impera(offset=offset)
    positions = np.array(self.loc[:, ['x', 'y', 'z']], order='F')
    data = self.add_data([atomic_radius_data, 'valency'])
    bond_radii = data[atomic_radius_data]
    if modified_properties is not None:
        # Apply caller-supplied per-atom radius overrides.
        bond_radii.update(pd.Series(modified_properties))
    bond_radii = bond_radii.values
    bond_dict = collections.defaultdict(set)
    for i, j, k in product(*[range(x) for x in fragments.shape]):
        # The following call is not side effect free and changes
        # bond_dict
        self._update_bond_dict(
            fragments[i, j, k], positions, bond_radii,
            bond_dict=bond_dict,
            self_bonding_allowed=self_bonding_allowed)
    for i in set(self.index) - set(bond_dict.keys()):
        # NOTE(review): atoms without bonds get {} (an empty dict), not
        # set() -- confirm whether an empty set was intended.
        bond_dict[i] = {}
    self.index = old_index
    # Translate positional keys back to the original index labels.
    rename = dict(enumerate(self.index))
    bond_dict = {rename[key]: {rename[i] for i in bond_dict[key]}
                 for key in bond_dict}
    return bond_dict
if use_lookup:
    # Prefer the cached result; recompute only on a cache miss.
    try:
        bond_dict = self._metadata['bond_dict']
    except KeyError:
        bond_dict = complete_calculation()
else:
    bond_dict = complete_calculation()
if set_lookup:
    # Cache for later calls with use_lookup=True (documented side effect).
    self._metadata['bond_dict'] = bond_dict
return bond_dict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_coordination_sphere( self, index_of_atom, n_sphere=1, give_only_index=False, only_surface=True, exclude=None, use_lookup=None):
"""Return a Cartesian of atoms in the n-th coordination sphere. Connected means that a path along covalent bonds exists. Args: index_of_atom (int):
give_only_index (bool):
If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. n_sphere (int):
Determines the number of the coordination sphere. only_surface (bool):
Return only the surface of the coordination sphere. exclude (set):
A set of indices that should be ignored for the path finding. use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: A set of indices or a new Cartesian instance. """
|
if use_lookup is None:
    use_lookup = settings['defaults']['use_lookup']
exclude = set() if exclude is None else exclude
bond_dict = self.get_bonds(use_lookup=use_lookup)
i = index_of_atom
if n_sphere != 0:
    # Breadth-first walk over the bond graph, one sphere per iteration;
    # `visited` blocks walking back and honors `exclude`.
    visited = set([i]) | exclude
    try:
        tmp_bond_dict = {j: (bond_dict[j] - visited)
                         for j in bond_dict[i]}
    except KeyError:
        # Atom has no entry in the bond dictionary: nothing to expand.
        tmp_bond_dict = {}
    n = 0
    while tmp_bond_dict and (n + 1) < n_sphere:
        new_tmp_bond_dict = {}
        for i in tmp_bond_dict:
            if i in visited:
                continue
            visited.add(i)
            for j in tmp_bond_dict[i]:
                new_tmp_bond_dict[j] = bond_dict[j] - visited
        tmp_bond_dict = new_tmp_bond_dict
        n += 1
    if only_surface:
        # Only the frontier of the last expansion.
        index_out = set(tmp_bond_dict.keys())
    else:
        index_out = visited | set(tmp_bond_dict.keys())
else:
    # The zeroth sphere is the atom itself.
    index_out = {i}
if give_only_index:
    return index_out - exclude
else:
    return self.loc[index_out - exclude]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _preserve_bonds(self, sliced_cartesian, use_lookup=None):
"""Is called after cutting geometric shapes. If you want to change the rules how bonds are preserved, when applying e.g. :meth:`Cartesian.cut_sphere` this is the function you have to modify. It is recommended to inherit from the Cartesian class to tailor it for your project, instead of modifying the source code of ChemCoord. Args: sliced_frame (Cartesian):
use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: Cartesian: """
|
if use_lookup is None:
    use_lookup = settings['defaults']['use_lookup']
included_atoms_set = set(sliced_cartesian.index)
assert included_atoms_set.issubset(set(self.index)), \
    'The sliced Cartesian has to be a subset of the bigger frame'
bond_dic = self.get_bonds(use_lookup=use_lookup)
# Collect atoms outside the slice that are directly bonded to it.
new_atoms = set([])
for atom in included_atoms_set:
    new_atoms = new_atoms | bond_dic[atom]
new_atoms = new_atoms - included_atoms_set
# Grow the slice by whole connected components until no covalent bond
# is cut anymore.
while not new_atoms == set([]):
    index_of_interest = new_atoms.pop()
    included_atoms_set = (
        included_atoms_set |
        self.get_coordination_sphere(
            index_of_interest,
            n_sphere=float('inf'),
            only_surface=False,
            exclude=included_atoms_set,
            give_only_index=True,
            use_lookup=use_lookup))
    new_atoms = new_atoms - included_atoms_set
molecule = self.loc[included_atoms_set, :]
return molecule
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut_sphere( self, radius=15., origin=None, outside_sliced=True, preserve_bonds=False):
"""Cut a sphere specified by origin and radius. Args: radius (float):
origin (list):
Please note that you can also pass an integer. In this case it is interpreted as the index of the atom which is taken as origin. outside_sliced (bool):
Atoms outside/inside the sphere are cut out. preserve_bonds (bool):
Do not cut covalent bonds. Returns: Cartesian: """
|
if origin is None:
    origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
    origin = np.array(origin, dtype='f8')
else:
    # A scalar is interpreted as an atom index whose position is used
    # as the origin.
    origin = self.loc[origin, ['x', 'y', 'z']]
molecule = self.get_distance_to(origin)
# Strict inequalities: atoms exactly on the sphere are cut in both modes.
if outside_sliced:
    molecule = molecule[molecule['distance'] < radius]
else:
    molecule = molecule[molecule['distance'] > radius]
if preserve_bonds:
    molecule = self._preserve_bonds(molecule)
return molecule
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cut_cuboid( self, a=20, b=None, c=None, origin=None, outside_sliced=True, preserve_bonds=False):
"""Cut a cuboid specified by edge and radius. Args: a (float):
Value of the a edge. b (float):
Value of the b edge. Takes value of a if None. c (float):
Value of the c edge. Takes value of a if None. origin (list):
Please note that you can also pass an integer. In this case it is interpreted as the index of the atom which is taken as origin. outside_sliced (bool):
Atoms outside/inside the sphere are cut away. preserve_bonds (bool):
Do not cut covalent bonds. Returns: Cartesian: """
|
if origin is None:
    origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
    origin = np.array(origin, dtype='f8')
else:
    # A scalar is interpreted as an atom index whose position is used
    # as the origin.
    origin = self.loc[origin, ['x', 'y', 'z']]
# Missing edges default to the length of edge a (a cube if only a given).
b = a if b is None else b
c = a if c is None else c
sides = np.array([a, b, c])
pos = self.loc[:, ['x', 'y', 'z']]
# Normalize each offset by half the edge length per axis; < 1 on the
# largest axis means the atom is inside the cuboid.
# NOTE(review): no absolute value is taken before .max(axis=1), so atoms
# at large negative offsets also satisfy `< 1` -- confirm whether
# .abs() was intended here.
if outside_sliced:
    molecule = self[((pos - origin) / (sides / 2)).max(axis=1) < 1.]
else:
    molecule = self[((pos - origin) / (sides / 2)).max(axis=1) > 1.]
if preserve_bonds:
    molecule = self._preserve_bonds(molecule)
return molecule
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_barycenter(self):
"""Return the mass weighted average location. Args: None Returns: :class:`numpy.ndarray`: """
|
try:
    mass = self['mass'].values
except KeyError:
    # Lazily attach the mass column if it is not present yet.
    mass = self.add_data('mass')['mass'].values
pos = self.loc[:, ['x', 'y', 'z']].values
# Mass-weighted mean position: sum(m_i * r_i) / total mass.
return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bond_lengths(self, indices):
"""Return the distances between given atoms. Calculates the distance between the atoms with indices ``i`` and ``b``. The indices can be given in three ways: * As simple list ``[i, b]`` * As :class:`pd.DataFrame` where ``i`` is taken from the index and ``b`` from the respective column ``'b'``. Args: indices (list):
Returns: :class:`numpy.ndarray`: Vector of distances. """
|
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
    # DataFrame form: i is taken from the index, b from column 'b'.
    i_pos = self.loc[indices.index, coords].values
    b_pos = self.loc[indices.loc[:, 'b'], coords].values
else:
    indices = np.array(indices)
    if len(indices.shape) == 1:
        # Promote a single [i, b] pair to a one-row 2D array.
        indices = indices[None, :]
    i_pos = self.loc[indices[:, 0], coords].values
    b_pos = self.loc[indices[:, 1], coords].values
# Euclidean distance per row.
return np.linalg.norm(i_pos - b_pos, axis=1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_angle_degrees(self, indices):
"""Return the angles between given atoms. Calculates the angle in degrees between the atoms with indices ``i, b, a``. The indices can be given in three ways: * As simple list ``[i, b, a]`` * As :class:`pd.DataFrame` where ``i`` is taken from the index and ``b`` and ``a`` from the respective columns ``'b'`` and ``'a'``. Args: indices (list):
Returns: :class:`numpy.ndarray`: Vector of angles in degrees. """
|
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
    # DataFrame form: i from the index, b and a from columns 'b', 'a'.
    i_pos = self.loc[indices.index, coords].values
    b_pos = self.loc[indices.loc[:, 'b'], coords].values
    a_pos = self.loc[indices.loc[:, 'a'], coords].values
else:
    indices = np.array(indices)
    if len(indices.shape) == 1:
        # Promote a single [i, b, a] triple to a one-row 2D array.
        indices = indices[None, :]
    i_pos = self.loc[indices[:, 0], coords].values
    b_pos = self.loc[indices[:, 1], coords].values
    a_pos = self.loc[indices[:, 2], coords].values
# Unit vectors from the vertex b towards i and a.
BI, BA = i_pos - b_pos, a_pos - b_pos
bi, ba = [v / np.linalg.norm(v, axis=1)[:, None] for v in (BI, BA)]
dot_product = np.sum(bi * ba, axis=1)
# Clamp to [-1, 1] so rounding noise cannot push arccos out of domain.
dot_product[dot_product > 1] = 1
dot_product[dot_product < -1] = -1
angles = np.degrees(np.arccos(dot_product))
return angles
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dihedral_degrees(self, indices, start_row=0):
"""Return the dihedrals between given atoms. Calculates the dihedral angle in degrees between the atoms with indices ``i, b, a, d``. The indices can be given in three ways: * As simple list ``[i, b, a, d]`` * As :class:`pandas.DataFrame` where ``i`` is taken from the index and ``b``, ``a`` and ``d``from the respective columns ``'b'``, ``'a'`` and ``'d'``. Args: indices (list):
Returns: :class:`numpy.ndarray`: Vector of angles in degrees. """
|
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
    # DataFrame form: i from the index, b/a/d from the like-named columns.
    i_pos = self.loc[indices.index, coords].values
    b_pos = self.loc[indices.loc[:, 'b'], coords].values
    a_pos = self.loc[indices.loc[:, 'a'], coords].values
    d_pos = self.loc[indices.loc[:, 'd'], coords].values
else:
    indices = np.array(indices)
    if len(indices.shape) == 1:
        # Promote a single [i, b, a, d] quadruple to a one-row 2D array.
        indices = indices[None, :]
    i_pos = self.loc[indices[:, 0], coords].values
    b_pos = self.loc[indices[:, 1], coords].values
    a_pos = self.loc[indices[:, 2], coords].values
    d_pos = self.loc[indices[:, 3], coords].values
IB = b_pos - i_pos
BA = a_pos - b_pos
AD = d_pos - a_pos
# Normals of the two planes (i,b,a) and (b,a,d).
N1 = np.cross(IB, BA, axis=1)
N2 = np.cross(BA, AD, axis=1)
n1, n2 = [v / np.linalg.norm(v, axis=1)[:, None] for v in (N1, N2)]
dot_product = np.sum(n1 * n2, axis=1)
# Clamp to [-1, 1] so rounding noise cannot push arccos out of domain.
dot_product[dot_product > 1] = 1
dot_product[dot_product < -1] = -1
dihedrals = np.degrees(np.arccos(dot_product))
# the next lines are to test the direction of rotation.
# is a dihedral really 90 or 270 degrees?
# Equivalent to direction of rotation of dihedral
where_to_modify = np.sum(BA * np.cross(n1, n2, axis=1), axis=1) > 0
where_to_modify = np.nonzero(where_to_modify)[0]
# Map angles with the opposite sense to 360 - angle.
length = indices.shape[0] - start_row
sign = np.full(length, 1, dtype='float64')
to_add = np.full(length, 0, dtype='float64')
sign[where_to_modify] = -1
to_add[where_to_modify] = 360
dihedrals = to_add + sign * dihedrals
return dihedrals
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fragmentate(self, give_only_index=False, use_lookup=None):
"""Get the indices of non bonded parts in the molecule. Args: give_only_index (bool):
If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: list: A list of sets of indices or new Cartesian instances. """
|
if use_lookup is None:
    use_lookup = settings['defaults']['use_lookup']
fragments = []
pending = set(self.index)
# Populate the bond lookup once; the loop below reuses it.
self.get_bonds(use_lookup=use_lookup)
while pending:
    # Flood-fill from an arbitrary pending atom to collect one
    # connected component.
    index = self.get_coordination_sphere(
        pending.pop(), use_lookup=True, n_sphere=float('inf'),
        only_surface=False, give_only_index=True)
    pending = pending - index
    if give_only_index:
        fragments.append(index)
    else:
        fragment = self.loc[index]
        # Restrict the cached bond dictionaries to the fragment's atoms.
        fragment._metadata['bond_dict'] = fragment.restrict_bond_dict(
            self._metadata['bond_dict'])
        try:
            fragment._metadata['val_bond_dict'] = (
                fragment.restrict_bond_dict(
                    self._metadata['val_bond_dict']))
        except KeyError:
            # No valency bond dictionary cached; best effort only.
            pass
        fragments.append(fragment)
return fragments
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restrict_bond_dict(self, bond_dict):
"""Restrict a bond dictionary to self. Args: bond_dict (dict):
Look into :meth:`~chemcoord.Cartesian.get_bonds`, to see examples for a bond_dict. Returns: bond dictionary """
|
return {j: bond_dict[j] & set(self.index) for j in self.index}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fragment(self, list_of_indextuples, give_only_index=False, use_lookup=None):
"""Get the indices of the atoms in a fragment. The list_of_indextuples contains all bondings from the molecule to the fragment. ``[(1,3), (2,4)]`` means for example that the fragment is connected over two bonds. The first bond is from atom 1 in the molecule to atom 3 in the fragment. The second bond is from atom 2 in the molecule to atom 4 in the fragment. Args: list_of_indextuples (list):
give_only_index (bool):
If ``True`` a set of indices is returned. Otherwise a new Cartesian instance. use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: A set of indices or a new Cartesian instance. """
|
if use_lookup is None:
    use_lookup = settings['defaults']['use_lookup']
# Molecule-side atoms of each connecting bond are excluded from the walk.
# NOTE(review): the loop variable 'tuple' shadows the builtin.
exclude = [tuple[0] for tuple in list_of_indextuples]
# Start the walk from the fragment-side atom of the first bond.
index_of_atom = list_of_indextuples[0][1]
fragment_index = self.get_coordination_sphere(
    index_of_atom, exclude=set(exclude), n_sphere=float('inf'),
    only_surface=False, give_only_index=True, use_lookup=use_lookup)
if give_only_index:
    return fragment_index
else:
    return self.loc[fragment_index, :]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_without(self, fragments, use_lookup=None):
"""Return self without the specified fragments. Args: fragments: Either a list of :class:`~chemcoord.Cartesian` or a :class:`~chemcoord.Cartesian`. use_lookup (bool):
Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: list: List containing :class:`~chemcoord.Cartesian`. """
|
if use_lookup is None:
    use_lookup = settings['defaults']['use_lookup']
if pd.api.types.is_list_like(fragments):
    # Union of all fragment indices; the NameError branch initializes
    # the accumulator on the first iteration.
    for fragment in fragments:
        try:
            index_of_all_fragments |= fragment.index
        except NameError:
            index_of_all_fragments = fragment.index
else:
    index_of_all_fragments = fragments.index
# Everything not covered by the fragments, split into connected parts.
missing_part = self.loc[self.index.difference(index_of_all_fragments)]
missing_part = missing_part.fragmentate(use_lookup=use_lookup)
# Largest fragment first.
return sorted(missing_part, key=len, reverse=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _jit_pairwise_distances(pos1, pos2):
"""Optimized function for calculating the distance between each pair of points in positions1 and positions2. Does use python mode as fallback, if a scalar and not an array is given. """
|
n1 = pos1.shape[0]
n2 = pos2.shape[0]
D = np.empty((n1, n2))
# Explicit double loop computing Euclidean distances between every
# pair (pos1[i], pos2[j]).
# NOTE(review): the '_jit_' name and the docstring's python-mode
# fallback suggest numba compilation -- decorator not visible here.
for i in range(n1):
    for j in range(n2):
        D[i, j] = np.sqrt(((pos1[i] - pos2[j])**2).sum())
return D
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_inertia(self):
"""Calculate the inertia tensor and transforms along rotation axes. This function calculates the inertia tensor and returns a 4-tuple. The unit is ``amu * length-unit-of-xyz-file**2`` Args: None Returns: dict: The returned dictionary has four possible keys: ``transformed_Cartesian``: A :class:`~chemcoord.Cartesian` that is transformed to the basis spanned by the eigenvectors of the inertia tensor. The x-axis is the axis with the lowest inertia moment, the z-axis the one with the highest. Contains also a column for the mass ``diag_inertia_tensor``: A vector containing the ascendingly sorted inertia moments after diagonalization. ``inertia_tensor``: The inertia tensor in the old basis. ``eigenvectors``: The eigenvectors of the inertia tensor in the old basis. Since the inertia_tensor is hermitian, they are orthogonal and are returned as an orthonormal righthanded basis. The i-th eigenvector corresponds to the i-th eigenvalue in ``diag_inertia_tensor``. """
|
def calculate_inertia_tensor(molecule):
    # I = sum_i m_i * (|r_i|^2 * Id - r_i r_i^T), built via broadcasting.
    masses = molecule.loc[:, 'mass'].values
    pos = molecule.loc[:, ['x', 'y', 'z']].values
    inertia = np.sum(
        masses[:, None, None]
        * ((pos**2).sum(axis=1)[:, None, None]
           * np.identity(3)[None, :, :]
           - pos[:, :, None] * pos[:, None, :]),
        axis=0)
    # Sort eigenpairs by ascending inertia moment.
    diag_inertia, eig_v = np.linalg.eig(inertia)
    sorted_index = np.argsort(diag_inertia)
    diag_inertia = diag_inertia[sorted_index]
    eig_v = eig_v[:, sorted_index]
    return inertia, eig_v, diag_inertia
molecule = self.add_data('mass')
# Shift to the barycenter so the tensor is taken about the center of mass.
molecule = molecule - molecule.get_barycenter()
inertia, eig_v, diag_inertia = calculate_inertia_tensor(molecule)
# Ensure a right-handed orthonormal eigenbasis before transforming.
eig_v = xyz_functions.orthonormalize_righthanded(eig_v)
molecule = molecule.basistransform(eig_v)
return {'transformed_Cartesian': molecule, 'eigenvectors': eig_v,
        'diag_inertia_tensor': diag_inertia, 'inertia_tensor': inertia}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def basistransform(self, new_basis, old_basis=None, orthonormalize=True):
"""Transform the frame to a new basis. This function transforms the cartesian coordinates from an old basis to a new one. Please note that old_basis and new_basis are supposed to have full Rank and consist of three linear independent vectors. If rotate_only is True, it is asserted, that both bases are orthonormal and right handed. Besides all involved matrices are transposed instead of inverted. In some applications this may require the function :func:`xyz_functions.orthonormalize` as a previous step. Args: old_basis (np.array):
new_basis (np.array):
rotate_only (bool):
Returns: Cartesian: The transformed molecule. """
|
if old_basis is None:
    old_basis = np.identity(3)
# det == 1 indicates a proper rotation, so the transpose is the inverse.
is_rotation_matrix = np.isclose(np.linalg.det(new_basis), 1)
if not is_rotation_matrix and orthonormalize:
    new_basis = xyz_functions.orthonormalize_righthanded(new_basis)
    is_rotation_matrix = True
if is_rotation_matrix:
    return dot(np.dot(new_basis.T, old_basis), self)
else:
    # General (non-orthonormal) basis: fall back to a full inverse.
    return dot(np.dot(np.linalg.inv(new_basis), old_basis), self)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_distance_to(self, origin=None, other_atoms=None, sort=False):
"""Return a Cartesian with a column for the distance from origin. """
|
if origin is None:
    origin = np.zeros(3)
elif pd.api.types.is_list_like(origin):
    origin = np.array(origin, dtype='f8')
else:
    # A scalar is interpreted as an atom index whose position is used
    # as the origin.
    origin = self.loc[origin, ['x', 'y', 'z']]
if other_atoms is None:
    other_atoms = self.index
new = self.loc[other_atoms, :].copy()
norm = np.linalg.norm
try:
    new['distance'] = norm((new - origin).loc[:, ['x', 'y', 'z']],
                           axis=1)
except AttributeError:
    # Happens if molecule consists of only one atom
    new['distance'] = norm((new - origin).loc[:, ['x', 'y', 'z']])
if sort:
    # In-place ascending sort by distance from the origin.
    new.sort_values(by='distance', inplace=True)
return new
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def change_numbering(self, rename_dict, inplace=False):
    """Return the reindexed version of Cartesian.

    Args:
        rename_dict (dict): A dictionary mapping integers on integers.
        inplace (bool): Rename in place instead of returning a copy.

    Returns:
        Cartesian: A renamed copy according to the dictionary passed
        (``None`` when ``inplace`` is True).
    """
    target = self if inplace else self.copy()
    # Indices absent from the mapping keep their old value.
    target.index = [rename_dict.get(old, old) for old in self.index]
    if not inplace:
        return target
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition_chem_env(self, n_sphere=4, use_lookup=None):
    """Partition the molecule into subsets of the same chemical environment.

    An environment is the hashable tuple
    ``(element, frozenset((element, count), ...))`` built from all atoms
    within ``n_sphere`` bonds of an atom.

    Args:
        n_sphere (int): How many coordination spheres to follow.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified
            in ``settings['defaults']['use_lookup']``.

    Returns:
        dict: ``{(element_symbol, frozenset([tuples])): set([indices])}``
        mapping each chemical environment to the atom indices in it.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']

    def environment_of(idx):
        # All atoms within n_sphere bonds (full ball, not just surface).
        ball = self.get_coordination_sphere(
            idx, n_sphere=n_sphere, only_surface=False,
            give_only_index=True, use_lookup=use_lookup)
        ball.remove(idx)
        counts = collections.Counter(self.loc[ball, 'atom']).most_common()
        # frozenset of (element, count) pairs keeps the key hashable.
        return (self.loc[idx, 'atom'], frozenset(counts))

    partition = collections.defaultdict(set)
    for idx in self.index:
        partition[environment_of(idx)].add(idx)
    return dict(partition)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def align(self, other, indices=None, ignore_hydrogens=False):
    """Align two Cartesians by minimising the RMSD (Kabsch algorithm).

    Both molecules are returned centred on their centroid, with ``other``
    rotated onto ``self``. If ``indices is None``, ``len(self) == len(other)``
    must hold and both must carry the same index labels.

    Args:
        other (Cartesian): Molecule to rotate onto ``self``.
        indices (sequence): Optional pair ``(indices_self, indices_other)``
            restricting which atom pairs define the best rotation.
        ignore_hydrogens (bool): Drop hydrogen atoms before fitting.
            Mutually exclusive with ``indices``.

    Returns:
        tuple: ``(aligned_self, aligned_other)``

    Raises:
        IllegalArgumentCombination: If both ``indices`` and
            ``ignore_hydrogens`` are given.
    """
    coords = ['x', 'y', 'z']
    m1 = (self - self.get_centroid()).sort_index()
    m2 = (other - other.get_centroid()).sort_index()
    if indices is not None and ignore_hydrogens:
        message = 'Indices != None and ignore_hydrogens == True is invalid'
        raise IllegalArgumentCombination(message)
    if ignore_hydrogens:
        m1 = m1[m1['atom'] != 'H']
        m2 = m2[m2['atom'] != 'H']
    # BUG FIX: previously the position extraction lived in the same
    # if/elif chain, so pos1/pos2 were never assigned when
    # ignore_hydrogens was True, raising NameError below.
    if indices is not None:
        pos1 = m1.loc[indices[0], coords].values
        pos2 = m2.loc[indices[1], coords].values
    else:
        pos1 = m1.loc[:, coords].values
        pos2 = m2.loc[m1.index, coords].values
    m2 = dot(xyz_functions.get_kabsch_rotation(pos1, pos2), m2)
    return m1, m2
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reindex_similar(self, other, n_sphere=4):
    """Reindex ``other`` to be similarly indexed as ``self``. Returns a reindexed copy of ``other`` that minimizes the distance for each atom to itself in the same chemical environment from ``self`` to ``other``. Read more about the definition of the chemical environment in :func:`Cartesian.partition_chem_env` .. note:: It is necessary to align ``self`` and other before applying this method. This can be done via :meth:`~Cartesian.align`. .. note:: It is probably necessary to improve the result using :meth:`~Cartesian.change_numbering()`. Args: other (Cartesian):
n_sphere (int):
Wrapper around the argument for :meth:`~Cartesian.partition_chem_env`. Returns: Cartesian: Reindexed version of other """
|
    def make_subset_similar(m1, subset1, m2, subset2, index_dct):
        """Changes index_dct INPLACE"""
        coords = ['x', 'y', 'z']
        index1 = list(subset1)
        # NOTE: index1 may grow while iterating — atoms displaced from an
        # assignment below are appended and revisited later.
        for m1_i in index1:
            # All candidate partners in m2's subset, nearest first.
            dist_m2_to_m1_i = m2.get_distance_to(m1.loc[m1_i, coords],
                                                 subset2, sort=True)
            m2_i = dist_m2_to_m1_i.index[0]
            dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
            m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
            counter = itertools.count()
            found = False
            while not found:
                if m2_i in index_dct.keys():
                    # Candidate already taken; keep it only if we are closer.
                    old_m1_pos = m1.loc[index_dct[m2_i], coords]
                    if dist_new < np.linalg.norm(m2_pos_i - old_m1_pos):
                        # Steal the assignment and requeue the displaced atom.
                        index1.append(index_dct[m2_i])
                        index_dct[m2_i] = m1_i
                        found = True
                    else:
                        # Fall back to the next-nearest candidate.
                        # NOTE(review): the first next(counter) yields 0,
                        # i.e. re-selects the current nearest — confirm.
                        m2_i = dist_m2_to_m1_i.index[next(counter)]
                        dist_new = dist_m2_to_m1_i.loc[m2_i, 'distance']
                        m2_pos_i = dist_m2_to_m1_i.loc[m2_i, coords]
                else:
                    index_dct[m2_i] = m1_i
                    found = True
        return index_dct
    molecule1 = self.copy()
    molecule2 = other.copy()
    partition1 = molecule1.partition_chem_env(n_sphere)
    partition2 = molecule2.partition_chem_env(n_sphere)
    index_dct = {}
    for key in partition1:
        message = ('You have chemically different molecules, regarding '
                   'the topology of their connectivity.')
        assert len(partition1[key]) == len(partition2[key]), message
        index_dct = make_subset_similar(molecule1, partition1[key],
                                        molecule2, partition2[key],
                                        index_dct)
    molecule2.index = [index_dct[i] for i in molecule2.index]
    return molecule2.loc[molecule1.index]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_date(date_str):
    """Parse elastic datetime string into a ``datetime`` (``None`` if empty)."""
    if not date_str:
        return None
    try:
        parsed = ciso8601.parse_datetime(date_str)
        if not parsed:
            # ciso8601 is strict; fall back to the more forgiving arrow.
            parsed = arrow.get(date_str).datetime
    except TypeError:
        # Some callers hand in a single-element list.
        parsed = arrow.get(date_str[0]).datetime
    return parsed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dates(schema):
    """Return list of datetime fields for given schema."""
    # The metadata timestamps are always date fields.
    date_fields = [config.LAST_UPDATED, config.DATE_CREATED]
    date_fields.extend(field for field, field_schema in schema.items()
                       if field_schema['type'] == 'datetime')
    return date_fields
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_doc(hit, schema, dates):
    """Format given doc to match given schema."""
    doc = hit.get('_source', {})
    # Copy elastic metadata onto the doc without clobbering existing values.
    doc.setdefault(config.ID_FIELD, hit.get('_id'))
    doc.setdefault('_type', hit.get('_type'))
    highlight = hit.get('highlight')
    if highlight:
        doc['es_highlight'] = highlight
    inner_hits = hit.get('inner_hits')
    if inner_hits:
        doc['_inner_hits'] = {
            key: [item.get('_source', {})
                  for item in value.get('hits', {}).get('hits', [])]
            for key, value in inner_hits.items()
        }
    # Elastic returns dates as strings; convert the known date fields.
    for field in dates:
        if field in doc:
            doc[field] = parse_date(doc[field])
    return doc
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_filters(query, base_filters):
    """Put together all filters we have and set them as 'and' filter
    within filtered query.

    :param query: elastic query being constructed (modified in place)
    :param base_filters: all filters set outside of query
        (eg. resource config, sub_resource_lookup)
    """
    combined = [f for f in base_filters if f is not None]
    existing = query['query']['filtered'].get('filter', None)
    if existing is not None:
        # Merge any filter already present on the query.
        if 'and' in existing:
            combined.extend(existing['and'])
        else:
            combined.append(existing)
    if combined:
        query['query']['filtered']['filter'] = {'and': combined}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_es(url, **kwargs):
    """Create elasticsearch client instance.

    :param url: elasticsearch url — a single string or a list of urls
    """
    hosts = [url] if isinstance(url, str) else url
    # Custom serializer knows how to handle ObjectId values.
    kwargs.setdefault('serializer', ElasticJSONSerializer())
    return elasticsearch.Elasticsearch(hosts, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_elastic_query(doc):
    """Build an ElasticSearch ``filtered`` query from a simple request doc.

    ``doc['q']`` becomes a ``query_string`` query; every other key becomes
    a ``term`` filter (or a ``terms`` filter when the value is a list).

    :param doc: dict such as ``{"q": "cricket", "type": ["text"]}`` —
        it's the caller's responsibility to pass the right shape
    :returns: ElasticSearch query dict
    """
    elastic_query = {"query": {"filtered": {}}}
    filters = []
    for key, value in doc.items():
        if key == 'q':
            elastic_query['query']['filtered']['query'] = _build_query_string(value)
        elif isinstance(value, list):
            filters.append({"terms": {key: value}})
        else:
            filters.append({"term": {key: value}})
    set_filters(elastic_query, filters)
    return elastic_query
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_query_string(q, default_field=None, default_operator='AND'):
    """Build ``query_string`` (or ``match_phrase``) object from ``q``.

    A query wrapped in double quotes is treated as a phrase search.

    :param q: q of type String
    :param default_field: default_field applied to non-phrase queries
    :param default_operator: operator combining terms of the query
    :return: dictionary object.
    """
    def _is_phrase_search(query_string):
        clean_query = query_string.strip()
        return clean_query and clean_query.startswith('"') and clean_query.endswith('"')

    def _get_phrase(query_string):
        return query_string.strip().strip('"')

    if _is_phrase_search(q):
        # BUG FIX: the query_string options below used to be applied to this
        # branch too, raising KeyError('query_string') for phrase searches.
        return {'match_phrase': {'_all': _get_phrase(q)}}
    query = {'query_string': {'query': q, 'default_operator': default_operator}}
    # BUG FIX: the condition was inverted — use default_field when given,
    # otherwise disable leniency explicitly (matches the docstring examples).
    query['query_string'].update(
        {'default_field': default_field} if default_field else {'lenient': False})
    return query
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default(self, value):
    """Convert mongo.ObjectId to its string form; defer everything else."""
    return str(value) if isinstance(value, ObjectId) \
        else super(ElasticJSONSerializer, self).default(value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extra(self, response):
    """Add extra info (facets/aggregations) to response."""
    for source_key, target_key in (('facets', '_facets'),
                                   ('aggregations', '_aggregations')):
        if source_key in self.hits:
            response[target_key] = self.hits[source_key]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_index(self, app=None):
    """Create indexes and put mapping."""
    |
    # Full index definitions (client + settings + mappings) for every
    # elastic-backed resource.
    elasticindexes = self._get_indexes()
    for index, settings in elasticindexes.items():
        es = settings['resource']
        if not es.indices.exists(index):
            self.create_index(index, settings.get('index_settings'), es)
            # NOTE(review): newly created indexes skip the explicit mapping
            # loop below — presumably create_index already applies the
            # mappings via its settings body; confirm.
            continue
        else:
            # Existing index: only sync its settings in place.
            self.put_settings(app, index, settings.get('index_settings').get('settings'), es)
        for mapping_type, mappings in settings.get('index_settings', {}).get('mappings').items():
            self._put_resource_mapping(mapping_type, es,
                                       properties=mappings,
                                       index=index, doc_type=mapping_type)
    return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_indexes(self):
    """Based on the resource definition calculates the index definition"""
    |
    # Maps index name -> {'resource': es client, 'index_settings': {...}}.
    indexes = {}
    for resource in self._get_elastic_resources():
        try:
            index = self._resource_index(resource)
        except KeyError: # ignore missing
            continue
        # Several resources may share one physical index; create the
        # skeleton entry only once.
        if index not in indexes:
            indexes.update({
                index: {
                    'resource': self.elastic(resource),
                    'index_settings': {
                        'mappings': {}
                    }
                }
            })
        settings = self._resource_config(resource, 'SETTINGS')
        if settings:
            indexes[index]['index_settings'].update(settings)
        resource_config = self.app.config['DOMAIN'][resource]
        properties = self._get_mapping_properties(resource_config, parent=self._get_parent_type(resource))
        datasource = self.get_datasource(resource)
        # One mapping per doc type (the datasource's source name).
        indexes[index]['index_settings']['mappings'][datasource[0]] = properties
    return indexes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_mapping(self, schema):
    """Get mapping for given resource or item schema.

    :param schema: resource or dict/list type item schema
    """
    candidates = {name: self._get_field_mapping(field_schema)
                  for name, field_schema in schema.items()}
    # Fields without a derivable mapping are silently dropped.
    return {'properties': {name: mapping
                           for name, mapping in candidates.items() if mapping}}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_field_mapping(self, schema):
    """Get mapping for single field schema.

    Returns ``None`` when no mapping can be derived for the field.

    :param schema: field schema
    """
    # An explicit mapping always wins.
    if 'mapping' in schema:
        return schema['mapping']
    field_type = schema['type']
    if field_type == 'dict' and 'schema' in schema:
        return self._get_mapping(schema['schema'])
    if field_type == 'list' and 'schema' in schema.get('schema', {}):
        return self._get_mapping(schema['schema']['schema'])
    if field_type == 'datetime':
        return {'type': 'date'}
    if field_type == 'string' and schema.get('unique'):
        return {'type': 'string', 'index': 'not_analyzed'}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_index(self, index=None, settings=None, es=None):
    """Create new index and ignore if it exists already."""
    index = self.index if index is None else index
    es = self.es if es is None else es
    try:
        # The requested name becomes an alias pointing at a generated
        # physical index, so later reindexing can swap indexes atomically.
        alias = index
        index = generate_index_name(alias)
        create_args = {'index': index}
        if settings:
            create_args['body'] = settings
        es.indices.create(**create_args)
        es.indices.put_alias(index, alias)
        logger.info('created index alias=%s index=%s' % (alias, index))
    except elasticsearch.TransportError:
        # Index (or alias) exists already — nothing to do.
        pass
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put_mapping(self, app, index=None):
    """Put mapping for elasticsearch for current schema. It's not called automatically now, but rather left for user to call it whenever it makes sense. """
    |
    for resource, resource_config in self._get_elastic_resources().items():
        datasource = resource_config.get('datasource', {})
        if not is_elastic(datasource):
            continue
        if datasource.get('source', resource) != resource:  # only put mapping for core types
            continue
        properties = self._get_mapping_properties(resource_config)
        kwargs = {
            'index': index or self._resource_index(resource),
            'doc_type': resource,
            'body': properties,
        }
        try:
            self.elastic(resource).indices.put_mapping(**kwargs)
        except elasticsearch.exceptions.RequestError:
            # Usually means the new mapping conflicts with the existing one.
            logger.exception('mapping error, updating settings resource=%s' % resource)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mapping(self, index, doc_type=None):
    """Get mapping for index.

    :param index: index name
    :param doc_type: optional doc type to narrow the mapping
    """
    by_index = self.es.indices.get_mapping(index=index, doc_type=doc_type)
    # The response is keyed by the real index name; return the first entry.
    return next(iter(by_index.values()))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_settings(self, index):
    """Get settings for index.

    :param index: index name
    """
    by_index = self.es.indices.get_settings(index=index)
    # The response is keyed by the real index name; return the first entry.
    return next(iter(by_index.values()))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_index_by_alias(self, alias):
    """Get index name for given alias.

    If there is no alias assume it's an index.

    :param alias: alias name
    """
    try:
        alias_info = self.es.indices.get_alias(name=alias)
        return next(iter(alias_info.keys()))
    except elasticsearch.exceptions.NotFoundError:
        # No such alias — treat the name as a plain index.
        return alias
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_aggregate(self, req):
    """Check the environment variable and the given argument parameter
    to decide if aggregations needed.

    argument value is expected to be '0' or '1'
    """
    try:
        auto = self.app.config.get('ELASTICSEARCH_AUTO_AGGREGATIONS')
        return auto or bool(req.args and int(req.args.get('aggregations')))
    except (AttributeError, TypeError):
        # Missing args / missing parameter -> no aggregations.
        return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_highlight(self, req):
    """Check the given argument parameter to decide if highlights needed.

    argument value is expected to be '0' or '1'
    """
    try:
        requested = req.args and int(req.args.get('es_highlight', 0))
        return bool(requested)
    except (AttributeError, TypeError):
        return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_project(self, req):
    """Check the given argument parameter to decide if projections needed.

    argument value is expected to be a json encoded list of strings
    """
    try:
        if not req.args:
            # Preserve the falsy args value itself (None, {}, ...).
            return req.args
        return json.loads(req.args.get('projections', []))
    except (AttributeError, TypeError):
        return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_projected_fields(self, req):
    """Return the projected fields from request as a comma separated string.

    ``None`` when no projections were requested.
    """
    try:
        request_args = getattr(req, 'args', {})
        projections = json.loads(request_args.get('projections'))
        return ','.join(projections)
    except (AttributeError, TypeError):
        return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_one(self, resource, req, **lookup):
    """Find single document, if there is _id in lookup use that, otherwise filter."""
    |
    if config.ID_FIELD in lookup:
        # Direct id lookup, optionally routed via parent.
        return self._find_by_id(resource=resource, _id=lookup[config.ID_FIELD], parent=lookup.get('parent'))
    else:
        args = self._es_args(resource)
        # Exact-match every lookup key; constant_score skips relevance scoring.
        filters = [{'term': {key: val}} for key, val in lookup.items()]
        query = {'query': {'constant_score': {'filter': {'and': filters}}}}
        try:
            args['size'] = 1
            hits = self.elastic(resource).search(body=query, **args)
            docs = self._parse_hits(hits, resource)
            return docs.first()
        except elasticsearch.NotFoundError:
            return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_by_id(self, resource, _id, parent=None):
    """Find the document by Id. If parent is not provided then on routing exception try to find using search. """
    |
    def is_found(hit):
        # Older elastic responses report 'exists' instead of 'found';
        # normalize to 'found'.
        if 'exists' in hit:
            hit['found'] = hit['exists']
        return hit.get('found', False)
    args = self._es_args(resource)
    try:
        # set the parent if available
        if parent:
            args['parent'] = parent
        hit = self.elastic(resource).get(id=_id, **args)
        if not is_found(hit):
            return
        # Reuse the hits parser by wrapping the single hit in a search shape.
        docs = self._parse_hits({'hits': {'hits': [hit]}}, resource)
        return docs.first()
    except elasticsearch.NotFoundError:
        return
    except elasticsearch.TransportError as tex:
        if tex.error == 'routing_missing_exception' or 'RoutingMissingException' in tex.error:
            # Parent routing is required but unknown — fall back to a
            # search for the item by id.
            args = self._es_args(resource)
            query = {'query': {'bool': {'must': [{'term': {'_id': _id}}]}}}
            try:
                args['size'] = 1
                hits = self.elastic(resource).search(body=query, **args)
                docs = self._parse_hits(hits, resource)
                return docs.first()
            except elasticsearch.NotFoundError:
                return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_one_raw(self, resource, _id):
    """Fetch a single document directly by its id."""
    return self._find_by_id(_id=_id, resource=resource)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_list_of_ids(self, resource, ids, client_projection=None):
    """Find documents by ids.

    NOTE(review): ``client_projection`` is currently ignored — confirm
    whether projections should be applied here.
    """
    es_kwargs = self._es_args(resource)
    hits = self.elastic(resource).mget(body={'ids': ids}, **es_kwargs)
    return self._parse_hits(hits, resource)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert(self, resource, doc_or_docs, **kwargs):
    """Insert document, it must be new if there is ``_id`` in it."""
    |
    ids = []
    kwargs.update(self._es_args(resource))
    for doc in doc_or_docs:
        # Add parent routing args when the resource defines a parent type.
        # NOTE(review): kwargs is shared across iterations, so routing args
        # set for one doc persist for the following docs — confirm intended.
        self._update_parent_args(resource, kwargs, doc)
        # Elastic generates an id when none is supplied.
        _id = doc.pop('_id', None)
        res = self.elastic(resource).index(body=doc, id=_id, **kwargs)
        # Write the (possibly generated) id back onto the doc.
        doc.setdefault('_id', res.get('_id', _id))
        ids.append(doc.get('_id'))
    self._refresh_resource_index(resource)
    return ids
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bulk_insert(self, resource, docs, **kwargs):
    """Bulk insert documents."""
    |
    kwargs.update(self._es_args(resource))
    parent_type = self._get_parent_type(resource)
    if parent_type:
        for doc in docs:
            # Route each doc to its parent when the parent field is set.
            if doc.get(parent_type.get('field')):
                doc['_parent'] = doc.get(parent_type.get('field'))
    # elasticsearch.helpers.bulk; stats_only=False returns (count, errors).
    res = bulk(self.elastic(resource), docs, stats_only=False, **kwargs)
    self._refresh_resource_index(resource)
    return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, resource, id_, updates):
    """Update document in index."""
    |
    args = self._es_args(resource, refresh=True)
    if self._get_retry_on_conflict():
        args['retry_on_conflict'] = self._get_retry_on_conflict()
    # Meta fields must not appear in the partial doc sent to elastic.
    updates.pop('_id', None)
    updates.pop('_type', None)
    self._update_parent_args(resource, args, updates)
    return self.elastic(resource).update(id=id_, body={'doc': updates}, **args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace(self, resource, id_, document):
    """Replace document in index."""
    index_kwargs = self._es_args(resource, refresh=True)
    # Meta fields must not appear in the stored source.
    for meta_field in ('_id', '_type'):
        document.pop(meta_field, None)
    self._update_parent_args(resource, index_kwargs, document)
    return self.elastic(resource).index(body=document, id=id_, **index_kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, resource, lookup=None, parent=None, **kwargs):
    """Remove docs for resource.

    :param resource: resource name
    :param lookup: filter; must contain ``_id``
    :param parent: parent id
    :raises ValueError: if ``lookup`` is given without an ``_id``
    """
    kwargs.update(self._es_args(resource))
    if parent:
        kwargs['parent'] = parent
    if not lookup:
        # Nothing to remove.
        return
    if lookup.get('_id'):
        try:
            return self.elastic(resource).delete(id=lookup.get('_id'), refresh=True, **kwargs)
        except elasticsearch.NotFoundError:
            return
    # BUG FIX: this used to *return* a ValueError instance instead of
    # raising it, silently swallowing the error.
    raise ValueError('there must be `lookup._id` specified')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_empty(self, resource):
    """Test if there is no document for resource.

    :param resource: resource name
    """
    count_kwargs = self._es_args(resource)
    response = self.elastic(resource).count(
        body={'query': {'match_all': {}}}, **count_kwargs)
    return response.get('count', 0) == 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put_settings(self, app=None, index=None, settings=None, es=None):
    """Modify index settings. Index must exist already. """
    |
    if not index:
        index = self.index
    if not app:
        app = self.app
    if not es:
        es = self.es
    if not settings:
        return
    # Skip the close/open cycle when the wanted settings are already active.
    # NOTE(review): this reads via ``self.es`` while closing/opening via the
    # ``es`` argument — presumably both point at the same cluster; confirm.
    for alias, old_settings in self.es.indices.get_settings(index=index).items():
        try:
            if test_settings_contain(old_settings['settings']['index'], settings['settings']):
                return
        except KeyError:
            pass
    # Non-dynamic settings can only be applied to a closed index.
    es.indices.close(index=index)
    es.indices.put_settings(index=index, body=settings)
    es.indices.open(index=index)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_hits(self, hits, resource):
    """Parse hits response into documents."""
    datasource = self.get_datasource(resource)
    # Merge the core type schema with the resource schema,
    # resource keys taking precedence.
    schema = {}
    schema.update(config.DOMAIN[datasource[0]].get('schema', {}))
    schema.update(config.DOMAIN[resource].get('schema', {}))
    dates = get_dates(schema)
    docs = [format_doc(hit, schema, dates)
            for hit in hits.get('hits', {}).get('hits', [])]
    return ElasticCursor(hits, docs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _es_args(self, resource, refresh=None, source_projections=None):
    """Get index and doctype args."""
    datasource = self.get_datasource(resource)
    es_kwargs = {
        'index': self._resource_index(resource),
        'doc_type': datasource[0],
    }
    if source_projections:
        es_kwargs['_source'] = source_projections
    if refresh:
        es_kwargs['refresh'] = refresh
    return es_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parent_id(self, resource, document):
    """Get the Parent Id of the document.

    :param resource: resource name
    :param document: document containing the parent id
    """
    parent_type = self._get_parent_type(resource)
    if not (parent_type and document):
        return None
    return document.get(parent_type.get('field'))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fields(self, resource):
    """Get projection fields for given resource.

    Returns a comma separated string of the datasource projection fields
    plus the metadata date fields.
    """
    datasource = self.get_datasource(resource)
    keys = list(datasource[2].keys())
    # BUG FIX: the previous concatenation
    # (','.join(keys) + ','.join([...])) dropped the comma between the
    # last projection key and config.LAST_UPDATED.
    return ','.join(keys + [config.LAST_UPDATED, config.DATE_CREATED])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.