text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose_table_basis(file_relpath, data_dir):
    """Create a 'table' basis dictionary from a table json file.

    This function reads the info from the given file, and reads all the
    elemental basis set information from the files listed therein. It then
    composes all the information together into one 'table' basis dictionary.

    Note that the data returned from this function will not be shared, even
    if the function is called again with the same arguments.

    Parameters
    ----------
    file_relpath : str
        Path of the table basis file, relative to `data_dir`
    data_dir : str
        Root directory of the basis set data

    Returns
    -------
    dict
        The fully-composed table basis data

    Raises
    ------
    KeyError
        If an elemental file does not actually contain an element it is
        listed as providing
    """

    # Do a simple read of the json
    file_path = os.path.join(data_dir, file_relpath)
    table_bs = fileio.read_json_basis(file_path)

    # construct a list of all elemental files to read
    # (a set removes duplicates - several elements may share one file)
    element_files = set(table_bs['elements'].values())

    # Create a map of the elemental basis data
    # (maps file path to data contained in that file)
    element_map = {k: compose_elemental_basis(k, data_dir) for k in element_files}

    # Replace the basis set for all elements in the table basis with the data
    # from the elemental basis
    for k, entry in table_bs['elements'].items():
        data = element_map[entry]

        if k not in data['elements']:
            raise KeyError('File {} does not contain element {}'.format(entry, k))

        table_bs['elements'][k] = data['elements'][k]

    # Add the version to the dictionary
    # (the filename is expected to look like 'name.V.table.json', so the
    # version is the third-from-last dot-separated component)
    file_base = os.path.basename(file_relpath)
    table_bs['version'] = file_base.split('.')[-3]

    # Add whether the entire basis is spherical or cartesian
    table_bs['function_types'] = _whole_basis_types(table_bs)

    # Read and merge in the metadata
    # This file must be in the same location as the table file
    meta_dirpath, table_filename = os.path.split(file_path)
    meta_filename = table_filename.split('.')[0] + '.metadata.json'
    meta_filepath = os.path.join(meta_dirpath, meta_filename)
    bs_meta = fileio.read_json_basis(meta_filepath)
    table_bs.update(bs_meta)

    # Remove the molssi schema (which isn't needed here)
    table_bs.pop('molssi_bse_schema')

    return table_bs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_skel(role):
    '''
    Create the skeleton of a dictionary or JSON file

    A dictionary is returned that contains the "molssi_bse_schema"
    key and other required keys, depending on the role.

    Parameters
    ----------
    role : str
        One of 'component', 'element', 'table', or 'metadata'
        (case insensitive)

    Raises
    ------
    RuntimeError
        If the role is not recognized
    '''

    role = role.lower()
    if role not in _skeletons:
        # BUG FIX: the role was never substituted into the message
        # (the original string contained '{}' but .format() was not called)
        raise RuntimeError(
            "Role {} not found. Should be 'component', 'element', 'table', or 'metadata'".format(role))

    # Deep copy so callers can freely modify the returned skeleton
    return copy.deepcopy(_skeletons[role])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_notes(notes, ref_data):
    '''Add reference information to the bottom of a notes file

    `:ref:` tags are removed and the actual reference data is appended

    Parameters
    ----------
    notes : str
        The notes text to process
    ref_data : dict
        Mapping of reference key -> reference data

    Returns
    -------
    str
        The notes, with citation data appended for any reference key that
        appears in the notes (unchanged if no keys are found)
    '''

    # Find which reference keys are actually mentioned in the notes
    found_refs = {k for k in ref_data if k in notes}

    # Nothing referenced - return the notes unchanged
    # (IMPROVEMENT: this check is hoisted before building the header
    # block, so no work is wasted when there is nothing to append)
    if not found_refs:
        return notes

    # The block to append
    reference_sec = '\n\n'
    reference_sec += '-------------------------------------------------\n'
    reference_sec += ' REFERENCES MENTIONED ABOVE\n'
    reference_sec += ' (not necessarily references for the basis sets)\n'
    reference_sec += '-------------------------------------------------\n'

    # Add reference data, sorted by key for reproducible output
    for r in sorted(found_refs):
        rtxt = references.reference_text(ref_data[r])
        reference_sec += r + '\n'
        reference_sec += textwrap.indent(rtxt, ' ' * 4)
        reference_sec += '\n\n'

    return notes + reference_sec
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _validate_extra_component(bs_data):
'''Extra checks for component basis files'''
assert len(bs_data['elements']) > 0
# Make sure size of the coefficient matrix matches the number of exponents
for el in bs_data['elements'].values():
if not 'electron_shells' in el:
continue
for s in el['electron_shells']:
nprim = len(s['exponents'])
if nprim <= 0:
raise RuntimeError("Invalid number of primitives: {}".format(nprim))
for g in s['coefficients']:
if nprim != len(g):
raise RuntimeError("Number of coefficients doesn't match number of primitives ({} vs {}".format(
len(g), nprim))
# If more than one AM is given, that should be the number of
# general contractions
nam = len(s['angular_momentum'])
if nam > 1:
ngen = len(s['coefficients'])
if ngen != nam:
raise RuntimeError("Number of general contractions doesn't match combined AM ({} vs {}".format(
ngen, nam)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_data(file_type, bs_data):
    """Validate json basis set data against a schema

    Parameters
    ----------
    file_type : str
        Type of data to validate. May be 'component', 'element', 'table',
        or 'references'
    bs_data :
        Data to be validated

    Raises
    ------
    RuntimeError
        If the file_type is not valid (and/or a schema doesn't exist)
    ValidationError
        If the given data does not pass validation
    """

    if file_type not in _validate_map:
        raise RuntimeError("{} is not a valid file_type".format(file_type))

    # Schema validation first, then the type-specific extra checks
    jsonschema.validate(bs_data, api.get_schema(file_type))
    _validate_map[file_type](bs_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_file(file_type, file_path):
    """Validate a file against a schema

    Parameters
    ----------
    file_type : str
        Type of file to read. May be 'component', 'element', 'table',
        or 'references'
    file_path : str
        Full path to the file to be validated

    Raises
    ------
    RuntimeError
        If the file_type is not valid (and/or a schema doesn't exist)
    ValidationError
        If the given file does not pass validation
    FileNotFoundError
        If the file given by file_path doesn't exist
    """

    # Read the raw json, then delegate to validate_data
    validate_data(file_type, fileio._read_plain_json(file_path, False))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_data_dir(data_dir):
    """Validate all basis set files in a data directory

    Parameters
    ----------
    data_dir : str
        Directory containing the basis set data files

    Raises
    ------
    RuntimeError, ValidationError, FileNotFoundError
        If any file fails validation (see :func:`validate_file`)
    """

    all_meta, all_table, all_element, all_component = fileio.get_all_filelist(data_dir)

    # IMPROVEMENT: the four near-identical loops are collapsed into one
    # data-driven loop pairing each file list with its file type
    file_groups = [('metadata', all_meta),
                   ('table', all_table),
                   ('element', all_element),
                   ('component', all_component)]

    for file_type, file_list in file_groups:
        for f in file_list:
            validate_file(file_type, os.path.join(data_dir, f))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
# Desired ordering of keys in basis set dictionaries.
# IMPROVEMENT: hoisted to module level - the original rebuilt this list
# (including 150 extended element-number strings) on every call, and
# sort_basis_dict recurses, so the cost was paid repeatedly.
# yapf: disable
_BASIS_KEYORDER = [
    # Schema stuff
    'molssi_bse_schema', 'schema_type', 'schema_version',

    # Auxiliary block
    'jkfit', 'jfit', 'rifit', 'admmfit', 'dftxfit', 'dftjfit',

    # Basis set metadata
    'name', 'names', 'aliases', 'flags', 'family', 'description', 'role', 'auxiliaries',
    'notes', 'function_types',

    # Reference stuff
    'reference_description', 'reference_keys',

    # Version metadata
    'version', 'revision_description',

    # Sources of components
    'data_source',

    # Elements and data
    'elements', 'references', 'ecp_electrons',
    'electron_shells', 'ecp_potentials', 'components',

    # Shell information
    'function_type', 'region', 'angular_momentum', 'exponents',
    'coefficients',
    'ecp_type', 'angular_momentum', 'r_exponents', 'gaussian_exponents',
    'coefficients'
]
# yapf: enable

# Add integers for the elements (being optimistic that element 150 will be found someday)
_BASIS_KEYORDER.extend([str(x) for x in range(150)])


def sort_basis_dict(bs):
    """Sorts a basis set dictionary into a standard order

    This, for example, allows the written file to be more easily read by
    humans by, for example, putting the name and description before more
    detailed fields.

    This is generally for cosmetic reasons. However, users will generally
    like things in a consistent order.

    Note: a key not present in the canonical key order raises ValueError
    (unchanged from the original behavior, since list.index is used).
    """

    bs_sorted = sorted(bs.items(), key=lambda x: _BASIS_KEYORDER.index(x[0]))
    if _use_odict:
        bs_sorted = OrderedDict(bs_sorted)
    else:
        bs_sorted = dict(bs_sorted)

    for k, v in bs_sorted.items():
        # If this is a dictionary, sort recursively
        # If this is a list, sort each dict element but DO NOT sort the list itself.
        if isinstance(v, dict):
            bs_sorted[k] = sort_basis_dict(v)
        elif isinstance(v, list):
            # Note - the only nested list is with coeffs, which shouldn't be sorted
            # (so we don't have to recurse into lists of lists)
            bs_sorted[k] = [sort_basis_dict(x) if isinstance(x, dict) else x for x in v]

    return bs_sorted
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_shell(shell, use_copy=True):
    """Sort the primitives of a single basis set shell into a standard order

    Primitives are ordered by decreasing exponent, then (stably) by the
    index of their first nonzero coefficient. The rows of the coefficient
    matrix (general contractions) are then reordered by the index of their
    first nonzero entry.

    If use_copy is True, the input shell is not modified.
    """

    if use_copy:
        shell = copy.deepcopy(shell)

    def _first_nonzero(col):
        # Index of the first nonzero coefficient (None if all are zero)
        return next((i for i, c in enumerate(col) if float(c) != 0.0), None)

    # Work with rows = primitives (transpose of the stored matrix, which
    # has general contractions as rows)
    prim_coeffs = [list(row) for row in zip(*shell['coefficients'])]

    combined = list(zip(shell['exponents'], prim_coeffs))

    # Decreasing exponent first, then a stable sort by first-nonzero index
    combined.sort(key=lambda entry: -float(entry[0]))
    combined.sort(key=lambda entry: int(_first_nonzero(entry[1])))

    shell['exponents'] = [entry[0] for entry in combined]

    # Transpose back so rows are general contractions again, then order
    # the contractions by the index of their first nonzero coefficient
    gen_coeffs = [list(row) for row in zip(*(entry[1] for entry in combined))]
    gen_coeffs.sort(key=lambda row: int(_first_nonzero(row)))

    shell['coefficients'] = gen_coeffs
    return shell
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_shells(shells, use_copy=True):
    """Sort a list of basis set shells into a standard order

    The order within a shell is by decreasing value of the exponent.

    The order of the shell list is in increasing angular momentum, then by
    decreasing number of primitives, then decreasing number of general
    contractions, then by decreasing value of the largest exponent.

    If use_copy is True, the input shells are not modified.
    """

    if use_copy:
        shells = copy.deepcopy(shells)

    # Sort primitives within each shell
    # (copying was already handled above, so pass False)
    inner_sorted = [sort_shell(sh, False) for sh in shells]

    def _shell_key(sh):
        # Increasing AM; ties broken by decreasing size measures.
        # NOTE(review): max() here runs over exponent *strings*, so the
        # "largest exponent" is lexicographic, not numeric - this matches
        # the original code; confirm before changing.
        return (max(sh['angular_momentum']),
                -len(sh['exponents']),
                -len(sh['coefficients']),
                -float(max(sh['exponents'])))

    return sorted(inner_sorted, key=_shell_key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_potentials(potentials, use_copy=True):
    """Sort a list of ECP potentials into a standard order

    The order within a potential is not modified.

    The order of the list is in increasing angular momentum, with the
    largest angular momentum then moved to the front.

    If use_copy is True, the input potentials are not modified.
    """

    if use_copy:
        potentials = copy.deepcopy(potentials)

    # Sort by increasing AM, then move the last element (highest AM)
    # to the front
    potentials = sorted(potentials, key=lambda x: x['angular_momentum'])

    # IMPROVEMENT: guard against an empty list - pop() would raise
    # IndexError on []
    if potentials:
        potentials.insert(0, potentials.pop())

    return potentials
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_basis(basis, use_copy=True):
    """Sorts all the information in a basis set into a standard order

    If use_copy is True, the input basis set is not modified.
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    # Sort shells and ECP potentials for each element
    # (copying was handled above, so the helpers may work in place)
    for eldata in basis['elements'].values():
        if 'electron_shells' in eldata:
            eldata['electron_shells'] = sort_shells(eldata['electron_shells'], False)
        if 'ecp_potentials' in eldata:
            eldata['ecp_potentials'] = sort_potentials(eldata['ecp_potentials'], False)

    # Finally, put the dictionary keys themselves in standard order
    return sort_basis_dict(basis)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_single_reference(ref_entry):
    """Sorts a dictionary containing data for a single reference into a
    standard order
    """

    # Canonical field ordering for a reference entry
    # (this function also gets called on the schema 'entry')
    # yapf: disable
    field_order = (
        # Schema stuff
        'schema_type', 'schema_version',

        # Type of the entry
        'type',

        # Actual publication info
        'authors', 'title', 'booktitle', 'series', 'editors', 'journal',
        'institution', 'volume', 'number', 'page', 'year', 'note', 'publisher',
        'address', 'isbn', 'doi'
    )
    # yapf: enable

    entry_sorted = sorted(ref_entry.items(), key=lambda kv: field_order.index(kv[0]))

    return OrderedDict(entry_sorted) if _use_odict else dict(entry_sorted)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_references_dict(refs):
    """Sorts a reference dictionary into a standard order

    The keys of the references are also sorted, and the keys for the data
    for each reference are put in a more canonical order.
    """

    refs_sorted = OrderedDict() if _use_odict else dict()

    # Insert the schema entry first; it is overwritten with its sorted
    # version in the loop below, which preserves its leading position
    refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema']

    # Entries sorted by reference key (author1985a, etc)
    for key in sorted(refs.keys()):
        refs_sorted[key] = sort_single_reference(refs[key])

    return refs_sorted
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_dalton(basis_lines, fname):
    '''Reads Dalton-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the dalton format does not store all the fields we
    have, so some fields are left blank

    Parameters
    ----------
    basis_lines : list of str
        Lines of the Dalton-format file
    fname : str
        Name of the file being read (currently unused in the body)
    '''

    # Lines beginning with these characters are comments to be skipped
    skipchars = '$'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    i = 0
    while i < len(basis_lines):
        line = basis_lines[i]

        # An element block starts with a line like 'a <Z>'
        if line.lower().startswith('a '):
            element_Z = line.split()[1]
            i += 1

            # Shell am is strictly increasing (I hope)
            shell_am = 0

            # Consume shell blocks until the next element header (or EOF)
            while i < len(basis_lines) and not basis_lines[i].lower().startswith('a '):
                line = basis_lines[i]

                # Shell header: number of primitives and number of
                # general contractions
                nprim, ngen = line.split()

                if not element_Z in bs_data['elements']:
                    bs_data['elements'][element_Z] = {}
                if not 'electron_shells' in bs_data['elements'][element_Z]:
                    bs_data['elements'][element_Z]['electron_shells'] = []
                element_data = bs_data['elements'][element_Z]

                # s and p shells are plain gtos; higher AM is spherical
                if shell_am <= 1:
                    func_type = 'gto'
                else:
                    func_type = 'gto_spherical'

                shell = {
                    'function_type': func_type,
                    'region': '',
                    'angular_momentum': [shell_am]
                }

                exponents = []
                coefficients = []

                i += 1
                for _ in range(int(nprim)):
                    # Normalize Fortran-style exponent markers (D/d -> E)
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    # First token is the exponent; the rest are the
                    # coefficients of this primitive in each contraction
                    exponents.append(lsplt[0])
                    coefficients.append(lsplt[1:])
                    i += 1

                shell['exponents'] = exponents

                # We need to transpose the coefficient matrix
                # (we store a matrix with primitives being the column index and
                # general contraction being the row index)
                shell['coefficients'] = list(map(list, zip(*coefficients)))

                # Make sure the number of general contractions is > 0
                # (This error was found in some bad files)
                if int(ngen) <= 0:
                    raise RuntimeError("Number of general contractions is not greater than zero for element " +
                                       str(element_Z))

                # Make sure the number of general contractions matches the heading line
                if len(shell['coefficients']) != int(ngen):
                    raise RuntimeError("Number of general contractions does not equal what was given for element " +
                                       str(element_Z))

                element_data['electron_shells'].append(shell)
                shell_am += 1

    return bs_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_range(coeffs):
    '''
    Find the range in a list of coefficients where the coefficient is nonzero

    Returns a (first, last) pair of inclusive indices. Raises ValueError
    if every coefficient is zero.
    '''

    flags = [float(x) != 0 for x in coeffs]

    first = flags.index(True)

    # Index of the last True entry: search the reversed list and convert
    # the position back to the original indexing
    last = len(flags) - 1 - flags[::-1].index(True)

    return first, last
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _ref_bib(key, ref):
'''Convert a single reference to bibtex format
'''
s = ''
s += '@{}{{{},\n'.format(ref['type'], key)
entry_lines = []
for k, v in ref.items():
if k == 'type':
continue
# Handle authors/editors
if k == 'authors':
entry_lines.append(' author = {{{}}}'.format(' and '.join(v)))
elif k == 'editors':
entry_lines.append(' editor = {{{}}}'.format(' and '.join(v)))
else:
entry_lines.append(' {} = {{{}}}'.format(k, v))
s += ',\n'.join(entry_lines)
s += '\n}'
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_bib(refs):
    '''Converts references to bibtex
    '''

    parts = []

    # The citation for this library goes first, inside a comment banner
    lib_citation_desc, lib_citations = get_library_citation()
    parts.append('%' * 80 + '\n')
    parts.append(textwrap.indent(lib_citation_desc, '% '))
    parts.append('%' * 80 + '\n\n')

    for key, entry in lib_citations.items():
        parts.append(_ref_bib(key, entry) + '\n\n')

    parts.append('%' * 80 + '\n')
    parts.append("% References for the basis set\n")
    parts.append('%' * 80 + '\n')

    # First, write out the element, description -> key mapping (as comments)
    # Also collect unique references to write out afterwards
    unique_refs = {}

    for ref in refs:
        parts.append('% {}\n'.format(compact_elements(ref['elements'])))

        for ri in ref['reference_info']:
            parts.append('% {}\n'.format(ri['reference_description']))

            refdata = ri['reference_data']

            if not refdata:
                parts.append('% (...no reference...)\n%\n')
            else:
                keys = [k for k, _ in refdata]
                parts.append('% {}\n%\n'.format(' '.join(keys)))

            for key, entry in refdata:
                unique_refs[key] = entry

    parts.append('\n\n')

    # Write the unique references, sorted alphabetically by key
    for key, entry in sorted(unique_refs.items(), key=lambda kv: kv[0]):
        parts.append('{}\n\n'.format(_ref_bib(key, entry)))

    return ''.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_turbomole(basis):
    '''Converts a basis set to Turbomole format

    (Fixed docstring: this writer emits Turbomole data, not Gaussian.)
    '''

    s = '$basis\n'
    s += '*\n'

    # TM basis sets are completely uncontracted
    basis = manip.uncontract_general(basis, True)
    basis = manip.uncontract_spdf(basis, 0, False)
    basis = sort.sort_basis(basis, False)

    # Elements for which we have electron basis
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]

    # Elements for which we have ECP
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]

    # Electron Basis
    if len(electron_elements) > 0:
        for z in electron_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z, False)

            # Element header: symbol and basis name, delimited by '*'
            s += '{} {}\n'.format(sym, basis['name'])
            s += '*\n'

            for shell in data['electron_shells']:
                exponents = shell['exponents']
                coefficients = shell['coefficients']

                # One column for exponents plus one per general contraction
                ncol = len(coefficients) + 1
                nprim = len(exponents)
                am = shell['angular_momentum']
                amchar = lut.amint_to_char(am, hij=True)
                s += '   {}   {}\n'.format(nprim, amchar)

                # Column positions for decimal-point alignment
                point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
                s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=True)

            # Close the element block
            s += '*\n'

    # Write out ECP
    if len(ecp_elements) > 0:
        s += '$ecp\n'
        s += '*\n'
        for z in ecp_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z)

            s += '{} {}-ecp\n'.format(sym, basis['name'])
            s += '*\n'

            max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
            max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)

            # Sort lowest->highest, then put the highest at the beginning
            ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
            ecp_list.insert(0, ecp_list.pop())

            s += '  ncore = {}   lmax = {}\n'.format(data['ecp_electrons'], max_ecp_am)

            for pot in ecp_list:
                rexponents = pot['r_exponents']
                gexponents = pot['gaussian_exponents']
                coefficients = pot['coefficients']
                am = pot['angular_momentum']
                amchar = lut.amint_to_char(am, hij=True)

                # The highest-AM potential is written bare; the others are
                # written relative to it ('s-h', 'p-h', ...)
                if am[0] == max_ecp_am:
                    s += '{}\n'.format(amchar)
                else:
                    s += '{}-{}\n'.format(amchar, max_ecp_amchar)

                point_places = [9, 23, 32]
                s += printing.write_matrix([*coefficients, rexponents, gexponents], point_places, convert_exp=True)

            # Close the element's ECP block
            s += '*\n'

    s += '$end\n'
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compact_references(basis_dict, ref_data):
    """Creates a mapping of elements to reference keys

    A list is returned, with each element of the list being a dictionary
    with entries 'reference_info' containing data for (possibly) multiple
    references, and 'elements' which is a list of element Z numbers
    that those references apply to

    Parameters
    ----------
    basis_dict : dict
        Dictionary containing basis set information
    ref_data : dict
        Dictionary containing all reference information
    """

    element_refs = []

    # Group elements that share identical reference information
    # (sorted by Z first - note that Z is stored as a string)
    for el, eldata in sorted(basis_dict['elements'].items(), key=lambda kv: int(kv[0])):
        # elref is a list of dicts:
        #   { 'reference_description': str, 'reference_keys': [keys] }
        elref = eldata['references']

        # Attach this element to an existing group with equal refs, if any
        for group in element_refs:
            if group['reference_info'] == elref:
                group['elements'].append(el)
                break
        else:
            element_refs.append({'reference_info': elref, 'elements': [el]})

    # Resolve the reference keys into the actual reference data.
    # Since the keys are stored alongside the data, the key list is dropped.
    for group in element_refs:
        for elref in group['reference_info']:
            elref['reference_data'] = [(k, ref_data[k]) for k in elref['reference_keys']]
            elref.pop('reference_keys')

    return element_refs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reference_text(ref):
    '''Convert a single reference to plain text format

    Parameters
    ----------
    ref : dict
        Information about a single reference

    Raises
    ------
    RuntimeError
        If the reference type is not handled
    '''

    wrapper = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 8)
    rtype = ref['type']
    out = ''

    if rtype == 'unpublished':
        out += wrapper.fill(', '.join(ref['authors'])) + '\n'
        out += wrapper.fill(ref['title']) + '\n'
        out += wrapper.fill(ref['note']) + '\n'
    elif rtype == 'article':
        out += wrapper.fill(', '.join(ref['authors'])) + '\n'
        out += wrapper.fill(ref['title']) + '\n'
        out += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'], ref['page'], ref['year'])
        out += '\n' + ref['doi']
    elif rtype == 'incollection':
        out += wrapper.fill(', '.join(ref['authors']))
        # NOTE(review): TextWrapper.fill collapses the leading '\n' in the
        # strings below, so these fields run together without line breaks.
        # This matches the original code exactly - confirm intent before
        # changing.
        out += wrapper.fill('\n{}'.format(ref['title']))
        out += wrapper.fill("\nin '{}'".format(ref['booktitle']))
        if 'editors' in ref:
            out += wrapper.fill('\ned. ' + ', '.join(ref['editors']))
        if 'series' in ref:
            out += '\n{}, {}, {} ({})'.format(ref['series'], ref['volume'], ref['page'], ref['year'])
        if 'doi' in ref:
            out += '\n' + ref['doi']
    elif rtype == 'techreport':
        out += wrapper.fill(', '.join(ref['authors']))
        out += wrapper.fill('\n{}'.format(ref['title']))
        out += "\n'{}'".format(ref['institution'])
        out += '\nTechnical Report {}'.format(ref['number'])
        out += '\n{}'.format(ref['year'])
        if 'doi' in ref:
            out += '\n' + ref['doi']
    elif rtype == 'misc':
        out += wrapper.fill(', '.join(ref['authors'])) + '\n'
        out += wrapper.fill(ref['title'])
        if 'note' in ref:
            out += '\n' + ref['note']
        if 'doi' in ref:
            out += '\n' + ref['doi']
    else:
        raise RuntimeError('Cannot handle reference type {}'.format(rtype))

    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _determine_leftpad(column, point_place):
    '''Find how many spaces to put before a column of numbers
    so that all the decimal points line up

    This function takes a column of decimal numbers, and returns a
    vector containing the number of spaces to place before each number
    so that (when possible) the decimal points line up.

    Parameters
    ----------
    column : list
        Numbers that will be printed as a column
    point_place : int
        Number of the character column to put the decimal point
    '''

    # For each entry: characters before its decimal point, and then the
    # padding needed to land the point at point_place (clamped at zero -
    # entries that are already too wide simply get no padding)
    return [max(point_place - 1 - _find_point(entry), 0) for entry in column]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def electron_shell_str(shell, shellidx=None):
    '''Return a string representing the data for an electron shell

    If shellidx (index of the shell) is not None, it will also be printed
    '''

    # Uppercase AM character(s) for display
    amchar = lut.amint_to_char(shell['angular_momentum']).upper()

    idx_str = '' if shellidx is None else 'Index {} '.format(shellidx)

    exponents = shell['exponents']
    coefficients = shell['coefficients']

    # One column for the exponents plus one per general contraction
    ncol = len(coefficients) + 1
    point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]

    header = "Shell: {}Region: {}: AM: {}\n".format(idx_str, shell['region'], amchar)
    header += "Function: {}\n".format(shell['function_type'])

    return header + write_matrix([exponents, *coefficients], point_places)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecp_pot_str(pot):
    '''Return a string representing the data for an ECP potential
    '''

    amchar = lut.amint_to_char(pot['angular_momentum'])

    header = 'Potential: {} potential\n'.format(amchar)
    header += 'Type: {}\n'.format(pot['ecp_type'])

    # Column positions: r exponents, gaussian exponents, coefficients
    point_places = [0, 10, 33]

    matrix = [pot['r_exponents'], pot['gaussian_exponents'], *pot['coefficients']]
    return header + write_matrix(matrix, point_places)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_data_str(z, eldata):
    '''Return a string with all data for an element

    This includes shell and ECP potential data

    Parameters
    ----------
    z : int or str
        Element Z-number
    eldata : dict
        Data for the element to be printed
    '''

    sym = lut.element_sym_from_Z(z, True)

    cs = contraction_string(eldata)
    if not cs:
        cs = '(no electron shells)'

    lines = ['\nElement: {} : {}\n'.format(sym, cs)]

    # Electron shells (if any), each with its index
    for shellidx, shell in enumerate(eldata.get('electron_shells', [])):
        lines.append(electron_shell_str(shell, shellidx) + '\n')

    # ECP data (if any)
    if 'ecp_potentials' in eldata:
        lines.append('ECP: Element: {}   Number of electrons: {}\n'.format(sym, eldata['ecp_electrons']))
        lines.extend(ecp_pot_str(pot) + '\n' for pot in eldata['ecp_potentials'])

    return ''.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def component_basis_str(basis, elements=None):
    '''Print a component basis set

    If elements is not None, only the specified elements will be printed
    (see :func:`bse.misc.expand_elements`)
    '''

    eldata = basis['elements']

    # Filter to the requested elements (None means all of them)
    if elements is None:
        selected = list(eldata.keys())
    else:
        selected = expand_elements(elements, True)

    header = "Description: " + basis['description'] + '\n'
    body = ''.join(element_data_str(z, eldata[z]) + '\n' for z in selected)
    return header + body
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_molpro(basis):
    '''Converts a basis set to Molpro format
    '''

    # Uncontract all, and make as generally-contracted as possible
    basis = manip.uncontract_spdf(basis, 0, True)
    basis = manip.make_general(basis, False)
    basis = sort.sort_basis(basis, True)

    s = ''

    # Elements for which we have electron basis
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]

    # Elements for which we have ECP
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]

    if len(electron_elements) > 0:
        # basis set starts with a string
        s += 'basis={\n'

        # Electron Basis
        for z in electron_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z).upper()

            # Comment line with element name and contraction summary
            s += '!\n'
            s += '! {:20} {}\n'.format(lut.element_name_from_Z(z), misc.contraction_string(data))

            for shell in data['electron_shells']:
                exponents = shell['exponents']
                coefficients = shell['coefficients']
                am = shell['angular_momentum']
                amchar = lut.amint_to_char(am).lower()

                # All exponents on one line, then one 'c' line per
                # general contraction giving its nonzero range
                s += '{}, {} , {}\n'.format(amchar, sym, ', '.join(exponents))
                for c in coefficients:
                    first, last = find_range(c)
                    s += 'c, {}.{}, {}\n'.format(first + 1, last + 1, ', '.join(c[first:last + 1]))

        s += '}\n'

    # Write out ECP
    if len(ecp_elements) > 0:
        s += '\n\n! Effective core Potentials\n'

        for z in ecp_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z).lower()
            max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])

            # Sort lowest->highest, then put the highest at the beginning
            ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
            ecp_list.insert(0, ecp_list.pop())

            s += 'ECP, {}, {}, {} ;\n'.format(sym, data['ecp_electrons'], max_ecp_am)

            for pot in ecp_list:
                rexponents = pot['r_exponents']
                gexponents = pot['gaussian_exponents']
                coefficients = pot['coefficients']
                am = pot['angular_momentum']
                amchar = lut.amint_to_char(am).lower()

                s += '{};'.format(len(rexponents))

                # The highest-AM potential is the 'ul' (local) potential
                if am[0] == max_ecp_am:
                    s += ' !  ul potential\n'
                else:
                    s += ' !  {}-ul potential\n'.format(amchar)

                # One term per line: r exponent, gaussian exponent, coefficient
                for p in range(len(rexponents)):
                    s += '{},{},{};\n'.format(rexponents[p], gexponents[p], coefficients[0][p])

    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_basis(basis_dict, fmt, header=None):
    '''
    Returns the basis set data as a string representing
    the data in the specified output format

    Parameters
    ----------
    basis_dict : dict
        Basis set data to convert
    fmt : str
        Output format (case insensitive)
    header : str, optional
        Comment header to prepend to the output (not used for json)

    Raises
    ------
    RuntimeError
        If the format is unknown, or does not support all function
        types present in the basis
    '''

    # make converters case insensitive
    fmt = fmt.lower()
    if fmt not in _converter_map:
        raise RuntimeError('Unknown basis set format "{}"'.format(fmt))

    converter = _converter_map[fmt]

    # Determine if the converter supports all the types in the basis_dict
    if converter['valid'] is not None:
        ftypes = set(basis_dict['function_types'])
        # BUG FIX: this originally tested 'ftypes > valid' (proper
        # superset), which missed unsupported types whenever the basis
        # also lacked some supported type. The correct condition is
        # "not a subset of the supported types" (matching get_formats).
        if not ftypes <= converter['valid']:
            raise RuntimeError('Converter {} does not support all function types: {}'.format(fmt, str(ftypes)))

    # Actually do the conversion
    ret_str = converter['function'](basis_dict)

    if header is not None and fmt != 'json':
        comment_str = converter['comment']
        header_str = comment_str + comment_str.join(header.splitlines(True))
        ret_str = header_str + '\n\n' + ret_str

    # HACK - Psi4 requires the first non-comment line be spherical/cartesian
    # so we have to add that before the header
    if fmt == 'psi4':
        types = basis_dict['function_types']
        # BUG FIX: this tested for 'spherical_gto', but function types
        # are stored as 'gto_spherical' elsewhere in this package, so
        # spherical bases were always written as 'cartesian'.
        harm_type = 'spherical' if 'gto_spherical' in types else 'cartesian'
        ret_str = harm_type + '\n\n' + ret_str

    return ret_str
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_formats(function_types=None):
    '''Return the available output formats.

    With ``function_types=None`` a dict mapping format key -> display name
    is returned. Otherwise, a list of the format keys supporting every
    given function type is returned (note the differing return types;
    callers rely on this).
    '''
    if function_types is None:
        return {fmt: info['display'] for fmt, info in _converter_map.items()}

    # Function types are matched case-insensitively
    wanted = {ft.lower() for ft in function_types}

    supported = []
    for fmt, info in _converter_map.items():
        valid = info['valid']
        # 'valid' of None means the format supports everything
        if valid is None or wanted <= valid:
            supported.append(fmt)
    return supported
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_format_extension(fmt):
    '''Return the recommended file extension for a given format key.

    A fmt of None maps to the 'dict' pseudo-extension.
    '''
    if fmt is None:
        return 'dict'

    # Format keys are stored lowercase
    fmt = fmt.lower()
    if fmt in _converter_map:
        return _converter_map[fmt]['extension']

    raise RuntimeError('Unknown basis set format "{}"'.format(fmt))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _make_graph(bsname, version=None, data_dir=None):
    '''
    Create a DOT graph file of the files included in a basis set

    The graph has an edge from the basis set name to each element file it
    uses, and from each element file to the component files it is built
    from. Edges are labeled with a compacted element string.

    Raises RuntimeError if graphviz is not installed or the requested
    version does not exist.
    '''
    if not graphviz_avail:
        raise RuntimeError("graphviz package is not installed")
    data_dir = api.fix_data_dir(data_dir)
    md = api._get_basis_metadata(bsname, data_dir)
    # Default to the latest version of the basis set
    if version is None:
        version = md['latest_version']
    else:
        version = str(version)
    if not version in md['versions']:
        raise RuntimeError("Version {} of {} doesn't exist".format(version, bsname))
    gr = graphviz.Digraph(comment='Basis Set Graph: ' + bsname)
    # Read the table file
    table_path = os.path.join(data_dir, md['versions'][version]['file_relpath'])
    table_data = fileio.read_json_basis(table_path)
    # Group elements by the element file they point to
    # (one edge per file, labeled with all its elements)
    table_edges = {}
    for el, entry in table_data['elements'].items():
        if entry not in table_edges:
            table_edges[entry] = []
        table_edges[entry].append(el)
    for k, v in table_edges.items():
        gr.edge(bsname, k, label=compact_elements(v))
    # Element file
    for elfile in table_edges.keys():
        element_path = os.path.join(data_dir, elfile)
        element_data = fileio.read_json_basis(element_path)
        # Group elements by their (joined) list of component files
        element_edges = {}
        for el, components in element_data['elements'].items():
            components = components['components']
            components_str = '\n'.join(components)
            # skip if this element for the table basis doesn't come from this file
            if el not in table_data['elements']:
                continue
            if table_data['elements'][el] != elfile:
                continue
            if components_str not in element_edges:
                element_edges[components_str] = []
            element_edges[components_str].append(el)
        for k, v in element_edges.items():
            if len(v):
                gr.edge(elfile, k, label=compact_elements(v))
    return gr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_library_citation():
    '''Return a descriptive string and reference data for what users of the library should cite'''
    full_refs = api.get_reference_data()
    # Keep only the references that describe this library itself
    cite_refs = {key: full_refs[key] for key in _lib_refs}
    return (_lib_refs_desc, cite_refs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_columns(lines, prefix=''):
    '''
    Create a simple column output

    Each column except the last is left-justified and padded to the
    width of its longest entry. Rows shorter than the widest row are
    padded with empty strings.

    Parameters
    ----------
    lines : list
        List of lines to format. Each line is a tuple/list with each
        element corresponding to a column
    prefix : str
        Characters to insert at the beginning of each line

    Returns
    -------
    list
        List of formatted lines (one string per input line).
        An empty string is returned if there is nothing to format.
    '''

    if len(lines) == 0:
        return ''

    ncols = max(len(l) for l in lines)
    if ncols == 0:
        return ''

    # Pad ragged rows with empty strings so every row has ncols entries
    # (the original code raised IndexError on rows shorter than the widest)
    lines = [tuple(l) + ('',) * (ncols - len(l)) for l in lines]

    # We only find the max strlen for all but the last col
    maxlen = [0] * (ncols - 1)
    for l in lines:
        for c in range(ncols - 1):
            maxlen[c] = max(maxlen[c], len(l[c]))

    # Left-justify all but the last column; the last is appended unpadded
    fmtstr = prefix + ' '.join(['{{:{x}}}'.format(x=x) for x in maxlen])
    fmtstr += ' {}'
    return [fmtstr.format(*l) for l in lines]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def write_nwchem(basis):
    '''Converts a basis set to NWChem format

    Returns the formatted basis set (electron shells and, if present,
    ECP potentials) as a single string.
    '''
    # Uncontract all but SP
    basis = manip.uncontract_spdf(basis, 1, True)
    basis = sort.sort_basis(basis, True)
    s = ''
    # Elements for which we have electron basis
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
    # Elements for which we have ECP
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
    if len(electron_elements) > 0:
        # basis set starts with a string
        s += 'BASIS "ao basis" PRINT\n'
        # Electron Basis
        for z in electron_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z, True)
            s += '#BASIS SET: {}\n'.format(misc.contraction_string(data))
            for shell in data['electron_shells']:
                exponents = shell['exponents']
                coefficients = shell['coefficients']
                # one column for the exponents plus one per general contraction
                ncol = len(coefficients) + 1
                am = shell['angular_momentum']
                amchar = lut.amint_to_char(am).upper()
                s += '{} {}\n'.format(sym, amchar)
                # decimal-point alignment columns for the matrix writer
                point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
                s += printing.write_matrix([exponents, *coefficients], point_places)
        s += 'END\n'
    # Write out ECP
    if len(ecp_elements) > 0:
        s += '\n\nECP\n'
        for z in ecp_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z, True)
            max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
            # Sort lowest->highest, then put the highest at the beginning
            ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
            ecp_list.insert(0, ecp_list.pop())
            s += '{} nelec {}\n'.format(sym, data['ecp_electrons'])
            for pot in ecp_list:
                rexponents = pot['r_exponents']
                gexponents = pot['gaussian_exponents']
                coefficients = pot['coefficients']
                am = pot['angular_momentum']
                amchar = lut.amint_to_char(am).upper()
                # the highest-am potential is the local ('ul') channel
                if am[0] == max_ecp_am:
                    s += '{} ul\n'.format(sym)
                else:
                    s += '{} {}\n'.format(sym, amchar)
                point_places = [0, 10, 33]
                s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places)
        s += 'END\n'
    return s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contraction_string(element):
""" Forms a string specifying the contractions for an element ie, (16s,10p) -> [4s,3p] """ |
# Does not have electron shells (ECP only?)
if 'electron_shells' not in element:
return ""
cont_map = dict()
for sh in element['electron_shells']:
nprim = len(sh['exponents'])
ngeneral = len(sh['coefficients'])
# is a combined general contraction (sp, spd, etc)
is_spdf = len(sh['angular_momentum']) > 1
for am in sh['angular_momentum']:
# If this a general contraction (and not combined am), then use that
ncont = ngeneral if not is_spdf else 1
if am not in cont_map:
cont_map[am] = (nprim, ncont)
else:
cont_map[am] = (cont_map[am][0] + nprim, cont_map[am][1] + ncont)
primstr = ""
contstr = ""
for am in sorted(cont_map.keys()):
nprim, ncont = cont_map[am]
if am != 0:
primstr += ','
contstr += ','
primstr += str(nprim) + lut.amint_to_char([am])
contstr += str(ncont) + lut.amint_to_char([am])
return "({}) -> [{}]".format(primstr, contstr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand_elements(compact_el, as_str=False):
""" Create a list of integers given a string or list of compacted elements This is partly the opposite of compact_elements, but is more flexible. compact_el can be a list or a string. If compact_el is a list, each element is processed individually as a string (meaning list elements can contain commas, ranges, etc) If compact_el is a string, it is split by commas and then each section is processed. In all cases, element symbols (case insensitive) and Z numbers (as integers or strings) can be used interchangeably. Ranges are also allowed in both lists and strings. Some examples: "H-Li,C-O,Ne" will return [1, 2, 3, 6, 7, 8, 10] "H-N,8,Na-12" will return [1, 2, 3, 4, 5, 6, 7, 8, 11, 12] ['C', 'Al-15,S', 17, '18'] will return [6, 13, 14, 15, 16, 17, 18] If as_str is True, the list will contain strings of the integers (ie, the first example above will return ['1', '2', '3', '6', '7', '8', '10'] """ |
# If an integer, just return it
if isinstance(compact_el, int):
    if as_str is True:
        return [str(compact_el)]
    else:
        return [compact_el]
# If compact_el is a list, make it a comma-separated string
if isinstance(compact_el, list):
    compact_el = [str(x) for x in compact_el]
    compact_el = [x for x in compact_el if len(x) > 0]
    compact_el = ','.join(compact_el)
# Collapse repeated - or , into a single character
# Also remove all whitespace
compact_el = re.sub(r',+', ',', compact_el)
compact_el = re.sub(r'-+', '-', compact_el)
compact_el = re.sub(r'\s+', '', compact_el)
# Find starting with or ending with comma and strip them
compact_el = compact_el.strip(',')
# Check if I was passed an empty string or list
if len(compact_el) == 0:
    return []
# Find some erroneous patterns
# -, and ,-
if '-,' in compact_el:
    raise RuntimeError("Malformed element string")
if ',-' in compact_el:
    raise RuntimeError("Malformed element string")
# Strings ends or begins with -
if compact_el.startswith('-') or compact_el.endswith('-'):
    raise RuntimeError("Malformed element string")
# x-y-z (double range) is not allowed
if re.search(r'\w+-\w+-\w+', compact_el):
    raise RuntimeError("Malformed element string")
# Split on commas
tmp_list = compact_el.split(',')
# Now go over each one and replace elements with ints
el_list = []
for el in tmp_list:
    if not '-' in el:
        el_list.append(_Z_from_str(el))
    else:
        # A range such as 'H-Li'; inclusive on both ends
        begin, end = el.split('-')
        begin = _Z_from_str(begin)
        end = _Z_from_str(end)
        el_list.extend(list(range(begin, end + 1)))
if as_str is True:
    return [str(x) for x in el_list]
else:
    return el_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def elements_in_files(filelist):
    '''Get a list of what elements exist in JSON files

    This works on table, element, and component data files

    Parameters
    ----------
    filelist : list
        A list of paths to json files

    Returns
    -------
    dict
        Keys are the file path, value is a compacted element string of
        what elements are in that file
    '''

    element_map = {}
    for path in filelist:
        file_data = fileio.read_json_basis(path)
        # Compact the element list (e.g. ['1','2','3'] -> 'H-Li')
        element_map[path] = misc.compact_elements(list(file_data['elements'].keys()))

    return element_map
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fix_uncontracted(basis):
    '''
    Forces the contraction coefficient of uncontracted shells to 1.0

    Modifies the basis in place and returns it.
    '''

    for eldata in basis['elements'].values():
        if 'electron_shells' not in eldata:
            continue

        for shell in eldata['electron_shells']:
            coefs = shell['coefficients']

            # One primitive, one general contraction -> force coefficient to 1
            if len(coefs) == 1 and len(coefs[0]) == 1:
                coefs[0][0] = '1.0000000'

            # Some uncontracted shells don't have a coefficient
            if len(coefs) == 0:
                coefs.append(['1.0000000'])

    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_bsedebug(basis):
    '''Converts a basis set to BSE Debug format
    '''
    # One debug section per element, concatenated in element order
    parts = [element_data_str(z, zdata) for z, zdata in basis['elements'].items()]
    return ''.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_get_reader_formats(args):
    '''Handles the get-file-types subcommand'''
    reader_formats = curate.get_reader_formats()

    # Optionally strip the human-readable descriptions
    if args.no_description:
        rows = reader_formats.keys()
    else:
        rows = format_columns(reader_formats.items())

    return '\n'.join(rows)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_elements_in_files(args):
    '''Handles the elements-in-files subcommand'''
    # Maps file path -> compacted element string
    el_map = curate.elements_in_files(args.files)
    return '\n'.join(format_columns(el_map.items()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_component_file_refs(args):
    '''Handles the component-file-refs subcommand'''
    ref_info = curate.component_file_refs(args.files)

    out = []
    for cfile, cdata in ref_info.items():
        out.append(cfile + '\n')
        # One row per element: indented element plus its references
        table = [(' ' + el, ' '.join(refs)) for el, refs in cdata]
        out.append('\n'.join(format_columns(table)) + '\n\n')

    return ''.join(out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_print_component_file(args):
    '''Handles the print-component-file subcommand'''
    component_data = fileio.read_json_basis(args.file)
    # Restrict output to the requested elements (if any)
    return printing.component_basis_str(component_data, elements=args.elements)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_compare_basis_sets(args):
    '''Handles compare-basis-sets subcommand'''
    # Both basis sets are read from the same data directory
    same = curate.compare_basis_sets(args.basis1, args.basis2, args.version1, args.version2,
                                     args.uncontract_general, args.data_dir, args.data_dir)
    return "No difference found" if same else "DIFFERENCES FOUND. SEE ABOVE"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_compare_basis_files(args):
    '''Handles compare-basis-files subcommand'''
    same = curate.compare_basis_files(args.file1, args.file2, args.readfmt1, args.readfmt2,
                                      args.uncontract_general)
    return "No difference found" if same else "DIFFERENCES FOUND. SEE ABOVE"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_view_graph(args):
    '''Handles the view-graph subcommand'''
    # Displays the graph interactively; nothing to print on the CLI
    curate.view_graph(args.basis, args.version, args.data_dir)
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bsecurate_cli_make_graph_file(args):
    '''Handles the make-graph-file subcommand'''
    # Writes the graph file (optionally rendering it); nothing to print
    curate.make_graph_file(args.basis, args.outfile, args.render, args.version, args.data_dir)
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_data_from_Z(Z):
    '''Obtain elemental data given a Z number

    An exception is thrown if the Z number is not found
    '''
    # Accept Z given as a decimal string as well as an int
    if isinstance(Z, str) and Z.isdecimal():
        Z = int(Z)

    if Z in _element_Z_map:
        return _element_Z_map[Z]

    raise KeyError('No element data for Z = {}'.format(Z))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_data_from_sym(sym):
    '''Obtain elemental data given an elemental symbol

    The given symbol is not case sensitive
    An exception is thrown if the symbol is not found
    '''
    # Map keys are stored lowercase
    key = sym.lower()
    if key in _element_sym_map:
        return _element_sym_map[key]

    raise KeyError('No element data for symbol \'{}\''.format(sym))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_data_from_name(name):
    '''Obtain elemental data given an elemental name

    The given name is not case sensitive
    An exception is thrown if the name is not found
    '''
    # Map keys are stored lowercase
    key = name.lower()
    if key in _element_name_map:
        return _element_name_map[key]

    raise KeyError('No element data for name \'{}\''.format(name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_name_from_Z(Z, normalize=False):
    '''Obtain an element's name from its Z number

    An exception is thrown if the Z number is not found

    If normalize is True, the first letter will be capitalized
    '''
    # Index 2 of the element data tuple is the name
    name = element_data_from_Z(Z)[2]
    return name.capitalize() if normalize else name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def element_sym_from_Z(Z, normalize=False):
    '''Obtain an element's symbol from its Z number

    An exception is thrown if the Z number is not found

    If normalize is True, the first letter will be capitalized
    '''
    # Index 0 of the element data tuple is the symbol
    sym = element_data_from_Z(Z)[0]
    return sym.capitalize() if normalize else sym
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_references(ref_data, fmt):
    '''
    Returns the basis set references as a string representing
    the data in the specified output format
    '''

    # Format keys are stored lowercase
    fmt = fmt.lower()
    if fmt not in _converter_map:
        raise RuntimeError('Unknown reference format "{}"'.format(fmt))

    # Sort the data for all references (in place) before converting
    for elref in ref_data:
        for rinfo in elref['reference_info']:
            rinfo['reference_data'] = [(rkey, sort.sort_single_reference(rdata))
                                       for rkey, rdata in rinfo['reference_data']]

    return _converter_map[fmt]['function'](ref_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_basis_metadata(name, data_dir):
    '''Get metadata for a single basis set

    If the basis doesn't exist, an exception is raised
    '''

    # Metadata keys use the transformed (normalized) basis name
    tr_name = misc.transform_basis_name(name)
    all_metadata = get_metadata(data_dir)

    if tr_name not in all_metadata:
        raise KeyError("Basis set {} does not exist".format(name))

    return all_metadata[tr_name]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _header_string(basis_dict):
    '''Creates a header with information about a basis set

    Information includes description, revision, etc, but not references
    '''

    # Long description/version lines wrap with a hanging indent
    wrapper = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
    dashes = '-' * 70 + '\n'

    lines = [
        dashes,
        ' Basis Set Exchange\n',
        ' Version ' + version() + '\n',
        ' ' + _main_url + '\n',
        dashes,
        ' Basis set: ' + basis_dict['name'] + '\n',
        wrapper.fill(' Description: ' + basis_dict['description']) + '\n',
        ' Role: ' + basis_dict['role'] + '\n',
        wrapper.fill(' Version: {} ({})'.format(basis_dict['version'],
                                                basis_dict['revision_description'])) + '\n',
        dashes,
    ]
    return ''.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_basis(name,
              elements=None,
              version=None,
              fmt=None,
              uncontract_general=False,
              uncontract_spdf=False,
              uncontract_segmented=False,
              make_general=False,
              optimize_general=False,
              data_dir=None,
              header=True):
    '''Obtain a basis set

    This is the main function for getting basis set information.

    This function reads in all the basis data and returns it either
    as a string or as a python dictionary.

    Parameters
    ----------
    name : str
        Name of the basis set. This is not case sensitive.
    elements : str or list
        List of elements that you want the basis set for.
        Elements can be specified by Z-number (int or str) or by symbol (str).
        If this argument is a str (ie, '1-3,7-10'), it is expanded into a list.
        Z numbers and symbols (case insensitive) can be used interchangeably
        (see :func:`bse.misc.expand_elements`)

        If an empty string or list is passed, or if None is passed (the default),
        all elements for which the basis set is defined are included.
    version : int or str
        Obtain a specific version of this basis set. By default,
        the latest version is returned.
    fmt: str
        The desired output format of the basis set. By default,
        basis set information is returned as a python dictionary. Otherwise,
        if a format is specified, a string is returned.
        Use :func:`bse.api.get_formats` to programmatically obtain the available
        formats.  The `fmt` argument is not case sensitive.

        Available formats are

            * nwchem
            * gaussian94
            * psi4
            * gamess_us
            * turbomole
            * json
    uncontract_general : bool
        If True, remove general contractions by duplicating the set
        of primitive exponents with each vector of coefficients.
        Primitives with zero coefficient are removed, as are duplicate shells.
    uncontract_spdf : bool
        If True, remove general contractions with combined angular momentum (sp, spd, etc)
        by duplicating the set of primitive exponents with each vector of coefficients.
        Primitives with zero coefficient are removed, as are duplicate shells.
    uncontract_segmented : bool
        If True, remove segmented contractions by duplicating each primitive into new shells.
        Each coefficient is set to 1.0
    make_general : bool
        If True, make the basis set as generally-contracted as possible. There will be one
        shell per angular momentum (for each element)
    optimize_general : bool
        Optimize by removing general contractions that contain uncontracted
        functions (see :func:`bse.manip.optimize_general`)
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.
    header : bool
        If True (default) and a text `fmt` is requested, prepend an
        informational header to the output.

    Returns
    -------
    str or dict
        The basis set in the desired format. If `fmt` is **None**, this will be a python
        dictionary. Otherwise, it will be a string.
    '''

    data_dir = fix_data_dir(data_dir)
    bs_data = _get_basis_metadata(name, data_dir)

    # If version is not specified, use the latest
    if version is None:
        version = bs_data['latest_version']
    else:
        version = str(version)  # Version may be an int

    if not version in bs_data['versions']:
        raise KeyError("Version {} does not exist for basis {}".format(version, name))

    # Compose the entire basis set (all elements)
    file_relpath = bs_data['versions'][version]['file_relpath']
    basis_dict = compose.compose_table_basis(file_relpath, data_dir)

    # Set the name (from the global metadata)
    # Only the list of all names will be returned from compose_table_basis
    basis_dict['name'] = bs_data['display_name']

    # Handle optional arguments
    if elements is not None:
        # Convert to purely a list of strings that represent integers
        elements = misc.expand_elements(elements, True)

        # Did the user pass an empty string or empty list? If so, include
        # all elements
        if len(elements) != 0:
            bs_elements = basis_dict['elements']

            # Are elements part of this basis set?
            for el in elements:
                if not el in bs_elements:
                    elsym = lut.element_sym_from_Z(el)
                    raise KeyError("Element {} (Z={}) not found in basis {} version {}".format(
                        elsym, el, name, version))

            # Set to only the elements we want
            basis_dict['elements'] = {k: v for k, v in bs_elements.items() if k in elements}

    # Note that from now on, the pipeline is going to modify basis_dict. That is ok,
    # since we are returned a unique instance from compose_table_basis
    needs_pruning = False
    if optimize_general:
        basis_dict = manip.optimize_general(basis_dict, False)
        needs_pruning = True

    # uncontract_segmented implies uncontract_general
    if uncontract_segmented:
        basis_dict = manip.uncontract_segmented(basis_dict, False)
        needs_pruning = True
    elif uncontract_general:
        basis_dict = manip.uncontract_general(basis_dict, False)
        needs_pruning = True

    if uncontract_spdf:
        basis_dict = manip.uncontract_spdf(basis_dict, 0, False)
        needs_pruning = True

    if make_general:
        basis_dict = manip.make_general(basis_dict, False)
        needs_pruning = True

    # Remove dead and duplicate shells
    if needs_pruning:
        basis_dict = manip.prune_basis(basis_dict, False)

    # If fmt is not specified, return as a python dict
    if fmt is None:
        return basis_dict

    if header:
        header_str = _header_string(basis_dict)
    else:
        header_str = None

    return converters.convert_basis(basis_dict, fmt, header_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_basis_by_role(primary_basis, role, data_dir=None):
    '''Lookup the name of an auxiliary basis set given a primary basis set and role

    Parameters
    ----------
    primary_basis : str
        The primary (orbital) basis set that we want the auxiliary
        basis set for. This is not case sensitive.
    role: str
        Desired role/type of auxiliary basis set (case insensitive).
        Use :func:`bse.api.get_roles` to programmatically obtain the
        available roles.

        Available roles are

            * jfit
            * jkfit
            * rifit
            * admmfit
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    str
        The name of the auxiliary basis set for the given primary basis
        and role.
    '''

    data_dir = fix_data_dir(data_dir)
    role = role.lower()

    if role not in get_roles():
        raise RuntimeError("Role {} is not a valid role".format(role))

    bs_data = _get_basis_metadata(primary_basis, data_dir)
    aux_map = bs_data['auxiliaries']
    if role not in aux_map:
        raise RuntimeError("Role {} doesn't exist for {}".format(role, primary_basis))

    return aux_map[role]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_metadata(data_dir=None):
    '''Obtain the metadata for all basis sets

    The metadata includes information such as the display name of the basis set,
    its versions, and what elements are included in the basis set

    The data is read from the METADATA.json file in the `data_dir` directory.

    Parameters
    ----------
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.
    '''

    data_dir = fix_data_dir(data_dir)
    return fileio.read_metadata(os.path.join(data_dir, "METADATA.json"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_reference_data(data_dir=None):
    '''Obtain information for all stored references

    This is a nested dictionary with all the data for all the references

    The reference data is read from the REFERENCES.json file in the given
    `data_dir` directory.
    '''

    data_dir = fix_data_dir(data_dir)
    return fileio.read_references(os.path.join(data_dir, 'REFERENCES.json'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_basis_family(basis_name, data_dir=None):
    '''Lookup a family by a basis set name
    '''

    data_dir = fix_data_dir(data_dir)
    return _get_basis_metadata(basis_name, data_dir)['family']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_families(data_dir=None):
    '''Return a list of all basis set families'''

    data_dir = fix_data_dir(data_dir)
    metadata = get_metadata(data_dir)

    # Deduplicate via a set, then return sorted
    return sorted({bs_meta['family'] for bs_meta in metadata.values()})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_basis_sets(substr=None, family=None, role=None, data_dir=None):
    '''Filter basis sets by some criteria

    All parameters are ANDed together and are not case sensitive.

    Parameters
    ----------
    substr : str
        Substring to search for in the basis set name
    family : str
        Family the basis set belongs to
    role : str
        Role of the basis set
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    dict
        Basis set metadata that matches the search criteria
    '''

    data_dir = fix_data_dir(data_dir)
    candidates = get_metadata(data_dir)

    # family and role are required to be lowercase (via schema and validation functions)
    if family:
        family = family.lower()
        if family not in get_families(data_dir):
            raise RuntimeError("Family '{}' is not a valid family".format(family))
        candidates = {name: md for name, md in candidates.items() if md['family'] == family}

    if role:
        role = role.lower()
        if role not in get_roles():
            raise RuntimeError("Role '{}' is not a valid role".format(role))
        candidates = {name: md for name, md in candidates.items() if md['role'] == role}

    if substr:
        substr = substr.lower()
        # Match against the (normalized) key or the display name
        candidates = {
            name: md
            for name, md in candidates.items() if substr in name or substr in md['display_name']
        }

    return candidates
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _family_notes_path(family, data_dir):
    '''Form a path to the notes for a family'''

    data_dir = fix_data_dir(data_dir)
    family = family.lower()

    if family not in get_families(data_dir):
        raise RuntimeError("Family '{}' does not exist".format(family))

    # family is already lowercase at this point
    return os.path.join(data_dir, 'NOTES.' + family)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _basis_notes_path(name, data_dir):
    '''Form a path to the notes for a basis set'''

    data_dir = fix_data_dir(data_dir)
    bs_md = _get_basis_metadata(name, data_dir)

    # the notes file is the same as the base file name, with a .notes extension
    return os.path.join(data_dir, bs_md['basename'] + '.notes')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_family_notes(family, data_dir=None):
    '''Return a string representing the notes about a basis set family

    If the notes are not found, an empty string is returned
    '''

    notes_file = _family_notes_path(family, data_dir)
    raw_notes = fileio.read_notes_file(notes_file)
    if raw_notes is None:
        raw_notes = ""

    # Expand reference placeholders in the notes text
    ref_data = get_reference_data(data_dir)
    return notes.process_notes(raw_notes, ref_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_family_notes(family, data_dir=None):
    '''Check if notes exist for a given family

    Returns True if they exist, false otherwise
    '''
    return os.path.isfile(_family_notes_path(family, data_dir))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_basis_notes(name, data_dir=None):
    '''Return a string representing the notes about a specific basis set

    If the notes are not found, an empty string is returned
    '''

    notes_file = _basis_notes_path(name, data_dir)
    raw_notes = fileio.read_notes_file(notes_file)
    if raw_notes is None:
        return ""

    # Expand reference placeholders in the notes text
    ref_data = get_reference_data(data_dir)
    return notes.process_notes(raw_notes, ref_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_basis_notes(family, data_dir=None):
    '''Check if notes exist for a given basis set

    Returns True if they exist, false otherwise
    '''
    # NOTE(review): the parameter is named 'family' but actually receives a
    # basis set name (it is passed to _basis_notes_path). The name is kept
    # for backward compatibility with keyword callers.
    return os.path.isfile(_basis_notes_path(family, data_dir))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_schema(schema_type):
    '''Get a schema that can validate BSE JSON files

    The schema_type represents the type of BSE JSON file to be validated,
    and can be 'component', 'element', 'table', 'metadata', or 'references'.
    '''
    file_path = os.path.join(_default_schema_dir, "{}-schema.json".format(schema_type))
    if os.path.isfile(file_path):
        return fileio.read_schema(file_path)
    raise RuntimeError('Schema file \'{}\' does not exist, is not readable, or is not a file'.format(file_path))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _cli_check_data_dir(data_dir):
'''Checks that the data dir exists and contains METADATA.json'''
if data_dir is None:
return None
data_dir = os.path.expanduser(data_dir)
data_dir = os.path.expandvars(data_dir)
if not os.path.isdir(data_dir):
raise RuntimeError("Data directory '{}' does not exist or is not a directory".format(data_dir))
if not os.path.isfile(os.path.join(data_dir, 'METADATA.json')):
raise RuntimeError("Data directory '{}' does not contain a METADATA.json file".format(data_dir))
return data_dir |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_format(fmt):
    '''Checks that a basis set format exists and if not, raises a helpful exception'''
    if fmt is None:
        return None
    fmt = fmt.lower()
    if fmt in api.get_formats():
        return fmt
    raise RuntimeError("Format '" + fmt + "' does not exist.\n"
                       "For a complete list of formats, use the 'bse list-formats' command")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_ref_format(fmt):
    '''Checks that a reference format exists and if not, raises a helpful exception'''
    if fmt is None:
        return None
    fmt = fmt.lower()
    if fmt in api.get_reference_formats():
        return fmt
    raise RuntimeError("Reference format '" + fmt + "' does not exist.\n"
                       "For a complete list of formats, use the 'bse list-ref-formats' command")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_role(role):
    '''Checks that a basis set role exists and if not, raises a helpful exception'''
    if role is None:
        return None
    role = role.lower()
    if not role in api.get_roles():
        # Message fixed: this function checks roles, not "role formats"
        # (the old wording was copied from the reference-format checker)
        errstr = "Role '" + role + "' does not exist.\n"
        errstr += "For a complete list of roles, use the 'bse list-roles' command"
        raise RuntimeError(errstr)
    return role
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_basis(name, data_dir):
    '''Checks that a basis set exists and if not, raises a helpful exception'''
    if name is None:
        return None

    # Normalize the name the same way the metadata keys are normalized
    tr_name = misc.transform_basis_name(name)
    if tr_name in api.get_metadata(data_dir):
        return tr_name
    raise RuntimeError("Basis set '" + tr_name + "' does not exist.\n"
                       "For a complete list of basis sets, use the 'bse list-basis-sets' command")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_family(family, data_dir):
    '''Checks that a basis set family exists and if not, raises a helpful exception'''
    if family is None:
        return None
    family = family.lower()
    if family in api.get_families(data_dir):
        return family
    raise RuntimeError("Basis set family '" + family + "' does not exist.\n"
                       "For a complete list of families, use the 'bse list-families' command")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cli_check_readfmt(readfmt):
    '''Checks that a file type exists and if not, raises a helpful exception'''
    if readfmt is None:
        return None
    readfmt = readfmt.lower()
    if readfmt in curate.get_reader_formats():
        return readfmt
    raise RuntimeError("Reader for file type '" + readfmt + "' does not exist.\n"
                       "For a complete list of file types, use the 'bsecurate get-reader-formats' command")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_readme(fmt, reffmt):
    '''
    Creates the readme file for the bundle

    Returns a str representing the readme file
    '''
    # Stamp the readme with the (UTC) creation time and library version
    timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')

    # yapf: disable
    return _readme_str.format(timestamp=timestamp,
                              bsever=api.version(),
                              fmt=fmt, reffmt=reffmt)
    # yapf: enable
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _add_to_tbz(tfile, filename, data_str):
'''
Adds string data to a tarfile
'''
# Create a bytesio object for adding to a tarfile
# https://stackoverflow.com/a/52724508
encoded_data = data_str.encode('utf-8')
ti = tarfile.TarInfo(name=filename)
ti.size = len(encoded_data)
tfile.addfile(tarinfo=ti, fileobj=io.BytesIO(encoded_data)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _bundle_generic(bfile, addhelper, fmt, reffmt, data_dir):
    '''
    Loop over all basis sets and add data to an archive

    Parameters
    ----------
    bfile : object
        An object that gets passed through to the addhelper function
    addhelper : function
        A function that takes bfile and adds data to the bfile
    fmt : str
        Format of the basis set to create
    reffmt : str
        Format to use for the references
    data_dir : str
        Data directory with all the basis set information.

    Returns
    -------
    None
    '''
    ext = converters.get_format_extension(fmt)
    refext = refconverters.get_format_extension(reffmt)

    # Everything is placed under a single top-level directory in the archive
    subdir = 'basis_set_bundle-' + fmt + '-' + reffmt

    readme_path = os.path.join(subdir, 'README.txt')
    addhelper(bfile, readme_path, _create_readme(fmt, reffmt))

    for name, data, notes in _basis_data_iter(fmt, reffmt, data_dir):
        for ver, verdata in data.items():
            filename = misc.basis_name_to_filename(name)

            # One basis file and one reference file per (basis, version)
            basis_filepath = os.path.join(subdir, '{}.{}{}'.format(filename, ver, ext))
            ref_filename = os.path.join(subdir, '{}.{}.ref{}'.format(filename, ver, refext))

            bsdata, refdata = verdata
            addhelper(bfile, basis_filepath, bsdata)
            addhelper(bfile, ref_filename, refdata)

        # Basis set notes are version-independent, so add them once per basis
        # (filename is the same for every version of this basis)
        if len(notes) > 0:
            notes_filename = os.path.join(subdir, filename + '.notes')
            addhelper(bfile, notes_filename, notes)

    # Finally, the per-family notes
    for fam in api.get_families(data_dir):
        fam_notes = api.get_family_notes(fam, data_dir)
        if len(fam_notes) > 0:
            fam_notes_filename = os.path.join(subdir, fam + '.family_notes')
            addhelper(bfile, fam_notes_filename, fam_notes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_bundle(outfile, fmt, reffmt, archive_type=None, data_dir=None):
    '''
    Create a single archive file containing all basis
    sets in a given format

    Parameters
    ----------
    outfile : str
        Path to the file to create. Existing files will be overwritten
    fmt : str
        Format of the basis set to archive (nwchem, turbomole, ...)
    reffmt : str
        Format of the basis set references to archive (nwchem, turbomole, ...)
    archive_type : str
        Type of archive to create. Can be 'zip' or 'tbz'. Default is
        None, which will autodetect based on the outfile name
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    None

    Raises
    ------
    RuntimeError
        If the archive type is invalid or cannot be autodetected
    '''
    if archive_type is None:
        # Autodetect the archive type from the output file extension
        outfile_lower = outfile.lower()
        for k, v in _bundle_types.items():
            if outfile_lower.endswith(v['extension']):
                archive_type = k
                break
        else:
            raise RuntimeError("Cannot autodetect archive type from file name: {}".format(os.path.basename(outfile)))
    else:
        archive_type = archive_type.lower()
        if not archive_type in _bundle_types:
            # Bug fix: the '{}' placeholder was previously never filled in
            raise RuntimeError("Archive type '{}' is not valid.".format(archive_type))

    _bundle_types[archive_type]['handler'](outfile, fmt, reffmt, data_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_archive_types():
    '''
    Return information related to the types of archives available
    '''
    # Deep copy so callers can't mutate the module-level table,
    # and strip the handler callables from the returned info
    info = copy.deepcopy(_bundle_types)
    for v in info.values():
        del v['handler']
    return info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_element_data(dest, sources, use_copy=True):
    """ Merges the basis set data for an element from multiple sources into dest.

    The destination is not modified, and a (shallow) copy of dest is returned
    with the data from sources added.

    If use_copy is True, then the data merged into dest will be a (deep) copy of
    that found in sources. Otherwise, data may be shared between dest and sources
    """
    ret = {} if dest is None else dest.copy()

    if use_copy:
        sources = copy.deepcopy(sources)

    # Note that we are not copying notes/data_sources
    for src in sources:
        if 'electron_shells' in src:
            ret.setdefault('electron_shells', []).extend(src['electron_shells'])
        if 'ecp_potentials' in src:
            # Only one source may supply an ECP
            if 'ecp_potentials' in ret:
                raise RuntimeError('Cannot overwrite existing ECP')
            ret['ecp_potentials'] = src['ecp_potentials']
            ret['ecp_electrons'] = src['ecp_electrons']
        if 'references' in src:
            existing = ret.setdefault('references', [])
            for ref in src['references']:
                if ref not in existing:
                    existing.append(ref)
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prune_shell(shell, use_copy=True):
    """ Removes exact duplicates of primitives, and condenses duplicate
    exponents into general contractions

    Also removes primitives if all coefficients are zero

    If use_copy is True, the input shell is not modified.
    """
    # Bug fix: use_copy was previously accepted but ignored, and the input
    # shell was always mutated in place
    if use_copy:
        shell = copy.deepcopy(shell)

    new_exponents = []
    new_coefficients = []
    exponents = shell['exponents']
    nprim = len(exponents)

    # transpose of the coefficient matrix (one row per primitive)
    coeff_t = list(map(list, zip(*shell['coefficients'])))

    # Group coefficient rows by exponent value
    ex_groups = []
    for i in range(nprim):
        for ex in ex_groups:
            if float(exponents[i]) == float(ex[0]):
                ex[1].append(coeff_t[i])
                break
        else:
            ex_groups.append((exponents[i], [coeff_t[i]]))

    # Now collapse within groups
    for ex in ex_groups:
        if len(ex[1]) == 1:
            # only add if there is a nonzero contraction coefficient
            if not all([float(x) == 0.0 for x in ex[1][0]]):
                new_exponents.append(ex[0])
                new_coefficients.append(ex[1][0])
            continue

        # ex[1] contains rows of coefficients. The length of ex[1]
        # is the number of times the exponent is duplicated. Columns represent
        # general contractions. We want to find the non-zero coefficient in each
        # column, if it exists. The result is a single row with a length
        # representing the number of general contractions
        new_coeff_row = []

        # so take yet another transpose.
        ex_coeff = list(map(list, zip(*ex[1])))
        for g in ex_coeff:
            nonzero = [x for x in g if float(x) != 0.0]
            if len(nonzero) > 1:
                raise RuntimeError("Exponent {} is duplicated within a contraction".format(ex[0]))
            if len(nonzero) == 0:
                new_coeff_row.append(g[0])
            else:
                new_coeff_row.append(nonzero[0])

        # only add if there is a nonzero contraction coefficient anywhere for this exponent
        if not all([float(x) == 0.0 for x in new_coeff_row]):
            new_exponents.append(ex[0])
            new_coefficients.append(new_coeff_row)

    # take the transpose again, putting the general contraction
    # as the slowest index
    new_coefficients = list(map(list, zip(*new_coefficients)))

    shell['exponents'] = new_exponents
    shell['coefficients'] = new_coefficients
    return shell
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prune_basis(basis, use_copy=True):
    """ Removes primitives that have a zero coefficient, and removes duplicate
    primitives and shells

    This only finds EXACT duplicates, and is meant to be used after other
    manipulations

    If use_copy is True, the input basis set is not modified.
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for el in basis['elements'].values():
        if 'electron_shells' not in el:
            continue

        # Prune each shell in place (copying was already handled above)
        pruned = [prune_shell(sh, False) for sh in el.pop('electron_shells')]

        # Keep only the first occurrence of each distinct shell
        unique = []
        for sh in pruned:
            if sh not in unique:
                unique.append(sh)
        el['electron_shells'] = unique
    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uncontract_spdf(basis, max_am=0, use_copy=True):
    """ Removes sp, spd, spdf, etc, contractions from a basis set

    The general contractions are replaced by uncontracted versions.
    Contractions up to max_am will be left in place. For example,
    if max_am = 1, spd will be split into sp and d.

    The returned basis may have functions with coefficients of zero and may
    have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for el in basis['elements'].values():
        if 'electron_shells' not in el:
            continue

        newshells = []
        for sh in el['electron_shells']:
            am_list = sh['angular_momentum']
            coeffs = sh['coefficients']

            if len(am_list) <= 1:
                # Not a fused (sp, spd, ...) shell; keep as-is
                newshells.append(sh)
                continue

            # Components above max_am are split into their own shells;
            # the rest stay together in one combined shell
            combined = sh.copy()
            combined['angular_momentum'] = []
            combined['coefficients'] = []

            for g in range(len(coeffs)):
                if am_list[g] > max_am:
                    split_sh = sh.copy()
                    split_sh['angular_momentum'] = [am_list[g]]
                    split_sh['coefficients'] = [coeffs[g]]
                    newshells.append(split_sh)
                else:
                    combined['angular_momentum'].append(am_list[g])
                    combined['coefficients'].append(coeffs[g])

            # The combined (low angular momentum) shell goes first
            newshells.insert(0, combined)

        el['electron_shells'] = newshells
    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uncontract_general(basis, use_copy=True):
    """ Removes the general contractions from a basis set

    The returned basis may have functions with coefficients of zero and may
    have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for el in basis['elements'].values():
        if 'electron_shells' not in el:
            continue

        newshells = []
        for sh in el['electron_shells']:
            # Already-segmented shells are kept; fused (sp, spd, ...) shells
            # are also kept - splitting those is uncontract_spdf's job
            if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
                newshells.append(sh)
            elif len(sh['angular_momentum']) == 1:
                # One new shell per general contraction
                for contraction in sh['coefficients']:
                    segment = sh.copy()
                    segment['coefficients'] = [contraction]
                    newshells.append(segment)
        el['electron_shells'] = newshells

    # The deep copy (if requested) was already made above
    return prune_basis(basis, False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uncontract_segmented(basis, use_copy=True):
    """ Removes the segmented contractions from a basis set

    This implicitly removes general contractions as well.

    The returned basis may have functions with coefficients of zero and may
    have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for el in basis['elements'].values():
        if 'electron_shells' not in el:
            continue

        newshells = []
        for sh in el['electron_shells']:
            nam = len(sh['angular_momentum'])
            for ex in sh['exponents']:
                # One shell per primitive. The coefficient matrix is
                # (ngeneral x nprim), so a fused shell gets one
                # unit-coefficient row per angular momentum component.
                newsh = sh.copy()
                newsh['exponents'] = [ex]
                newsh['coefficients'] = [['1.00000000'] for _ in range(nam)]
                newshells.append(newsh)
        el['electron_shells'] = newshells
    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_general(basis, use_copy=True):
    """ Makes one large general contraction for each angular momentum

    If use_copy is True, the input basis set is not modified.

    The output of this function is not pretty. If you want to make it nicer,
    use sort_basis afterwards.
    """
    zero = '0.00000000'

    # Splitting fused (sp, spd, ...) shells also performs the deep copy
    # when use_copy is True
    basis = uncontract_spdf(basis, 0, use_copy)

    for k, el in basis['elements'].items():
        if not 'electron_shells' in el:
            continue

        # See what we have
        all_am = []
        for sh in el['electron_shells']:
            if not sh['angular_momentum'] in all_am:
                all_am.append(sh['angular_momentum'])

        all_am = sorted(all_am)

        newshells = []
        for am in all_am:
            # One merged shell per angular momentum
            newsh = {
                'angular_momentum': am,
                'exponents': [],
                'coefficients': [],
                'region': '',
                'function_type': None,
            }

            # Do exponents first
            for sh in el['electron_shells']:
                if sh['angular_momentum'] != am:
                    continue
                newsh['exponents'].extend(sh['exponents'])

            # Number of primitives in the new shell
            nprim = len(newsh['exponents'])

            cur_prim = 0
            for sh in el['electron_shells']:
                if sh['angular_momentum'] != am:
                    continue

                if newsh['function_type'] is None:
                    newsh['function_type'] = sh['function_type']

                # Make sure the shells we are merging have the same function types
                ft1 = newsh['function_type']
                ft2 = sh['function_type']

                # Check if one function type is the subset of another
                # (should handle gto/gto_spherical, etc)
                # NOTE: this is a substring test on the type names
                if ft1 not in ft2 and ft2 not in ft1:
                    raise RuntimeError("Cannot make general contraction of different function types")

                # Each original contraction becomes one column of the merged
                # shell, zero-padded above (cur_prim rows) and below (to nprim)
                ngen = len(sh['coefficients'])
                for g in range(ngen):
                    coef = [zero] * cur_prim
                    coef.extend(sh['coefficients'][g])
                    coef.extend([zero] * (nprim - len(coef)))
                    newsh['coefficients'].append(coef)

                cur_prim += len(sh['exponents'])
            newshells.append(newsh)

        el['electron_shells'] = newshells
    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optimize_general(basis, use_copy=True):
    """ Optimizes the general contraction using the method of Hashimoto et al

    .. seealso :: | T. Hashimoto, K. Hirao, H. Tatewaki
                  | 'Comment on Dunning's correlation-consistent basis set'
                  | Chemical Physics Letters v243, Issues 1-2, pp, 190-192 (1995)
                  | https://doi.org/10.1016/0009-2614(95)00807-G
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for k, el in basis['elements'].items():
        if not 'electron_shells' in el:
            continue

        elshells = el.pop('electron_shells')
        el['electron_shells'] = []
        for sh in elshells:
            exponents = sh['exponents']
            coefficients = sh['coefficients']
            nprim = len(exponents)
            nam = len(sh['angular_momentum'])

            # Only plain (non-fused) shells with more than one general
            # contraction can be optimized; everything else passes through
            if nam > 1 or len(coefficients) < 2:
                el['electron_shells'].append(sh)
                continue

            # First, find columns (general contractions) with a single non-zero value
            single_columns = [idx for idx, c in enumerate(coefficients) if _is_single_column(c)]

            # Find the corresponding rows that have a value in one of these columns
            # Note that at this stage, the row may have coefficients in more than one
            # column. That is ok, we are going to split it off anyway
            single_rows = []
            for col_idx in single_columns:
                col = coefficients[col_idx]
                for row_idx in range(nprim):
                    if float(col[row_idx]) != 0.0:
                        single_rows.append(row_idx)

            # Split those out into new shells, and remove them from the
            # original shell
            new_shells_single = []
            for row_idx in single_rows:
                newsh = copy.deepcopy(sh)
                newsh['exponents'] = [exponents[row_idx]]
                newsh['coefficients'] = [['1.00000000000']]
                new_shells_single.append(newsh)

            exponents = [x for idx, x in enumerate(exponents) if idx not in single_rows]
            coefficients = [x for idx, x in enumerate(coefficients) if idx not in single_columns]
            coefficients = [[x for idx, x in enumerate(col) if not idx in single_rows] for col in coefficients]

            # Remove Zero columns
            #coefficients = [ x for x in coefficients if not _is_zero_column(x) ]

            # Find contiguous rectanglar blocks; each becomes its own shell
            new_shells = []
            while len(exponents) > 0:
                block_rows, block_cols = _find_block(coefficients)

                # add as a new shell
                newsh = copy.deepcopy(sh)
                newsh['exponents'] = [exponents[i] for i in block_rows]
                newsh['coefficients'] = [[coefficients[colidx][i] for i in block_rows] for colidx in block_cols]
                new_shells.append(newsh)

                # Remove from the original exponent/coefficient set
                exponents = [x for idx, x in enumerate(exponents) if idx not in block_rows]
                coefficients = [x for idx, x in enumerate(coefficients) if idx not in block_cols]
                coefficients = [[x for idx, x in enumerate(col) if not idx in block_rows] for col in coefficients]

            # I do this order to mimic the output of the original BSE
            el['electron_shells'].extend(new_shells)
            el['electron_shells'].extend(new_shells_single)

        # Fix coefficients for completely uncontracted shells to 1.0
        for sh in el['electron_shells']:
            if len(sh['coefficients']) == 1 and len(sh['coefficients'][0]) == 1:
                sh['coefficients'][0][0] = '1.0000000'

    return basis
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _reldiff(a, b):
""" Computes the relative difference of two floating-point numbers rel = abs(a-b)/min(abs(a), abs(b)) If a == 0 and b == 0, then 0.0 is returned Otherwise if a or b is 0.0, inf is returned. """ |
a = float(a)
b = float(b)
aa = abs(a)
ba = abs(b)
if a == 0.0 and b == 0.0:
return 0.0
elif a == 0 or b == 0.0:
return float('inf')
return abs(a - b) / min(aa, ba) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compare_keys(element1, element2, key, compare_func, *args):
""" Compares a specific key between two elements of a basis set If the key exists in one element but not the other, False is returned. If the key exists in neither element, True is returned. Parameters element1 : dict Basis info for an element element2 : dict Basis info for another element key : string Key to compare in the two elements compare_func : function Function that returns True if the data under the key is equivalent in both elements args Additional arguments to be passed to compare_Func """ |
if key in element1 and key in element2:
if not compare_func(element1[key], element2[key], *args):
return False
elif key in element1 or key in element2:
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def electron_shells_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
    '''
    Determine if a list of electron shells is a subset of another

    If 'subset' is a subset of the 'superset', True is returned.
    The shells are compared approximately (exponents/coefficients are
    within a tolerance)

    If compare_meta is True, the metadata is also compared for exact equality.
    '''
    # Every shell in subset must approximately match some shell in superset
    return all(
        any(compare_electron_shells(sh, other, compare_meta, rel_tol) for other in superset) for sh in subset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecp_pots_are_subset(subset, superset, compare_meta=False, rel_tol=0.0):
    '''
    Determine if a list of ecp potentials is a subset of another

    If 'subset' is a subset of the 'superset', True is returned.
    The potentials are compared approximately (exponents/coefficients are
    within a tolerance)

    If compare_meta is True, the metadata is also compared for exact equality.
    '''
    # Every potential in subset must approximately match some potential in superset
    return all(any(compare_ecp_pots(pot, other, compare_meta, rel_tol) for other in superset) for pot in subset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_elements(element1,
                     element2,
                     compare_electron_shells_meta=False,
                     compare_ecp_pots_meta=False,
                     compare_meta=False,
                     rel_tol=0.0):
    '''
    Determine if the basis information for two elements is the same as another

    Exponents/coefficients are compared using a tolerance.

    Parameters
    ----------
    element1 : dict
        Basis information for an element
    element2 : dict
        Basis information for another element
    compare_electron_shells_meta : bool
        Compare the metadata of electron shells
    compare_ecp_pots_meta : bool
        Compare the metadata of ECP potentials
    compare_meta : bool
        Compare the overall element metadata
    rel_tol : float
        Maximum relative error that is considered equal
    '''
    # Each check returns False if the key is present in only one element
    checks = [
        ('electron_shells', electron_shells_are_equal, (compare_electron_shells_meta, rel_tol)),
        ('ecp_potentials', ecp_pots_are_equal, (compare_ecp_pots_meta, rel_tol)),
        ('ecp_electrons', operator.eq, ()),
    ]
    for key, func, extra in checks:
        if not _compare_keys(element1, element2, key, func, *extra):
            return False

    if compare_meta and not _compare_keys(element1, element2, 'references', operator.eq):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compare_basis(bs1,
                  bs2,
                  compare_electron_shells_meta=False,
                  compare_ecp_pots_meta=False,
                  compare_elements_meta=False,
                  compare_meta=False,
                  rel_tol=0.0):
    '''
    Determine if two basis set dictionaries are the same

    bs1 : dict
        Full basis information
    bs2 : dict
        Full basis information
    compare_electron_shells_meta : bool
        Compare the metadata of electron shells
    compare_ecp_pots_meta : bool
        Compare the metadata of ECP potentials
    compare_elements_meta : bool
        Compare the overall element metadata
    compare_meta: bool
        Compare the metadata for the basis set (name, description, etc)
    rel_tol : float
        Maximum relative error that is considered equal
    '''
    # Both basis sets must cover exactly the same elements
    els1 = sorted(bs1['elements'].keys())
    els2 = sorted(bs2['elements'].keys())
    if els1 != els2:
        return False

    for el in els1:
        same = compare_elements(bs1['elements'][el],
                                bs2['elements'][el],
                                compare_electron_shells_meta=compare_electron_shells_meta,
                                compare_ecp_pots_meta=compare_ecp_pots_meta,
                                compare_meta=compare_elements_meta,
                                rel_tol=rel_tol)
        if not same:
            print("Element failed:", el)
            return False

    if compare_meta:
        for key in ['name', 'family', 'description', 'revision_description', 'role', 'auxiliaries']:
            if not _compare_keys(bs1, bs2, key, operator.eq):
                return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_metadata_file(output_path, data_dir):
    '''Creates a METADATA.json file from a data directory

    The file is written to output_path

    Parameters
    ----------
    output_path : str
        Path of the metadata file to write (overwritten if it exists)
    data_dir : str
        Directory containing the basis set data files
    '''
    # Relative path to all (BASIS).metadata.json files
    meta_filelist, table_filelist, _, _ = get_all_filelist(data_dir)

    metadata = {}
    for meta_file_relpath in meta_filelist:
        # Read in the metadata for a single basis set
        meta_file_path = os.path.join(data_dir, meta_file_relpath)
        bs_metadata = read_json_basis(meta_file_path)

        # Base of the filename for table basis sets
        # Basename is something like '6-31G.', including the last period
        base_relpath, meta_filename = os.path.split(meta_file_relpath)
        base_filename = meta_filename.split('.')[0] + '.'

        # All the table files that correspond to this metadata file
        # (relative to data_dir)
        this_filelist = [
            x for x in table_filelist
            if os.path.dirname(x) == base_relpath and os.path.basename(x).startswith(base_filename)
        ]

        # The 'versions' dict that will go into the metadata
        version_info = {}

        # Make sure function types are the same across all versions
        function_types = None

        # For each table basis, compose it
        for table_file in this_filelist:
            # Obtain just the filename of the table basis
            table_filename = os.path.basename(table_file)

            # Obtain the base filename and version from the filename
            # The base filename is the part before the first period
            # (filebase.ver.table.json)
            table_filebase, ver, _, _ = table_filename.split('.')

            # Fully compose the basis set from components
            bs = compose_table_basis(table_file, data_dir)

            # Elements for which this basis is defined (sorted by atomic number)
            defined_elements = sorted(list(bs['elements'].keys()), key=lambda x: int(x))

            # Determine the types of functions contained in the basis
            # (gto, ecp, etc)
            if function_types is None:
                function_types = bs['function_types']
            elif function_types != bs['function_types']:
                raise RuntimeError("Differing function types across versions for " + base_filename)

            # Create the metadata for this specific version
            # yapf: disable
            version_info[ver] = { 'file_relpath': table_file,
                                  'revdesc': bs['revision_description'],
                                  'elements': defined_elements
                                }
            # yapf: enable

        # Sort the version dicts
        version_info = dict(sorted(version_info.items()))

        # Find the maximum version for this basis
        latest_ver = max(version_info.keys())

        # Create the common metadata for this basis set
        # display_name and other_names are placeholders to keep order
        # yapf: disable
        common_md = { 'display_name': None,
                      'other_names': None,
                      'description': bs['description'],
                      'latest_version': latest_ver,
                      'basename': base_filename[:-1],  # Strip off that trailing period
                      'relpath': base_relpath,
                      'family': bs['family'],
                      'role': bs['role'],
                      'functiontypes': function_types,
                      'auxiliaries': bs['auxiliaries'],
                      'versions': version_info }
        # yapf: enable

        # Loop through all the common names, translate them, and then add the data
        # (one metadata entry per name; all names share the common metadata)
        for bs_name in bs_metadata['names']:
            tr_name = transform_basis_name(bs_name)
            if tr_name in metadata:
                raise RuntimeError("Duplicate basis set name: " + tr_name)

            # Create a new entry, with all the common metadata
            # Also, store the other names for this basis
            other_names = bs_metadata['names'].copy()
            other_names.remove(bs_name)

            metadata[tr_name] = common_md.copy()
            metadata[tr_name]['display_name'] = bs_name
            metadata[tr_name]['other_names'] = other_names

    # Write out the metadata (sorted by transformed name)
    metadata = dict(sorted(metadata.items()))
    _write_plain_json(output_path, metadata)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_txt(refs):
    '''Converts references to plain text format

    Parameters
    ----------
    refs : list
        Reference blocks; each entry has 'elements' and 'reference_info' keys.
        NOTE(review): each 'reference_data' appears to be an iterable of
        (key, reference) pairs -- confirm against the caller.

    Returns
    -------
    str
        All references formatted as plain text
    '''
    full_str = '\n'
    lib_citation_desc, lib_citations = get_library_citation()

    # Add the refs for the libarary at the top
    full_str += '*' * 80 + '\n'
    full_str += lib_citation_desc
    full_str += '*' * 80 + '\n'

    for r in lib_citations.values():
        ref_txt = reference_text(r)
        ref_txt = textwrap.indent(ref_txt, ' ' * 4)
        full_str += '{}\n\n'.format(ref_txt)

    full_str += '*' * 80 + '\n'
    full_str += "References for the basis set\n"
    full_str += '*' * 80 + '\n'

    for ref in refs:
        # Header line: which elements this reference block covers
        full_str += '{}\n'.format(compact_elements(ref['elements']))

        for ri in ref['reference_info']:
            full_str += '    ## {}\n'.format(ri['reference_description'])

            refdata = ri['reference_data']

            if len(refdata) == 0:
                full_str += '    (...no reference...)\n\n'

            for k, r in refdata:
                ref_txt = reference_text(r)
                ref_txt = textwrap.indent(ref_txt, ' ' * 4)
                full_str += '{}\n\n'.format(ref_txt)

    return full_str
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff_basis_dict(left_list, right_list):
    '''
    Compute the difference between two sets of basis set dictionaries

    The result is a list of dictionaries that correspond to each dictionary in
    `left_list`. Each resulting dictionary will contain only the elements/shells
    that exist in that entry and not in any of the dictionaries in `right_list`.

    This only works on the shell level, and will only subtract entire shells
    that are identical. ECP potentials are not affected.

    The return value contains deep copies of the input data

    Parameters
    ----------
    left_list : list of dict
        Dictionaries to use as the base
    right_list : list of dict
        Dictionaries of basis data to subtract from each dictionary of `left_list`

    Returns
    ----------
    list
        Each object in `left_list` containing data that does not appear in `right_list`
    '''
    ret = []
    for bs1 in left_list:
        res = copy.deepcopy(bs1)
        for bs2 in right_list:
            for el in res['elements'].keys():
                if el not in bs2['elements']:
                    continue  # Element only exists in left

                eldata1 = res['elements'][el]
                eldata2 = bs2['elements'][el]

                # Robustness fix: ECP-only elements have no 'electron_shells'
                # key; there is nothing to subtract for them (direct access
                # would previously raise a KeyError)
                if 'electron_shells' not in eldata1 or 'electron_shells' not in eldata2:
                    continue

                s1 = eldata1['electron_shells']
                s2 = eldata2['electron_shells']
                eldata1['electron_shells'] = subtract_electron_shells(s1, s2)

        # Remove any elements whose shells were all subtracted away.
        # Elements without an 'electron_shells' key (ECP-only) are kept intact.
        res['elements'] = {
            k: v
            for k, v in res['elements'].items() if 'electron_shells' not in v or len(v['electron_shells']) > 0
        }
        ret.append(res)
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff_json_files(left_files, right_files):
    '''
    Compute the difference between two sets of basis set JSON files

    The output is a set of files that correspond to each file in
    `left_files`. Each resulting file will contain only the elements/shells
    that exist in that entry and not in any of the files in `right_files`.

    This only works on the shell level, and will only subtract entire shells
    that are identical. ECP potentials are not affected.

    `left_files` and `right_files` are lists of file paths. The output
    is written to files with the same names as those in `left_files`,
    but with `.diff` added to the end. If those files exist, they are overwritten.

    Parameters
    ----------
    left_files : list of str
        Paths to JSON files to use as the base
    right_files : list of str
        Paths to JSON files to subtract from each file of `left_files`

    Returns
    ----------
    None
    '''
    minuend = [fileio.read_json_basis(path) for path in left_files]
    subtrahend = [fileio.read_json_basis(path) for path in right_files]

    # Write one .diff file next to each left-hand input file
    for src_path, diff_bs in zip(left_files, diff_basis_dict(minuend, subtrahend)):
        fileio.write_json_basis(src_path + '.diff', diff_bs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shells_difference(s1, s2):
    """ Computes and prints the differences between two lists of shells

    If the shells contain a different number primitives,
    or the lists are of different length, inf is returned.
    Otherwise, the maximum relative difference is returned.
    """
    max_rdiff = 0.0
    nsh = len(s1)
    if len(s2) != nsh:
        print("Different number of shells: {} vs {}".format(len(s1), len(s2)))
        return float('inf')

    # Compare in a canonical order so equivalent shell lists line up
    shells1 = sort_shells(s1)
    shells2 = sort_shells(s2)

    for n in range(nsh):
        sh1 = shells1[n]
        sh2 = shells2[n]

        if sh1['angular_momentum'] != sh2['angular_momentum']:
            print("Different angular momentum for shell {}".format(n))
            return float('inf')

        nprim = len(sh1['exponents'])
        if len(sh2['exponents']) != nprim:
            print("Different number of primitives for shell {}".format(n))
            return float('inf')

        ngen = len(sh1['coefficients'])
        if len(sh2['coefficients']) != ngen:
            print("Different number of general contractions for shell {}".format(n))
            return float('inf')

        for p in range(nprim):
            e1 = sh1['exponents'][p]
            e2 = sh2['exponents'][p]
            r = _reldiff(e1, e2)
            if r > 0.0:
                # Only print primitives that actually differ
                print("   Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
            max_rdiff = max(max_rdiff, r)

            for g in range(ngen):
                c1 = sh1['coefficients'][g][p]
                c2 = sh2['coefficients'][g][p]
                r = _reldiff(c1, c2)
                if r > 0.0:
                    print("Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
                max_rdiff = max(max_rdiff, r)

    print()
    print("Max relative difference for these shells: {}".format(max_rdiff))
    return max_rdiff
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.