text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_dict(self):
"""Returns a JSON-serializeable object representing this tree.""" |
# Recursively convert a value: nested attribute holders serialize via
# their own as_dict(), lists and dicts are converted element-wise, and
# everything else passes through untouched.
def conv(v):
    if isinstance(v, SerializableAttributesHolder):
        return v.as_dict()
    elif isinstance(v, list):
        return [conv(x) for x in v]
    elif isinstance(v, dict):
        return {x:conv(y) for (x,y) in v.items()}
    else:
        return v
# Attribute keys use '_' internally but '-' in the serialized form.
return {k.replace('_', '-'): conv(v) for (k, v) in self._attributes.items()} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_json(cls, data):
"""Decode a JSON string and inflate a node instance.""" |
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
# The top-level JSON value must be an object; delegate inflation of the
# resulting mapping to from_dict().
assert isinstance(data, dict)
return cls.from_dict(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_keywords(func):
""" Parses the keywords from the given function. :param func | <function> """ |
if hasattr(func, 'im_func'):
func = func.im_func
try:
return func.func_code.co_varnames[-len(func.func_defaults):]
except (TypeError, ValueError, IndexError):
return tuple() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jtag_enable(self):
""" Enables JTAG output on the controller. JTAG operations executed before this function is called will return useless data or fail. Usage: """ |
# Status 0: JTAG newly enabled. Status 3: it was already enabled — the
# controller is still usable, so _jtagon is set *before* raising.
status, _ = self.bulkCommand(_BMSG_ENABLE_JTAG)
if status == 0:
    self._jtagon = True
elif status == 3:
    self._jtagon = True
    raise JTAGAlreadyEnabledError()
else:
    # Any other status is a hard failure.
    raise JTAGEnableFailedError("Error enabling JTAG. Error code: %s." %status) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jtag_disable(self):
""" Disables JTAG output on the controller. JTAG operations executed immediately after this function will return useless data or fail. Usage: """ |
# Nothing to do if JTAG was never enabled.
if not self._jtagon: return
status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
if status == 0:
    self._jtagon = False
elif status == 3:
    # NOTE(review): statuses other than 0/3 fall through silently, and
    # when this raises, close_handle() below is skipped — confirm both
    # behaviors are intended.
    raise JTAGControlError("Error Code %s"%status)
self.close_handle() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False):
""" Command controller to write arbitrary TDI and TMS data to the physical scan chain. Optionally return TDO bits sent back from the scan chain. Args: tmsdata - bits to send over TMS line of scan chain (bitarray) must be the same length ad tdidata tdidata - bits to send over TDI line of scan chain (bitarray) must be the same length ad tmsdata return_tdo (bool) - return the devices bitarray response Returns: None by default or the (bitarray) response of the device after receiving data, if return_tdo is True. Usage: bitarray("11111"), return_tdo=True) """ |
self._check_jtag()
if len(tmsdata) != len(tdidata):
raise Exception("TMSdata and TDIData must be the same length")
self._update_scanchain(tmsdata)
count = len(tmsdata)
t = time()
outdata = bitarray([val for pair in zip(tmsdata, tdidata)
for val in pair])
outdata = build_byte_align_buff(outdata).tobytes()[::-1]
if self._scanchain and self._scanchain._print_statistics:
print("TDI/TDI DATA PREP TIME", time()-t)#pragma: no cover
t = time()
self.bulkCommandDefault(_BMSG_WRITE_TMS_TDI % \
(return_tdo, count.to_bytes(4, 'little')))
self.bulkWriteData(outdata)
if self._scanchain and self._scanchain._print_statistics:
print("TRANSFER TIME", time()-t)
t = time()
tdo_bits = self._read_tdo(count) if return_tdo else None
if self._scanchain and self._scanchain._print_statistics:
print("TDO READ TIME", time()-t)#pragma: no cover
self._get_adv_trans_stats(0x0A, return_tdo)
return tdo_bits |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _readFastaFile(filepath):
"""Read a FASTA file and yields tuples of 'header' and 'sequence' entries. :param filepath: file path of the FASTA file :yields: FASTA entries in the format ('header', 'sequence'). The 'header' string does not contain the '>' and trailing white spaces. The 'sequence' string does not contain trailing white spaces, a '*' at the end of the sequence is removed. See also :func:`importProteinDatabase` and :func:`maspy.peptidemethods.digestInSilico`. """ |
# Join accumulated sequence lines, stripping per-line trailing
# whitespace and a terminal '*' (stop-codon marker) from the result.
processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*')
# Strip the leading '>' and trailing whitespace from a header line.
processHeaderLine = lambda line: line[1:].rstrip()
with io.open(filepath) as openfile:
    #Iterate through lines until the first header is encountered
    try:
        line = next(openfile)
        while line[0] != '>':
            line = next(openfile)
        header = processHeaderLine(line)
        sequences = list()
    except StopIteration:
        # Reaching EOF before any '>' means there are no FASTA entries.
        errorText = 'File does not contain fasta entries.'
        raise maspy.errors.FileFormatError(errorText)
    # Each subsequent header terminates (and yields) the previous entry.
    for line in openfile:
        if line[0] == '>':
            yield header, processSequences(sequences)
            header = processHeaderLine(line)
            sequences = list()
        else:
            sequences.append(line)
    #Yield last entry
    if sequences:
        yield header, processSequences(sequences) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fastaParseSgd(header):
"""Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header """ |
rePattern = '([\S]+)\s([\S]+).+(\".+\")'
ID, name, description = re.match(rePattern, header).groups()
info = {'id':ID, 'name':name, 'description':description}
return info |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, path, compress=True):
"""Writes the ``.proteins`` and ``.peptides`` entries to the hard disk as a ``proteindb`` file. .. note:: If ``.save()`` is called and no ``proteindb`` file is present in the specified path a new files is generated, otherwise the old file is replaced. :param path: filedirectory to which the ``proteindb`` file is written. The output file name is specified by ``self.info['name']`` :param compress: bool, True to use zip file compression """ |
# PartiallySafeReplace stages writes so the existing proteindb file is
# only replaced once writing has fully succeeded.
with aux.PartiallySafeReplace() as msr:
    # Output name is derived from the container's own metadata.
    filename = self.info['name'] + '.proteindb'
    filepath = aux.joinpath(path, filename)
    with msr.open(filepath, mode='w+b') as openfile:
        self._writeContainer(openfile, compress=compress) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(cls, path, name):
"""Imports the specified ``proteindb`` file from the hard disk. :param path: filedirectory of the ``proteindb`` file :param name: filename without the file extension ".proteindb" .. note:: this generates rather large files, which actually take longer to import than to newly generate. Maybe saving / loading should be limited to the protein database whitout in silico digestion information. """ |
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
    #Convert the zipfile data into a str object, necessary since
    #containerZip.read() returns a bytes object.
    proteinsString = io.TextIOWrapper(containerZip.open('proteins'),
                                      encoding='utf-8'
                                      ).read()
    peptidesString = io.TextIOWrapper(containerZip.open('peptides'),
                                      encoding='utf-8'
                                      ).read()
    infoString = io.TextIOWrapper(containerZip.open('info'),
                                  encoding='utf-8'
                                  ).read()
    # jsonHook re-inflates the custom sequence classes from the plain
    # JSON objects stored in the archive.
    newInstance = cls()
    newInstance.proteins = json.loads(proteinsString,
                                      object_hook=ProteinSequence.jsonHook)
    newInstance.peptides = json.loads(peptidesString,
                                      object_hook=PeptideSequence.jsonHook)
    newInstance.info.update(json.loads(infoString))
return newInstance |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_keywords(codedata) : """ Fetch keywords by shaman.KeywordFetcher Get average probabilities of keyword and language """ |
# Read each row in codedata and count keywords in codes per language.
# tmp: language -> {keyword: occurrence count};
# language_counts: number of samples contributed per language.
tmp = {}
language_counts = {}
for index, (language, code) in enumerate(codedata) :
    if language not in shaman.SUPPORTING_LANGUAGES :
        continue
    if language not in tmp :
        tmp[language] = {}
        language_counts[language] = 0
    language_counts[language] += 1
    for keyword in shaman.KeywordFetcher.fetch( code ) :
        # if keyword exists in fetched data, add '1' to keyword data
        tmp[language][keyword] = tmp[language].get(keyword, 0) + 1
    print('Fetch keyword %d/%d ' % (index, len(codedata)), end='\r')
# Re-index the counts by keyword and normalize to per-language
# probabilities (count / samples seen for that language).
ret = {}
for language in tmp :
    for keyword, count in tmp[ language ].items() :
        if keyword not in ret :
            ret[ keyword ] = {}
        ret[ keyword ][ language ] = (count / language_counts[ language ]) # Probability
print('Fetch keyword completed ')
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_patterns(codedata) : """ Match patterns by shaman.PatternMatcher Get average ratio of pattern and language """ |
ret = {}
for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) :
print('Matching pattern %d "%s"' % (index1+1, pattern))
matcher = shaman.PatternMatcher(pattern)
tmp = {}
for index2, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if len(code) <= 20 or len(code) > 100000 :
continue
if language not in tmp :
tmp[language] = []
ratio = matcher.getratio(code)
tmp[language].append(ratio)
print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
ret[pattern] = {}
for language, data in tmp.items() :
ret[pattern][language] = sum(tmp[language]) / max(len(tmp[language]), 1)
print('Matching patterns completed ')
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def facility(self, column=None, value=None, **kwargs):
""" Check information related to Radiation facilities. """ |
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def geo(self, column=None, value=None, **kwargs):
""" Locate a facility through geographic location. """ |
return self._resolve_call('RAD_GEO_LOCATION', column, value, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regulation(self, column=None, value=None, **kwargs):
""" Provides relevant information about applicable regulations. """ |
return self._resolve_call('RAD_REGULATION', column, value, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regulatory_program(self, column=None, value=None, **kwargs):
""" Identifies the regulatory authority governing a facility, and, by virtue of that identification, also identifies the regulatory program of interest and the type of facility. """ |
# Thin wrapper: query the RAD_REGULATORY_PROG table via the shared resolver.
return self._resolve_call('RAD_REGULATORY_PROG', column,
                          value, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_basic_info():
""" """ |
# Report the running interpreter's version to the collector, both as a
# JSON-encoded tuple and as the human-readable version string.
s = sys.version_info
_collect(json.dumps({'sys.version_info':tuple(s)}))
_collect(sys.version)
return sys.version |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(function):
""" decorator that collect function call count. """ |
# Build the collection key once, at decoration time.
message = 'call:%s.%s' % (function.__module__,function.__name__)
@functools.wraps(function)
def wrapper(*args, **kwargs):
    # Record the call, then delegate to the wrapped function unchanged.
    _collect(message)
    return function(*args, **kwargs)
return wrapper |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_ip_addr_show(raw_result):
""" Parse the 'ip addr list dev' command raw output. :param str raw_result: os raw result string. :rtype: dict :return: The parsed result of the show interface command in a \ dictionary of the form: :: { 'os_index' : '0', 'dev' : 'eth0', 'falgs_str': 'BROADCAST,MULTICAST,UP,LOWER_UP', 'mtu': 1500, 'state': 'down', 'link_type' 'ether', 'mac_address': '00:50:56:01:2e:f6', 'inet': '20.1.1.2', 'inet_mask': '24', 'inet6': 'fe80::42:acff:fe11:2', 'inte6_mask': '64' } """ |
# does link exist? A "does not exist" message means no parsing to do.
show_re = (
    r'"(?P<dev>\S+)"\s+does not exist'
)
re_result = search(show_re, raw_result)
result = None
if not (re_result):
    # match top two lines for several 'always there' variables.
    # NOTE: 'falgs_str' (sic) is the established result key; callers
    # depend on it, so the typo is deliberately kept.
    show_re = (
        r'\s*(?P<os_index>\d+):\s+(?P<dev>\S+):\s+<(?P<falgs_str>.*)?>.*?'
        r'mtu\s+(?P<mtu>\d+).+?state\s+(?P<state>\w+).*'
        r'\s*link/(?P<link_type>\w+)\s+(?P<mac_address>\S+)'
    )
    re_result = search(show_re, raw_result, DOTALL)
    result = re_result.groupdict()
    # seek inet if its there
    show_re = (
        r'((inet )\s*(?P<inet>[^/]+)/(?P<inet_mask>\d{1,2}))'
    )
    re_result = search(show_re, raw_result)
    if (re_result):
        result.update(re_result.groupdict())
    # seek inet6 if its there
    show_re = (
        r'((?<=inet6 )(?P<inet6>[^/]+)/(?P<inet6_mask>\d{1,2}))'
    )
    re_result = search(show_re, raw_result)
    if (re_result):
        result.update(re_result.groupdict())
    # cleanup dictionary before returning: numeric strings become ints
    for key, value in result.items():
        if value is not None:
            if value.isdigit():
                result[key] = int(value)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interface(enode, portlbl, addr=None, up=None, shell=None):
""" Configure a interface. All parameters left as ``None`` are ignored and thus no configuration action is taken for that parameter (left "as-is"). :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped to real port automatically. :param str addr: IPv4 or IPv6 address to add to the interface: - IPv4 address and netmask to assign to the interface in the form ``'192.168.20.20/24'``. - IPv6 address and subnets to assign to the interface in the form ``'2001::1/120'``. :param bool up: Bring up or down the interface. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """ |
# Map the port label to the real device name.
assert portlbl
port = enode.ports[portlbl]
if addr is not None:
    # ip_interface() validates the address/mask string before use.
    assert ip_interface(addr)
    cmd = 'ip addr add {addr} dev {port}'.format(addr=addr, port=port)
    response = enode(cmd, shell=shell)
    # An empty response means the command succeeded.
    assert not response
if up is not None:
    cmd = 'ip link set dev {port} {state}'.format(
        port=port, state='up' if up else 'down'
    )
    response = enode(cmd, shell=shell)
    assert not response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_ip(enode, portlbl, addr, shell=None):
""" Remove an IP address from an interface. All parameters left as ``None`` are ignored and thus no configuration action is taken for that parameter (left "as-is"). :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped to real port automatically. :param str addr: IPv4 or IPv6 address to remove from the interface: - IPv4 address to remove from the interface in the form ``'192.168.20.20'`` or ``'192.168.20.20/24'``. - IPv6 address to remove from the interface in the form ``'2001::1'`` or ``'2001::1/120'``. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """ |
# Validate inputs, then map the port label to the real device name.
assert portlbl
assert ip_interface(addr)
port = enode.ports[portlbl]
cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)
response = enode(cmd, shell=shell)
# An empty response means the command succeeded.
assert not response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_route(enode, route, via, shell=None):
""" Add a new static route. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str route: Route to add, an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'`` or ``'default'``. :param str via: Via for the route as an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'``. :param shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. :type shell: str or None """ |
via = ip_address(via)
# Pick the address-family flag: IPv6 when either the gateway or a
# non-'default' destination network is IPv6, otherwise IPv4.
version = '-4'
if (via.version == 6) or \
        (route != 'default' and ip_network(route).version == 6):
    version = '-6'
cmd = 'ip {version} route add {route} via {via}'.format(
    version=version, route=route, via=via
)
response = enode(cmd, shell=shell)
# An empty response means the command succeeded.
assert not response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None):
""" Add a new virtual link with the type set to VLAN. Creates a new vlan device {name} on device {port}. Will raise an exception if value is already assigned. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str portlbl: Port label to configure. Port label will be mapped automatically. :param str name: specifies the name of the new virtual device. :param str vlan_id: specifies the VLAN identifier. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """ |
assert name
# Refuse to clobber an existing port mapping.
if name in enode.ports:
    raise ValueError('Port {name} already exists'.format(name=name))
assert portlbl
assert vlan_id
port = enode.ports[portlbl]
cmd = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format(
    dev=port, name=name, vlan_id=vlan_id)
response = enode(cmd, shell=shell)
assert not response, 'Cannot add virtual link {name}'.format(name=name)
# Register the new virtual device so later calls can address it by name.
enode.ports[name] = name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_link_type_vlan(enode, name, shell=None):
""" Delete a virtual link. Deletes a vlan device with the name {name}. Will raise an expection if the port is not already present. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str name: specifies the name of the new virtual device. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. """ |
assert name
if name not in enode.ports:
    raise ValueError('Port {name} doesn\'t exists'.format(name=name))
# NOTE(review): 'ip link del link dev {name}' mixes the 'link' keyword
# into the delete form; plain 'ip link del {name}' is the documented
# iproute2 syntax — confirm against the target iproute2 version.
cmd = 'ip link del link dev {name}'.format(name=name)
response = enode(cmd, shell=shell)
assert not response, 'Cannot remove virtual link {name}'.format(name=name)
# Drop the device from the port mapping only after successful removal.
del enode.ports[name] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show_interface(enode, dev, shell=None):
""" Show the configured parameters and stats of an interface. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str dev: Unix network device name. Ex 1, 2, 3.. :rtype: dict :return: A combined dictionary as returned by both :func:`topology_lib_ip.parser._parse_ip_addr_show` :func:`topology_lib_ip.parser._parse_ip_stats_link_show` """ |
assert dev
# Address/flags half: 'ip addr list dev <dev>'.
cmd = 'ip addr list dev {ldev}'.format(ldev=dev)
response = enode(cmd, shell=shell)
first_half_dict = _parse_ip_addr_show(response)
d = None
if (first_half_dict):
    # Statistics half: 'ip -s link list dev <dev>'; merge both parses.
    cmd = 'ip -s link list dev {ldev}'.format(ldev=dev)
    response = enode(cmd, shell=shell)
    second_half_dict = _parse_ip_stats_link_show(response)
    d = first_half_dict.copy()
    d.update(second_half_dict)
# Returns None when the device does not exist.
return d |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_mmd(target_folder=DEFAULT_LIBRARY_DIR):
"""Build and install the MultiMarkdown shared library.""" |
# Clone MultiMarkdown-5 (fix_windows branch) into a temp dir, including
# its submodules.
mmd_dir = tempfile.mkdtemp()
mmd_repo = pygit2.clone_repository('https://github.com/jasedit/MultiMarkdown-5', mmd_dir,
                                   checkout_branch='fix_windows')
mmd_repo.init_submodules()
mmd_repo.update_submodules()
build_dir = os.path.join(mmd_dir, 'build')
old_pwd = os.getcwd()
# NOTE(review): cwd is only restored at the end; an exception mid-build
# leaves the process chdir'd into the temp dir — consider try/finally.
os.chdir(build_dir)
cmake_cmd = ['cmake', '-DCMAKE_BUILD_TYPE=Release', '-DSHAREDBUILD=1', '..']
if platform.system() == 'Windows':
    # Select the matching Visual Studio generator (Win64 on 64-bit).
    is_64bit = platform.architecture()[0] == '64bit'
    generator = 'Visual Studio 14 2015{0}'.format(' Win64' if is_64bit else '')
    cmake_cmd.insert(-1, '-G')
    cmake_cmd.insert(-1, '{0}'.format(generator))
subprocess.call(cmake_cmd)
# Run the per-platform build step registered for this OS.
PLATFORM_BUILDS[platform.system()]()
lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()]
if not os.path.exists(target_folder):
    os.mkdir(target_folder)
# Copy the built shared library into the target folder.
src = os.path.join(build_dir, SHLIB_PREFIX[platform.system()], lib_file)
dest = os.path.join(target_folder, lib_file)
shutil.copyfile(src, dest)
os.chdir(old_pwd)
shutil.rmtree(mmd_dir, ignore_errors=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_requirements_files(self, base_dir='.'):
""" Generate set of requirements files for config """ |
print("Creating requirements files\n")
# TODO How to deal with requirements that are not simple, e.g. a github url
shared = self._get_shared_section()
requirements_dir = self._make_requirements_directory(base_dir)
for section in self.config.sections():
if section == 'metadata':
continue
requirements = {}
for option in self.config.options(section):
requirements[option] = self.config.get(section, option)
if not requirements:
# No need to write out an empty file
continue
filename = os.path.join(requirements_dir, '%s.txt' % section)
self._write_requirements_file(shared, section, requirements, filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_default_sections(self):
""" Starting from scratch, so create a default rc file """ |
self.config.add_section('metadata')
self.config.set('metadata', 'shared', 'common')
self.config.add_section('common')
self.config.add_section('development')
self.config.add_section('production') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_requirements(self, input):
""" Parse a list of requirements specifications. Lines that look like "foobar==1.0" are parsed; all other lines are silently ignored. Returns a tuple of tuples, where each inner tuple is: (package, version) """ |
results = []
for line in input:
(package, version) = self._parse_line(line)
if package:
results.append((package, version))
return tuple(results) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_rc_file(self, packages):
""" Create a set of requirements files for config """ |
print("Creating rcfile '%s'\n" % self.rc_filename)
# TODO bug with == in config file
if not self.config.sections():
self._write_default_sections()
sections = {}
section_text = []
for i, section in enumerate(self.config.sections()):
if section == 'metadata':
continue
sections[i] = section
section_text.append('%s. %s' % (i, section))
section_text = ' / '.join(section_text)
self._remap_stdin()
package_names = set()
lines = packages.readlines()
requirements = self._parse_requirements(lines)
for (package, version) in requirements:
package_names.add(package)
section, configured_version = self._get_option(package)
# Package already exists in configuration
if section:
# If there is a configured version, update it. If not, leave it unversioned.
if configured_version:
if configured_version != version:
print("Updating '%s' version from '%s' to '%s'"
% (package, configured_version, version))
self.config.set(section, package, version)
continue
section = self._get_section(package, sections, section_text)
self._set_option(section, package, version)
for section in self.config.sections():
if section == 'metadata':
continue
for option in self.config.options(section):
if option not in package_names:
print("Removing package '%s'" % option)
self.config.remove_option(section, option)
rc_file = open(self.rc_filename, 'w+')
self.config.write(rc_file)
rc_file.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upgrade_packages(self, packages):
""" Upgrade all specified packages to latest version """ |
print("Upgrading packages\n")
package_list = []
requirements = self._parse_requirements(packages.readlines())
for (package, version) in requirements:
package_list.append(package)
if package_list:
args = [
"pip",
"install",
"-U",
]
args.extend(package_list)
subprocess.check_call(args)
else:
print("No packages to upgrade") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def determine_extra_packages(self, packages):
""" Return all packages that are installed, but missing from "packages". Return value is a tuple of the package names """ |
# Ask pip for everything currently installed.
args = [
    "pip",
    "freeze",
]
installed = subprocess.check_output(args, universal_newlines=True)
installed_list = set()
lines = installed.strip().split('\n')
for (package, version) in self._parse_requirements(lines):
    installed_list.add(package)
# Packages the caller wants to keep.
package_list = set()
for (package, version) in self._parse_requirements(packages.readlines()):
    package_list.add(package)
# Installed but not wanted -> candidates for removal.
removal_list = installed_list - package_list
return tuple(removal_list) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_extra_packages(self, packages, dry_run=False):
""" Remove all packages missing from list """ |
removal_list = self.determine_extra_packages(packages)
if not removal_list:
    print("No packages to be removed")
else:
    if dry_run:
        # Report only; leave the environment untouched.
        print("The following packages would be removed:\n %s\n" %
              "\n ".join(removal_list))
    else:
        print("Removing packages\n")
        # '-y' answers pip's confirmation prompt automatically.
        args = [
            "pip",
            "uninstall",
            "-y",
        ]
        args.extend(list(removal_list))
        subprocess.check_call(args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrap(self, **kwargs):
"""Inplace constructor. Depending on `self.inplace`, rewrap `obj`, or just update internal vars, possibly including the `obj`. """ |
if self.inplace:
    # Mutate this wrapper in place and return it for chaining.
    for key, val in kwargs.items():
        setattr(self, key, val)
    return self
else:
    # Build a new pluckable, inheriting any construction attribute the
    # caller did not explicitly override.
    for key in ['obj', 'default', 'skipmissing', 'inplace', 'empty']:
        kwargs.setdefault(key, getattr(self, key))
    return pluckable(**kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sliced_list(self, selector):
"""For slice selectors operating on lists, we need to handle them differently, depending on ``skipmissing``. In explicit mode, we may have to expand the list with ``default`` values. """ |
if self.skipmissing:
return self.obj[selector]
# TODO: can be optimized by observing list bounds
keys = xrange(selector.start or 0,
selector.stop or sys.maxint,
selector.step or 1)
res = []
for key in keys:
self._append(self.obj, key, res, skipmissing=False)
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forceutc(t: Union[str, datetime.datetime, datetime.date, np.datetime64]) -> Union[datetime.datetime, datetime.date]: """ Add UTC to datetime-naive and convert to UTC for datetime aware input: python datetime (naive, utc, non-utc) or Numpy datetime64 #FIXME add Pandas and AstroPy time classes output: utc datetime """ |
# need to passthrough None for simpler external logic.
# %% polymorph to datetime
if isinstance(t, str):
    # Let dateutil parse arbitrary date/time strings.
    t = parse(t)
elif isinstance(t, np.datetime64):
    t = t.astype(datetime.datetime)
elif isinstance(t, datetime.datetime):
    pass
elif isinstance(t, datetime.date):
    # A bare date has no time-of-day, hence no timezone to enforce.
    return t
elif isinstance(t, (np.ndarray, list, tuple)):
    # Recurse element-wise over sequences.
    return np.asarray([forceutc(T) for T in t])
else:
    raise TypeError('datetime only input')
# %% enforce UTC on datetime
if t.tzinfo is None:  # datetime-naive
    t = t.replace(tzinfo=UTC)
else:  # datetime-aware
    t = t.astimezone(UTC)  # changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC
return t |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_a_new_working_directory(context):
""" Creates a new, empty working directory """ |
# Ensure the workdir attribute/path exist, then wipe the directory so
# the scenario starts from an empty working directory.
command_util.ensure_context_attribute_exists(context, "workdir", None)
command_util.ensure_workdir_exists(context)
shutil.rmtree(context.workdir, ignore_errors=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_use_curdir_as_working_directory(context):
""" Uses the current directory as working directory """ |
# Point the scenario's workdir at the invoking process's current dir.
context.workdir = os.path.abspath(".")
command_util.ensure_workdir_exists(context) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_an_empty_file_named_filename(context, filename):
""" Creates an empty file. """ |
# Only relative paths are allowed; the file is created inside workdir.
assert not os.path.isabs(filename)
command_util.ensure_workdir_exists(context)
filename2 = os.path.join(context.workdir, filename)
pathutil.create_textfile_with_contents(filename2, "") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_i_run_command(context, command):
""" Run a command as subprocess, collect its output and returncode. """ |
command_util.ensure_workdir_exists(context)
context.command_result = command_shell.run(command, cwd=context.workdir)
# Preserve coverage data produced by the subprocess, if any.
command_util.workdir_save_coverage_files(context.workdir)
# Debug output is permanently disabled via the 'False and' guard.
if False and DEBUG:
    print(u"run_command: {0}".format(command))
    print(u"run_command.output {0}".format(context.command_result.output)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def step_command_output_should_contain_exactly_text(context, text):
    """Verify that the last command's output contains exactly the given text.

    .. code-block:: gherkin

        When I run "echo Hello"
        Then the command output should contain "Hello"
    """
    expected = text
    needs_substitution = ("{__WORKDIR__}" in text) or ("{__CWD__}" in text)
    if needs_substitution:
        # Expand workdir/cwd placeholders with normalized posix-style paths.
        expected = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    textutil.assert_text_should_contain_exactly(
        context.command_result.output, expected)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_file_list(path, max_depth=1, cur_depth=0):
    """Recursively yield all non-hidden files up to ``max_depth`` levels deep.

    Directories themselves are never yielded; names starting with '.' are
    skipped entirely. A non-existent *path* yields nothing.
    """
    if not os.path.exists(path):
        return
    for entry in os.listdir(path):
        if entry.startswith('.'):
            # Ignore hidden files and directories.
            continue
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            if cur_depth == max_depth:
                continue  # depth budget exhausted; do not descend further
            for found in get_file_list(entry_path, max_depth, cur_depth + 1):
                yield found
        else:
            yield entry_path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_applied_migrations(databases=None):
    """Return a dict mapping database alias -> list of applied migration labels.

    :param databases: optional iterable of aliases to restrict the query to;
        aliases that are not "capable" are silently dropped. When falsy, all
        capable databases are queried.
    """
    if not databases:
        databases = get_capable_databases()
    else:
        # Restrict the requested aliases to the capable ones.
        # (Plain comprehension with a set instead of itertools.ifilter,
        # which only exists on Python 2.)
        capable = set(get_capable_databases())
        databases = [db for db in databases if db in capable]

    results = defaultdict(list)
    for db in databases:
        for migration in Migration.objects.using(db).order_by("migration_label"):
            results[db].append(migration.migration_label)
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getContGroupArrays(arrays, groupPositions, arrayKeys=None):
    """Convenience function to generate a subset of arrays from specified
    array positions.

    :param arrays: a dictionary containing ``numpy.arrays``
    :param groupPositions: array positions that should be included in the
        subset of arrays
    :param arrayKeys: a list of "arrays" keys that should be included in the
        subset of arrays, if None all keys are selected
    :returns: a dictionary containing ``numpy.arrays``
    """
    keys = arrayKeys if arrayKeys is not None else list(viewkeys(arrays))
    return {key: arrays[key][groupPositions] for key in keys}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcDistMatchArr(matchArr, tKey, mKey):
    """Calculate the euclidean distance of all array positions in "matchArr".

    :param matchArr: a dictionary of ``numpy.arrays`` containing at least two
        entries that are treated as cartesian coordinates.
    :param tKey: key of the array used as the first coordinate component
    :param mKey: key of the array used as the second coordinate component
    :returns: dict with keys "posPairs" (``numpy.array`` of (pos1, pos2)
        index pairs) and "eucDist" (``numpy.array`` of distances), both
        sorted together by ascending distance.
    """
    #Calculate all sorted list of all eucledian feature distances
    matchArrSize = listvalues(matchArr)[0].size
    distInfo = {'posPairs': list(), 'eucDist': list()}
    # Stack the two coordinate arrays into an (n, 2) point matrix.
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
    # Enumerate index pairs in the same order scipy's pdist emits its
    # condensed distance vector, so pairs line up with distances below.
    for pos1 in range(matchArrSize-1):
        for pos2 in range(pos1+1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
    # Sort pairs and distances together by ascending distance.
    distSort = numpy.argsort(distInfo['eucDist'])
    for key in list(viewkeys(distInfo)):
        distInfo[key] = distInfo[key][distSort]
    return distInfo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, path, name):
    """Import the specified ``fgic`` file from the hard disk.

    :param path: file directory from which the ``fgic`` file is read.
    :param name: filename, without file extension
    """
    filepath = aux.joinpath(path, name + '.fgic')
    with zipfile.ZipFile(filepath, 'r') as archive:
        # ZipFile.open() yields bytes; wrap it so json receives text.
        with io.TextIOWrapper(archive.open('data'), encoding='utf-8') as fh:
            dataText = fh.read()
        with io.TextIOWrapper(archive.open('info'), encoding='utf-8') as fh:
            infoText = fh.read()
    self.container = json.loads(dataText, object_hook=Fgi.jsonHook)
    self.info.update(json.loads(infoText))
    # The matrix template is stored inside "info" but kept as a separate
    # attribute on the instance.
    self._matrixTemplate = self.info.pop('_matrixTemplate')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(cls, path, encoding='utf-8'):
    """Initialise a new bare git repository at *path* and return a wrapper."""
    subprocess.check_call([GIT, 'init', '--quiet', '--bare', path])
    return cls(path, encoding)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cache(self, dependency: Dependency, value):
    """Store an instance of dependency in the cache.

    Does nothing if dependency is NOT a threadlocal or a singleton.

    :param dependency: The ``Dependency`` to cache
    :param value: The value to cache for dependency
    :type dependency: Dependency
    """
    if dependency.threadlocal:
        setattr(self._local, dependency.name, value)
        return
    if dependency.singleton:
        self._singleton[dependency.name] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cached(self, dependency):
    """Get a cached instance of dependency.

    :param dependency: The ``Dependency`` to retrieve the value for
    :type dependency: ``Dependency``
    :return: The cached value, or None when nothing is cached
    """
    if dependency.threadlocal:
        return getattr(self._local, dependency.name, None)
    if dependency.singleton:
        return self._singleton.get(dependency.name)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set(self, name, factory, singleton=False, threadlocal=False):
    """Add a dependency factory to the registry.

    :param name: Name of the dependency (defaults to ``factory.__name__``)
    :param factory: function/callable that returns the dependency
    :param singleton: When True, the factory is only called on first use and
        its return value cached for subsequent uses.
    :param threadlocal: Like ``singleton``, but cached per thread.
    """
    resolved_name = name or factory.__name__
    # Remember the registration name on the factory for later reverse lookup.
    factory._giveme_registered_name = resolved_name
    self._registry[resolved_name] = Dependency(
        resolved_name, factory, singleton, threadlocal)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, function=None, *, singleton=False, threadlocal=False, name=None):
    """Add an object to the injector's registry.

    Usable as a decorator or as a plain call (``injector.register(fn)``).

    :param function: The function or callable to add to the registry
    :param name: Name of the dependency; defaults to the name of *function*
    :param singleton: When True, call *function* once and cache its return
        value for subsequent uses.
    :param threadlocal: Like ``singleton``, but cached per thread.
    """
    def decorator(func=None):
        self._set(name, func, singleton, threadlocal)
        return func
    # Bare-call form registers immediately; otherwise act as a decorator.
    return decorator(function) if function else decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inject(self, function=None, **names):
    """Inject dependencies into *function*'s arguments when called.

    The `Injector` will look for registered dependencies matching named
    arguments and automatically pass them to the given function when it's
    called.

    :param function: The function to inject into
    :type function: callable
    :param names: ``argument='name'`` overrides for the default behavior
        which matches dependency names with argument names.
    """
    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            sig = signature(function)
            params = sig.parameters
            # Bind what the caller actually supplied; parameters left
            # unfilled are the injection candidates.
            bound = sig.bind_partial(*args, **kwargs)
            bound.apply_defaults()
            injected_kwargs = {}
            for key, value in params.items():
                if key not in bound.arguments:
                    name = names.get(key)
                    if name:
                        # Raise error when dep named explicitly
                        # and missing
                        injected_kwargs[key] = self.get(name)
                    else:
                        try:
                            injected_kwargs[key] = self.get(key)
                        except DependencyNotFoundError as e:
                            # Implicit (name-matched) lookups only warn on
                            # a miss: the argument may legitimately come
                            # from the caller instead.
                            warnings.warn(
                                ambigious_not_found_msg.format(key),
                                DependencyNotFoundWarning
                            )
            # Caller-supplied keyword arguments win over injected values.
            injected_kwargs.update(bound.kwargs)
            return function(*bound.args, **injected_kwargs)
        return wrapper
    if function:
        return decorator(function)
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve(self, dependency):
    """Resolve dependency as an instance attribute of a given class.

    When the attribute is first accessed, it is resolved from the
    corresponding dependency function. *dependency* may be a registered
    name (str) or a factory previously passed through ``register``.
    """
    name = (dependency if isinstance(dependency, str)
            else dependency._giveme_registered_name)
    return DeferredProperty(partial(self.get, name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_itemslist(self, current_item):
    """Yield all available APIs (at the root) or datasets (below an API)."""
    # Two modes: at the tree root we scrape the HTML index page for API
    # cards; below the root we read the parent's JSON resource listing.
    if current_item.is_root:
        html = requests.get(self.base_url).text
        soup = BeautifulSoup(html, 'html.parser')
        for item_html in soup.select(".row .col-md-6"):
            try:
                label = item_html.select_one("h2").text
            except Exception:
                # Cards without an <h2> header are not API entries; skip.
                continue
            yield API(label, blob=item_html)
    else:
        # parameter = current_item.parent
        # data = requests.get(parameter.url)
        for resource in current_item.json["resource"]:
            label = u"{}, {}".format(resource["title"], resource["summary"])
            yield SMHIDataset(label, blob=resource)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_data(self, dataset, query=None, include_inactive_stations=False):
    """Yield dataset rows as ``Result`` objects.

    :param dataset: the dataset (parameter) to fetch data for
    :param query: optional dict with "station" and/or "period" keys; single
        values are accepted and normalized to lists
    :param include_inactive_stations: when no stations are given, include
        inactive stations as well

    Fix: the previous signature used a mutable default (``query={}``) and
    mutated it in place, leaking state across calls and into the caller's
    dict; a fresh copy is now used instead.
    """
    parameter = dataset
    station_dim = dataset.dimensions["station"]
    all_stations = station_dim.allowed_values

    # Work on a copy so neither a shared default nor the caller's dict
    # is ever mutated.
    query = dict(query) if query else {}

    # Step 1: Determine which stations to query.
    if "station" not in query:
        if include_inactive_stations:
            # Get all stations
            query["station"] = list(all_stations)
        else:
            # Get only active stations
            query["station"] = list(station_dim.active_stations())
    else:
        if not isinstance(query["station"], list):
            query["station"] = [query["station"]]
        # Make sure that the queried stations actually exist
        query["station"] = [all_stations.get_by_label(x)
                            for x in query["station"]]

    # Step 2: Determine which periods to query.
    if "period" not in query:
        # TODO: I'd prepare to do dataset.get("period").allowed_values here
        query["period"] = PERIODS
    elif not isinstance(query["period"], list):
        query["period"] = [query["period"]]

    for period in query["period"]:
        if period not in PERIODS:
            msg = u"{} is not an allowed period".format(period)
            raise Exception(msg)

    # Step 3: Get data, one (station, period) pair at a time.
    n_queries = len(query["station"]) * len(query["period"])
    print("Fetching data with {} queries.".format(n_queries))
    for station in query["station"]:
        for period in query["period"]:
            url = dataset.url\
                .replace(".json", "/station/{}/period/{}/data.csv"\
                .format(station.key, period))
            print("/GET {} ".format(url))
            r = requests.get(url)
            if r.status_code == 200:
                raw_data = DataCsv().from_string(r.content).to_dictlist()
                # TODO: This is a very hard coded parse function
                # Expects fixed start row and number of cols
                for row in raw_data:
                    # The value column is derived from the parameter id.
                    value_col = parameter.id.split(",")[0]
                    value = float(row[value_col])
                    row["parameter"] = parameter.id
                    row["station"] = station.label
                    row["station_key"] = station.key
                    row["period"] = period
                    row.pop(value_col, None)
                    yield Result(value, row)
            elif r.status_code == 404:
                print("Warning no data at {}".format(url))
            else:
                raise Exception("Connection error for {}".format(url))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_example_csv(self):
    """Fetch one sample CSV (first station, corrected-archive period).

    Used for dimension parsing.
    """
    sample_station = self.json["station"][0]["key"]
    url = self.url\
        .replace(".json", "/station/{}/period/{}/data.csv"\
        .format(sample_station, "corrected-archive"))
    response = requests.get(url)
    if response.status_code != 200:
        raise Exception("Error connecting to api")
    return DataCsv().from_string(response.content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plural(formatter, value, name, option, format):
    """Choose a plural-form word based on locale pluralization rules.

    Example::

       There is an item.
       There are 10 items.
    """
    words = format.split('|')
    # Implicit use (no extension name) requires at least two variants.
    if not name and len(words) == 1:
        return None
    # Only numbers are handled; anything else falls through.
    try:
        number = decimal.Decimal(value)
    except (ValueError, decimal.InvalidOperation):
        return None
    # An explicit locale option overrides the formatter's locale.
    locale = Locale.parse(option) if option else formatter.locale
    return formatter.format(words[get_plural_tag_index(number, locale)], value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_choice(value):
    """Derive the lookup key used to choose a choice from any value.

    None maps to 'null'; otherwise ``__name__``/``name`` attributes win,
    falling back to ``str(value)``.
    """
    if value is None:
        return 'null'
    for attr in ('__name__', 'name'):
        try:
            return getattr(value, attr)
        except AttributeError:
            pass
    return str(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def choose(formatter, value, name, option, format):
    """Adds simple logic to format strings.

    Example::

       u'one'
       u'other'
    """
    if not option:
        return None
    words = format.split('|')
    if len(words) < 2:
        return None
    choices = option.split('|')
    n = len(choices)
    # Either one word per choice, or one extra trailing word as the default.
    if len(words) not in (n, n + 1):
        raise ValueError('specify %d or %d choices' % (n, n + 1))
    try:
        index = choices.index(get_choice(value))
    except ValueError:
        if len(words) == n:
            raise ValueError('no default choice supplied')
        index = -1  # fall back to the trailing default word
    return formatter.format(words[index], value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_(formatter, value, name, option, format):
    """Repeat the items of an array with separators.

    Spec: `{:[l[ist]:]item|spacer[|final_spacer[|two_spacer]]}`

    Example::

       u'apple, banana, and coconut'
       u'apple and banana'
    """
    if not format:
        return None
    # Only indexable, non-string sequences are handled.
    if not hasattr(value, '__getitem__') or isinstance(value, string_types):
        return None
    words = format.split(u'|', 4)
    if len(words) < 2:
        # Need at least an item format and a spacer.
        return None
    # NOTE: SmartFormat.NET treats a not nested item format as the format
    # string to format each items. For example, `x` will be treated as `{:x}`.
    # But the original tells us this behavior has been deprecated so that
    # should be removed. So SmartFormat for Python doesn't implement the
    # behavior.
    item_format = words[0]
    spacer = words[1] if len(words) > 1 else u''
    final_spacer = words[2] if len(words) > 2 else spacer
    two_spacer = words[3] if len(words) > 3 else final_spacer
    total = len(value)
    out = io.StringIO()
    for index, item in enumerate(value):
        if index > 0:
            if index < total - 1:
                out.write(spacer)
            elif index == 1:
                out.write(two_spacer)  # exactly two items in the sequence
            else:
                out.write(final_spacer)
        out.write(formatter.format(item_format, item, index=index))
    return out.getvalue()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_months(datetime_like_object, n, return_date=False):
    """Return the time *n* months after (or before, for negative *n*) a time.

    Notice: for example, the date one month after 2015-01-31 would nominally
    be 2015-02-31; since Feb has no 31st, the value is clamped to 2015-02-28.

    :param datetime_like_object: a datetime object or a datetime str
    :param n: number of months, value can be negative
    :param return_date: returns a date object instead of datetime

    **中文文档**

    返回给定日期N月之后的时间。
    """
    a_datetime = parser.parse_datetime(datetime_like_object)
    # Zero-based month ordinal. The previous implementation used a one-based
    # ordinal with divmod(), which mapped every December to "month 0" and,
    # via the ValueError fallback, silently clamped e.g.
    # add_months("2015-12-15", 0) to 2015-12-31.
    month_from_ordinary = a_datetime.year * 12 + (a_datetime.month - 1) + n
    year, month_index = divmod(month_from_ordinary, 12)
    try:
        a_datetime = datetime(
            year, month_index + 1, a_datetime.day,
            a_datetime.hour, a_datetime.minute, a_datetime.second,
            a_datetime.microsecond, tzinfo=a_datetime.tzinfo,
        )
    except ValueError:
        # The target month has no such day (e.g. Feb 31): jump to the first
        # day of the following month, then step back one day.
        year, month_index = divmod(month_from_ordinary + 1, 12)
        a_datetime = datetime(
            year, month_index + 1, 1,
            a_datetime.hour, a_datetime.minute, a_datetime.second,
            a_datetime.microsecond, tzinfo=a_datetime.tzinfo,
        )
        a_datetime = add_days(a_datetime, -1)
    if return_date:  # pragma: no cover
        return a_datetime.date()
    else:
        return a_datetime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _log(self, content):
""" Write a string to the log """ |
self._buffer += content
if self._auto_flush:
self.flush() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
    """Erase the buffered log and restart the game timestamp."""
    self._game_start_timestamp = datetime.datetime.now()
    self._buffer = ''
    self._chars_flushed = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logpath(self):
    """Return the logfile path and filename as a string.

    The filename contains the log's timestamp and the names of the players
    in the game, so it changes when reset() or _set_players() are called.
    The log directory is created on demand.
    """
    player_part = '-'.join(p.name for p in self._players)
    filename = '{}-{}.catan'.format(self.timestamp_str(), player_part)
    if not os.path.exists(self._log_dir):
        os.mkdir(self._log_dir)
    return os.path.join(self._log_dir, filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flush(self):
    """Append the not-yet-flushed log tail to file, or to stdout instead.

    See the constructor for logging options.
    """
    pending = self._latest()
    self._chars_flushed += len(pending)
    if self._use_stdout:
        print(pending, file=sys.stdout, flush=True, end='')
        return
    # File mode: append and close immediately so the file stays consistent.
    with open(self.logpath(), 'a') as out:
        print(pending, file=out, flush=True, end='')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_game_start(self, players, terrain, numbers, ports):
    """ Begin a game.

    Erase the log, set the timestamp, set the players, and write the log
    header. The robber is assumed to start on the desert (or off-board).

    :param players: iterable of catan.game.Player objects
    :param terrain: list of 19 catan.board.Terrain objects.
    :param numbers: list of 19 catan.board.HexNumber objects.
    :param ports: list of catan.board.Port objects.
    """
    # Start from a clean log with a fresh timestamp.
    self.reset()
    self._set_players(players)
    # Header: module/version, timestamp, players, then the board layout.
    self._logln('{} v{}'.format(__name__, __version__))
    self._logln('timestamp: {0}'.format(self.timestamp_str()))
    self._log_players(players)
    self._log_board_terrain(terrain)
    self._log_board_numbers(numbers)
    self._log_board_ports(ports)
    self._logln('...CATAN!')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _log_board_ports(self, ports):
""" A board with no ports is allowed. In the logfile, ports must be sorted - ascending by tile identifier (primary) - alphabetical by edge direction (secondary) :param ports: list of catan.board.Port objects """ |
ports = sorted(ports, key=lambda port: (port.tile_id, port.direction))
self._logln('ports: {0}'.format(' '.join('{}({} {})'.format(p.type.value, p.tile_id, p.direction)
for p in ports))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _SetGuide(self, guideName):
    """Select the episode guide implementation matching *guideName*.

    Note: only the EPGUIDES guide is supported; any other name raises.
    """
    if guideName != epguides.EPGuidesLookup.GUIDE_NAME:
        raise Exception(
            "[RENAMER] Unknown guide set for TVRenamer selection: "
            "Got {}, Expected {}".format(
                guideName, epguides.EPGuidesLookup.GUIDE_NAME))
    self._guide = epguides.EPGuidesLookup()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GetUniqueFileShowNames(self, tvFileList):
""" Return a list containing all unique show names from tvfile.TVFile object list. Parameters tvFileList : list List of tvfile.TVFile objects. Returns set The set of show names from the tvfile.TVFile list. """ |
showNameList = [tvFile.fileInfo.showName for tvFile in tvFileList]
return(set(showNameList)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GetShowInfo(self, stringSearch):
    """Look up show info for *stringSearch* via _GetShowID.

    Returns None when no show (or no show ID) could be resolved; otherwise
    returns the showInfo object, filling in a missing show name from the
    database.
    """
    goodlogging.Log.Info("RENAMER", "Looking up show info for: {0}".format(stringSearch))
    goodlogging.Log.IncreaseIndent()
    showInfo = self._GetShowID(stringSearch)
    # Both "no result" and "result without an ID" count as a miss.
    if showInfo is None or showInfo.showID is None:
        goodlogging.Log.DecreaseIndent()
        return None
    if showInfo.showName is None:
        showInfo.showName = self._db.SearchTVLibrary(showID=showInfo.showID)[0][1]
        goodlogging.Log.Info("RENAMER", "Found show name: {0}".format(showInfo.showName))
    goodlogging.Log.DecreaseIndent()
    return showInfo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _CreateNewShowDir(self, showName):
    """Propose a directory name for *showName* and let the user confirm it.

    An autogenerated choice (the show name stripped of special characters)
    is proposed; the user can accept it, enter another name, or skip. When
    user input is skipped the autogenerated value is accepted by default.

    :param showName: Name of TV show
    :returns: the chosen directory name, or None if the user skips.
    """
    stripedDir = util.StripSpecialCharacters(showName)
    goodlogging.Log.Info("RENAMER", "Suggested show directory name is: '{0}'".format(stripedDir))
    if self._skipUserInput is False:
        response = goodlogging.Log.Input('RENAMER', "Enter 'y' to accept this directory, 'x' to skip this show or enter a new directory to use: ")
    else:
        response = 'y'
    answer = response.lower()
    if answer == 'x':
        return None
    if answer == 'y':
        return stripedDir
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _GenerateLibraryPath(self, tvFile, libraryDir):
    """ Creates a full path for TV file in TV library.

    This initially attempts to directly match a show directory in the
    database, if this fails it searches the library directory for the best
    match. The user can then select an existing match or can propose a new
    directory to use as the show root directory.

    The season directory is also generated and added to the show and library
    directories. This is then used by the tvFile GenerateNewFilePath method
    to create a new path for the file.

    Parameters
    ----------
    tvFile : tvfile.TVFile
        Contains show and file info.
    libraryDir : string
        Root path of TV library directory.

    Returns
    ----------
    tvfile.TVFile
        This is an updated version of the input object.
    """
    goodlogging.Log.Info("RENAMER", "Looking up library directory in database for show: {0}".format(tvFile.showInfo.showName))
    goodlogging.Log.IncreaseIndent()
    showID, showName, showDir = self._db.SearchTVLibrary(showName = tvFile.showInfo.showName)[0]
    if showDir is None:
        goodlogging.Log.Info("RENAMER", "No directory match found in database - looking for best match in library directory: {0}".format(libraryDir))
        dirList = os.listdir(libraryDir)
        listDir = False
        matchName = tvFile.showInfo.showName
        # Loop until a show directory is chosen or the user aborts.
        while showDir is None:
            if len(dirList) == 0:
                goodlogging.Log.Info("RENAMER", "TV Library directory is empty")
                response = None
            else:
                if listDir is True:
                    # User asked for the full library listing ('ls').
                    goodlogging.Log.Info("RENAMER", "TV library directory contains: {0}".format(', '.join(dirList)))
                else:
                    matchDirList = util.GetBestMatch(matchName, dirList)
                listDir = False
                if self._skipUserInput is True:
                    # Unattended mode: only accept an unambiguous single match.
                    if len(matchDirList) == 1:
                        response = matchDirList[0]
                        goodlogging.Log.Info("RENAMER", "Automatic selection of show directory: {0}".format(response))
                    else:
                        response = None
                        goodlogging.Log.Info("RENAMER", "Could not make automatic selection of show directory")
                else:
                    listDirPrompt = "enter 'ls' to list all items in TV library directory"
                    response = util.UserAcceptance(matchDirList, promptComment = listDirPrompt, promptOnly = listDir, xStrOverride = "to create new show directory")
            # Interpret the response: None -> create a new dir (or abort),
            # 'ls' -> relist, a known match -> accept it, anything else ->
            # use as a new search term.
            if response is None:
                showDir = self._CreateNewShowDir(tvFile.showInfo.showName)
                if showDir is None:
                    goodlogging.Log.DecreaseIndent()
                    return tvFile
            elif response.lower() == 'ls':
                listDir = True
            elif response in matchDirList:
                showDir = response
            else:
                matchName = response
        self._db.UpdateShowDirInTVLibrary(showID, showDir)
    # Add base directory to show path
    showDir = os.path.join(libraryDir, showDir)
    goodlogging.Log.DecreaseIndent()
    # Lookup and add season directory to show path
    seasonDir = self._LookUpSeasonDirectory(showID, showDir, tvFile.showInfo.seasonNum)
    if seasonDir is None:
        return tvFile
    else:
        showDir = os.path.join(showDir, seasonDir)
    # Call tvFile function to generate file name
    tvFile.GenerateNewFilePath(showDir)
    return tvFile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def catch(ignore=(), was_doing="something important", helpfull_tips="you should use a debugger", gbc=None):
    """Catch the current exception, format it, and log it via *gbc*.

    Must be called from inside an ``except`` block (uses ``sys.exc_info()``).

    :param ignore: exception classes that should be ignored
    :param was_doing: short activity description for the log entry
    :param helpfull_tips: tips printed for whoever reads the error
    :param gbc: logger-like object with ``say``/``cry`` methods (optional)
    :return: 'exception in ignorelist' when the exception class is ignored,
        otherwise None

    Fixes: mutable default argument (``ignore=[]`` -> ``()``) and an
    AttributeError crash when the default ``gbc=None`` was used.
    """
    exc_cls, exc, tb = sys.exc_info()
    if exc_cls in ignore:
        if gbc is not None:
            gbc.say('ignoring caught:' + str(exc_cls))
        return 'exception in ignorelist'
    ex_message = traceback.format_exception_only(exc_cls, exc)[-1].strip()
    # Walk to the innermost frame to report where the error actually happened.
    error_frame = tb
    while error_frame.tb_next is not None:
        error_frame = error_frame.tb_next
    file = error_frame.tb_frame.f_code.co_filename
    line = error_frame.tb_lineno
    formated_stack = [
        {'file': summary[0],
         'line': summary[1],
         'func': summary[2],
         'text': summary[3]}
        for summary in traceback.extract_tb(tb)
    ]
    event = {
        'was_doing': was_doing,
        'message': ex_message,
        'errorLocation': {
            'file': file,
            'line': line,
            'full': file + ' -> ' + str(line)
        },
        'stack': formated_stack
    }
    try:
        if gbc is not None:
            gbc.cry('caught:' + pformat(event))
        print('Bubble3: written error to log')
        print('Bubble3: tips for fixing this:')
        print(helpfull_tips)
    except Exception as e:
        print('Bubble3: cant log error cause of %s' % e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_name(api_url, name, dry_run=False):
    """Build a DataSet from the API base URL and a data-set name.

    No token config param is required as all of our data is currently public.
    """
    url = '/'.join([api_url, name]).rstrip('/')
    return DataSet(url, token=None, dry_run=dry_run)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def secured_clipboard(item):
    """This clipboard only allows 1 paste """
    # Timestamp used to expire the clipboard content.
    expire_clock = time.time()
    def set_text(clipboard, selectiondata, info, data):
        # expire after 15 secs
        if 15.0 >= time.time() - expire_clock:
            selectiondata.set_text(item.get_secret())
        # Clearing right after serving the data makes this a one-shot paste.
        clipboard.clear()
    def clear(clipboard, data):
        """Clearing of the buffer is deferred this only gets called if the
        paste is actually triggered
        """
        pass
    # Advertise the common plain-text selection targets.
    targets = [("STRING", 0, 0)
              ,("TEXT", 0, 1)
              ,("COMPOUND_TEXT", 0, 2)
              ,("UTF8_STRING", 0, 3)]
    cp = gtk.clipboard_get()
    cp.set_with_data(targets, set_text, clear)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_active_window():
    """Get the currently focused window """
    # NOTE(review): Python 2 code (print statement) using pygtk/wnck.
    active_win = None
    default = wnck.screen_get_default()
    # Drain pending GTK events so wnck's window list is up to date.
    while gtk.events_pending():
        gtk.main_iteration(False)
    window_list = default.get_windows()
    if len(window_list) == 0:
        print "No Windows Found"
    # Return the name of the last active window found (None if none active).
    for win in window_list:
        if win.is_active():
            active_win = win.get_name()
    return active_win
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self):
    """Fetch all network quota values from the Cloud Provider.

    For each quota attribute, calls the matching ``get_<attr>()`` method and
    stores its result on ``self`` under the attribute name.
    """
    attrs = ("networks",
             "security_groups",
             "floating_ips",
             "routers",
             "internet_gateways")
    for attr in attrs:
        # getattr dispatch instead of eval(): same behavior, without
        # evaluating a constructed source string.
        setattr(self, attr, getattr(self, "get_" + attr)())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_css_class(css_class, *additional_css_classes):
    """Return the union of one or more CSS classes as a space-separated string.

    Falsy inputs are ignored. Note that the order will not be preserved.
    """
    unique = set()
    for value in (css_class,) + additional_css_classes:
        if value:
            unique.update(value.split(' '))
    return ' '.join(unique)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_routes_and_middlewares(self):
"""Initialize hooks and URI routes to resources.""" |
self._init_middlewares()
self._init_endpoints()
self.app = falcon.API(middleware=self.middleware)
self.app.add_error_handler(Exception, self._error_handler)
for version_path, endpoints in self.catalog:
for route, resource in endpoints:
self.app.add_route(version_path + route, resource) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def listen(self):
"""Self-host using 'bind' and 'port' from the WSGI config group.""" |
msgtmpl = (u'Serving on host %(host)s:%(port)s')
host = CONF.wsgi.wsgi_host
port = CONF.wsgi.wsgi_port
LOG.info(msgtmpl,
{'host': host, 'port': port})
server_cls = self._get_server_cls(host)
httpd = simple_server.make_server(host,
port,
self.app,
server_cls)
httpd.serve_forever() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_promise(self):
"""Return the special set of promises for run_instruction. Run Instruction has to support multiple promises (one for reading data, and one for reading back the status from IR. All other primitives have a single promise, so fitting multiple into this system causes some API consistencies. This should be reviewed to see if a more coherent alternative is available. """ |
if self._promise is None:
promise = []
if self.read:
promise.append(TDOPromise(self._chain, 0, self.bitcount))
else:
promise.append(None)
if self.read_status:
promise.append(TDOPromise(self._chain, 0,
self.dev._desc._ir_length))
else:
promise.append(None)
self._promise = promise
return self._promise |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_dataset(self, dataset, name, color):
""" Encode a dataset """ |
global palette
html = "{"
html += '\t"label": "' + name + '",'
if color is not None:
html += '"backgroundColor": "' + color + '",\n'
else:
html += '"backgroundColor": ' + palette + ',\n'
html += '"data": ' + self._format_list(dataset) + ',\n'
html += "}"
return html |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, slug, xdata, ydatasets, label, opts, style, ctype):
""" Returns html for a chart """ |
xdataset = self._format_list(xdata)
width = "100%"
height = "300px"
if opts is not None:
if "width" in opts:
width = str(opts["width"])
if "height" in opts:
height = str(opts["height"])
stylestr = '<style>#container_' + slug + \
' { width:' + width + ' !important; height:' + \
height + ' !important}</style>\n'
html = stylestr
html += '<div id="container_' + slug + \
'"><canvas id="canvas_' + slug + '"></canvas></div>\n'
html += '<script>\n'
html += 'var data = {\n'
html += 'labels: ' + xdataset + ',\n'
html += 'datasets:[\n'
colors = None
if "color" in style:
colors = style["color"]
i = 0
for dataset in ydatasets:
name = dataset["name"]
data = dataset["data"]
html += self._get_dataset(data, name, colors)
if i < len(ydatasets) - 1:
html += ","
i += 1
html += ']\n'
html += '}\n'
html += 'window.onload = function() {'
html += 'var ctx = document.getElementById("canvas_' + \
slug + '").getContext("2d");'
html += 'window.myChart = new Chart(ctx, {'
html += 'type: "' + ctype + '",'
html += 'data: data,'
html += 'options: {'
html += 'spanGaps: false,'
html += 'responsive: true,'
html += 'maintainAspectRatio: false,'
if "legend" in opts:
html += 'legend: {'
html += 'position: "' + opts["legend"] + '",'
html += '},'
else:
html += 'legend: {'
html += 'display: false,'
html += '},'
if "title" in opts:
html += 'title: {'
html += 'display: true,'
html += 'text: "' + opts["title"] + '"'
html += '}'
html += '}'
html += '});'
html += '};'
html += '</script>\n'
return html |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_list(self, data):
""" Format a list to use in javascript """ |
dataset = "["
i = 0
for el in data:
if pd.isnull(el):
dataset += "null"
else:
dtype = type(data[i])
if dtype == int or dtype == float:
dataset += str(el)
else:
dataset += '"' + el + '"'
if i < len(data) - 1:
dataset += ', '
dataset += "]"
return dataset |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def status(self, status, headers=None):
'''
Respond with given status and no content
:type status: int
:param status: status code to return
:type headers: dict
:param headers: dictionary of headers to add to response
:returns: itself
:rtype: Rule
'''
self.response = _Response(status, headers)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def text(self, text, status=200, headers=None):
'''
Respond with given status and text content
:type text: str
:param text: text to return
:type status: int
:param status: status code to return
:type headers: dict
:param headers: dictionary of headers to add to response
:returns: itself
:rtype: Rule
'''
self.response = _Response(status, headers, text.encode('utf8'))
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def matches(self, method, path, headers, bytes=None):
'''
Checks if rule matches given request parameters
:type method: str
:param method: HTTP method, e.g. ``'GET'``, ``'POST'``, etc.
Can take any custom string
:type path: str
:param path: request path including query parameters,
e.g. ``'/users?name=John%20Doe'``
:type bytes: bytes
:param bytes: request body
:returns: ``True`` if this rule matches given params
:rtype: bool
'''
return self._expectation.matches(method, path, headers, bytes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def on(self, method, path=None, headers=None, text=None, json=None):
'''
Sends response to matching parameters one time and removes it from list of expectations
:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string
:type path: str
:param path: request path including query parameters
:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do
:type text: str
:param text: request text to expect. If ommited any text will match
:type json: dict
:param json: request json to expect. If ommited any json will match,
if present text param will be ignored
:rtype: Rule
:returns: newly created expectation rule
'''
rule = Rule(method, path, headers, text, json)
return self._add_rule_to(rule, self._rules) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def stop(self):
    '''
    Shuts the server down and waits for server thread to join
    '''
    # Stop the serve_forever() loop first, then release the listening socket.
    self._server.shutdown()
    self._server.server_close()
    # Block until the serving thread has fully exited before flagging stopped.
    self._thread.join()
    self.running = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def send(self, stats):
    "Format stats and send to one or more Graphite hosts"
    # Accumulate all metric lines in memory, then flush once per host.
    buf = cStringIO.StringIO()
    now = int(time.time())
    num_stats = 0
    # timer stats
    pct = stats.percent
    timers = stats.timers
    for key, vals in timers.iteritems():
        if not vals:
            continue
        # compute statistics
        num = len(vals)
        vals = sorted(vals)
        vmin = vals[0]
        vmax = vals[-1]
        mean = vmin
        max_at_thresh = vmax
        if num > 1:
            # index of the percentile threshold within the sorted samples
            idx = round((pct / 100.0) * num)
            tmp = vals[:int(idx)]
            if tmp:
                max_at_thresh = tmp[-1]
                # NOTE(review): denominator is idx (the rounded float),
                # not len(tmp) -- confirm this is the intended mean.
                mean = sum(tmp) / idx
        # rebind key to its fully-qualified Graphite metric name
        key = 'stats.timers.%s' % key
        buf.write('%s.mean %f %d\n' % (key, mean, now))
        buf.write('%s.upper %f %d\n' % (key, vmax, now))
        buf.write('%s.upper_%d %f %d\n' % (key, pct, max_at_thresh, now))
        buf.write('%s.lower %f %d\n' % (key, vmin, now))
        buf.write('%s.count %d %d\n' % (key, num, now))
        num_stats += 1
    # counter stats: per-second rate plus the raw count for the interval
    counts = stats.counts
    for key, val in counts.iteritems():
        buf.write('stats.%s %f %d\n' % (key, val / stats.interval, now))
        buf.write('stats_counts.%s %f %d\n' % (key, val, now))
        num_stats += 1
    # gauge stats (reported verbatim, not rate-converted)
    gauges = stats.gauges
    for key, val in gauges.iteritems():
        buf.write('stats.%s %f %d\n' % (key, val, now))
        buf.write('stats_counts.%s %f %d\n' % (key, val, now))
        num_stats += 1
    buf.write('statsd.numStats %d %d\n' % (num_stats, now))
    # TODO: add support for N retries
    for host in self._hosts:
        # flush stats to graphite; a failure for one host does not stop
        # delivery to the remaining hosts
        try:
            sock = socket.create_connection(host)
            sock.sendall(buf.getvalue())
            sock.close()
        except Exception, ex:
            self.error(E_SENDFAIL % ('graphite', host, ex))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_payload(self, *args, **kwargs):
"""Receive all passed in args, kwargs, and combine them together with any required params""" |
if not kwargs:
kwargs = self.default_params
else:
kwargs.update(self.default_params)
for item in args:
if isinstance(item, dict):
kwargs.update(item)
if hasattr(self, 'type_params'):
kwargs.update(self.type_params(*args, **kwargs))
return kwargs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def read_frame(self) -> DataFrame: """Read a single frame from the local buffer. If no frames are available but the stream is still open, waits until more frames arrive. Otherwise, raises StreamConsumedError. When a stream is closed, a single `None` is added to the data frame Queue to wake up any waiting `read_frame` coroutines. """ |
if self._data_frames.qsize() == 0 and self.closed:
raise StreamConsumedError(self.id)
frame = await self._data_frames.get()
self._data_frames.task_done()
if frame is None:
raise StreamConsumedError(self.id)
return frame |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_frame_nowait(self) -> Optional[DataFrame]: """Read a single frame from the local buffer immediately. If no frames are available but the stream is still open, returns None. Otherwise, raises StreamConsumedError. """ |
    # Non-blocking read: take one frame from the local buffer if available.
    try:
        frame = self._data_frames.get_nowait()
    except asyncio.QueueEmpty:
        # Empty buffer on a closed stream means no frame will ever arrive.
        if self.closed:
            raise StreamConsumedError(self.id)
        return None
    self._data_frames.task_done()
    # ``None`` is the close sentinel enqueued when the stream shuts down.
    if frame is None:
        raise StreamConsumedError(self.id)
    return frame
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(a, b, op=None, recurse_list=False, max_depth=None):
"""Immutable merge ``a`` structure with ``b`` using binary operator ``op`` on leaf nodes. All nodes at, or below, ``max_depth`` are considered to be leaf nodes. Merged structure is returned, input data structures are not modified. If ``recurse_list=True``, leaf lists of equal length will be merged on a list-element level. Lists are considered to be leaf nodes by default (``recurse_list=False``), and they are merged with user-provided ``op``. Note the difference:: merge([1, 2], [3, 4]) ==> [1, 2, 3, 4] merge([1, 2], [3, 4], recurse_list=True) ==> [4, 6] """ |
if op is None:
op = operator.add
if max_depth is not None:
if max_depth < 1:
return op(a, b)
else:
max_depth -= 1
if isinstance(a, dict) and isinstance(b, dict):
result = {}
for key in set(chain(a.keys(), b.keys())):
if key in a and key in b:
result[key] = merge(a[key], b[key],
op=op, recurse_list=recurse_list,
max_depth=max_depth)
elif key in a:
result[key] = deepcopy(a[key])
elif key in b:
result[key] = deepcopy(b[key])
return result
elif isinstance(a, list) and isinstance(b, list):
if recurse_list and len(a) == len(b):
# merge subelements
result = []
for idx in range(len(a)):
result.append(merge(a[idx], b[idx],
op=op, recurse_list=recurse_list,
max_depth=max_depth))
return result
else:
# merge lists
return op(a, b)
# all other merge ops should be handled by ``op``.
# default ``operator.add`` will handle addition of numeric types, but fail
# with TypeError for incompatible types (eg. str + None, etc.)
return op(a, b) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _param_deprecation_warning(schema, deprecated, context):
"""Raises warning about using the 'old' names for some parameters.
The new naming scheme just has two underscores on each end of the word for consistency
""" |
for i in deprecated:
if i in schema:
msg = 'When matching {ctx}, parameter {word} is deprecated, use __{word}__ instead'
msg = msg.format(ctx = context, word = i)
warnings.warn(msg, Warning) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_perm(self, user, perm, obj=None, *args, **kwargs):
"""Test user permissions for a single action and object. :param user: The user to test. :type user: ``User`` :param perm: The action to test. :type perm: ``str`` :param obj: The object path to test. :type obj: ``tutelary.engine.Object`` :returns: ``bool`` -- is the action permitted? """ |
try:
if not self._obj_ok(obj):
if hasattr(obj, 'get_permissions_object'):
obj = obj.get_permissions_object(perm)
else:
raise InvalidPermissionObjectException
return user.permset_tree.allow(Action(perm), obj)
except ObjectDoesNotExist:
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def permitted_actions(self, user, obj=None):
"""Determine list of permitted actions for an object or object pattern. :param user: The user to test. :type user: ``User`` :param obj: A function mapping from action names to object paths to test. :type obj: callable :returns: ``list(tutelary.engine.Action)`` -- permitted actions. """ |
try:
if not self._obj_ok(obj):
raise InvalidPermissionObjectException
return user.permset_tree.permitted_actions(obj)
except ObjectDoesNotExist:
return [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, name, platform='', genre=''):
""" The name argument is required for this method as per the API server specification. This method also provides the platform and genre optional arguments as filters. """ |
data_list = self.db.get_data(self.list_path, name=name,
platform=platform, genre=genre)
data_list = data_list.get('Data') or {}
games = data_list.get('Game') or []
return [self._build_item(**i) for i in games] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self):
""" No argument is required for this method as per the API server specification. """ |
data_list = self.db.get_data(self.list_path)
data_list = data_list.get('Data') or {}
platforms = (data_list.get('Platforms') or {}).get('Platform') or []
return [self._build_item(**i) for i in platforms] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_none_dict_values(obj):
""" Remove None values from dict. """ |
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none_dict_values(x) for x in obj)
elif isinstance(obj, dict):
return type(obj)((k, remove_none_dict_values(v))
for k, v in obj.items()
if v is not None)
else:
return obj |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.