text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mag_calibration(self):
    """Perform magnetometer calibration for current IMU.

    Marks the calibration state as magnetometer calibration, then shows a
    modal dialog that collects magnetometer samples from the IMU currently
    selected in the spinIMU widget. If the user cancels the dialog nothing
    else happens; otherwise the collected samples are handed to
    calculate_mag_calibration.
    """
    self.calibration_state = self.CAL_MAG
    # dialog gathers samples from the currently selected IMU index
    self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self)
    if self.mag_dialog.exec_() == QDialog.Rejected:
        return  # user cancelled; leave existing calibration untouched
    self.calculate_mag_calibration(self.mag_dialog.samples)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_gyro_calibration(self, gyro_samples):
    """Compute and persist gyroscope bias offsets for the current SK8.

    Averages each axis over the supplied (x, y, z) samples (integer
    truncation toward zero), stores the results as strings in the
    calibration data for the currently connected SK8, writes the data to
    disk, refreshes the UI display and resets the calibration state.
    """
    n = len(gyro_samples)
    sums = [0, 0, 0]
    for x, y, z in gyro_samples:
        sums[0] += x
        sums[1] += y
        sums[2] += z
    offsets = [int(float(total) / n) for total in sums]
    print('Saving gyro offsets for {}'.format(self.current_imuid))
    cal = self.calibration_data[self.current_imuid]
    cal[self.GYROX_OFFSET] = str(offsets[0])
    cal[self.GYROY_OFFSET] = str(offsets[1])
    cal[self.GYROZ_OFFSET] = str(offsets[2])
    cal[self.GYRO_TIMESTAMP] = datetime.now().isoformat()
    self.write_calibration_data()
    self.update_data_display(cal)
    self.calibration_state = self.CAL_NONE
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def device_selected(self, index):
    """Handler for selecting a device from the list in the UI.

    Looks up the model item backing the selected index, prints the
    device's address, and enables the Connect button now that a target
    device exists.
    """
    device = self.devicelist_model.itemFromIndex(index)
    print(device.device.addr)
    self.btnConnect.setEnabled(True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_block(self, task, disable=False):
    """Handles blocking domains using hosts file.

    `task`
        ``Task`` instance.
    `disable`
        Set to ``True``, to turn off blocking and restore hosts file;
        otherwise, ``False`` will enable blocking by updating hosts file.

    Returns boolean: ``False`` on failure, ``True`` on success or when
    nothing needed to be written.
    """
    backup_file = os.path.join(task.task_dir, '.hosts.bak')
    # lazily load cached state: pristine hosts data from the backup file,
    # and the last-seen modification time (-1 == never)
    self.orig_data = self.orig_data or common.readfile(backup_file)
    self.last_updated = self.last_updated or -1
    if not self.orig_data:
        # shouldn't attempt restore without good original data, bail
        if disable:
            return False
        # attempt to fetch data from the source
        self.orig_data = common.readfile(self.hosts_file)
        if not self.orig_data:
            return False
    # create a backup of the pristine hosts file if not already present
    if not os.path.exists(backup_file):
        common.writefile(backup_file, self.orig_data)
    # bail early if hosts file modification time hasn't changed
    try:
        should_write = (disable or self.last_updated
                        != os.path.getmtime(self.hosts_file))
    except OSError:
        should_write = True  # file was removed, let's write!
    if not should_write:
        return True
    # make copy of original data, in case we need to modify
    data = self.orig_data
    # if not restoring, tack on domains mapped
    # to localhost to end of file data
    if not disable:
        # convert the set to a list and sort
        domains = list(self.domains)
        domains.sort()
        data += ('\n'.join('127.0.0.1\t{0}\t# FOCUS'
                           .format(d) for d in domains) + '\n')
    # make temp file with new host file data
    with tempfile.NamedTemporaryFile(prefix='focus_') as tempf:
        tempf.write(data)
        tempf.flush()
        # overwrite hosts file with our modified copy.
        if not self.run_root('cp "{0}" "{1}"'.format(tempf.name,
                                                     self.hosts_file)):
            return False
    # MacOS X generally requires flushing the system dns cache to pick
    # up changes to the hosts file:
    #   dscacheutil -flushcache or lookupd -flushcache
    if common.IS_MACOSX:
        dscacheutil, lookupd = [common.which(x) for x in
                                ('dscacheutil', 'lookupd')]
        self.run_root(' '.join([dscacheutil or lookupd,
                                '-flushcache']))
    if disable:
        common.safe_remove_file(backup_file)  # cleanup the backup
    # store last modification time
    try:
        self.last_updated = os.path.getmtime(self.hosts_file)
    except OSError:
        # file was removed, let's update next time around
        self.last_updated = -1
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_option(self, option, block_name, *values):
    """Parse domain values for option.

    Each value is expected to be a domain name or URL; the hostname part
    is extracted and added to ``self.domains``, along with common
    subdomain variants for bare two-part domains.

    Raises ``ValueError`` when no values are supplied or when no valid
    domains could be extracted.
    """
    _extra_subs = ('www', 'm', 'mobile')
    if len(values) == 0:  # expect some values here..
        raise ValueError
    for value in values:
        value = value.lower()
        # if it doesn't look like a protocol, assume http
        # (e.g. only domain supplied)
        if not _RE_PROTOCOL.match(value):
            value = 'http://' + value
        # did it parse? pull hostname/domain
        parsed = urlparse.urlparse(value)
        if parsed:
            domain = parsed.hostname
            if domain and _RE_TLD.search(domain):  # must have a TLD
                # doesn't have subdomain, tack on www, m, and mobile
                # for good measure. note, this check fails for
                # multi-part TLDs, e.g. .co.uk
                domain = _RE_WWW_SUB.sub('', domain)  # strip "www."
                if len(domain.split('.')) == 2:
                    for sub in _extra_subs:
                        self.domains.add('{0}.{1}'.format(sub, domain))
                self.domains.add(domain)
    # no domains.. must have failed
    if not self.domains:
        raise ValueError
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sign(self, data):
    """Create url-safe signed token.

    :param data: Data to sign
    :type data: object
    :raises DataSignError: if the data is not JSON-serializable
    """
    try:
        payload = json.dumps(data, separators=(',', ':'))
    except TypeError as e:
        raise DataSignError(e.args[0])
    signature = self._create_signature(payload)
    return self._b64encode('.'.join([payload, signature]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unsign(self, b64msg):
    """Retrieves data from signed token.

    :param b64msg: Token to unsign
    :type b64msg: str
    :raises MalformedSigendMessage: if the token cannot be split or the
        body is not valid JSON
    :raises BadSignature: if the signature does not match
    """
    decoded = self._b64decode(b64msg)
    try:
        body, signature = decoded.rsplit('.', 1)
    except ValueError as e:
        raise MalformedSigendMessage(e.args[0])
    if signature != self._create_signature(body):
        raise BadSignature()
    try:
        return json.loads(body)
    except ValueError as e:
        raise MalformedSigendMessage(e.args[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_file_name(content_dispo):
    """Extract the file name from a Content-Disposition header value.

    The raw header bytes from the server are escape-decoded to text, then
    the semicolon-separated parameters are scanned for a ``filename``
    entry. Returns an empty string when no filename parameter is present.
    """
    # conversion of escape string (str type) from server to unicode object
    decoded = content_dispo.decode('unicode-escape').strip('"')
    for chunk in decoded.split(';'):
        parts = chunk.strip().split('=')
        if parts[0] == "filename":
            return parts[1].strip('"')
    return ""
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(self, uuid, url, forced_file_name=None, progress_bar=True, chunk_size=256, directory=None, overwrite=False):
    """Download a file from LinShare using its rest api.

    This method could throw exceptions like urllib2.HTTPError.
    (Python 2 code: relies on ``urllib2`` and the builtin ``file()``.)

    :param uuid: document uuid; used as fallback file name
    :param url: relative url, expanded via get_full_url
    :param forced_file_name: overrides the name from Content-Disposition
    :param progress_bar: show a console progress bar (disabled when the
        server sends no Content-Length)
    :param chunk_size: read size per iteration, in bytes
    :param directory: existing directory to save the file into
    :param overwrite: overwrite an existing file instead of appending a
        numeric suffix
    :returns: (file_name, request_duration) tuple
    """
    self.last_req_time = None
    url = self.get_full_url(url)
    self.log.debug("download url : " + url)
    # Building request
    request = urllib2.Request(url)
    # request.add_header('Content-Type', 'application/json; charset=UTF-8')
    request.add_header('Accept', 'application/json,*/*;charset=UTF-8')
    # request start
    starttime = datetime.datetime.now()
    # doRequest
    resultq = urllib2.urlopen(request)
    code = resultq.getcode()
    file_name = uuid  # fallback name if nothing better can be determined
    self.log.debug("ret code : '" + str(code) + "'")
    if code == 200:
        content_lenth = resultq.info().getheader('Content-Length')
        if not content_lenth:
            # no size known -> cannot draw a progress bar
            msg = "No content lengh header found !"
            self.log.debug(msg)
            progress_bar = False
        else:
            file_size = int(content_lenth.strip())
        if forced_file_name:
            file_name = forced_file_name
        else:
            # prefer the server-suggested name when present
            content_dispo = resultq.info().getheader('Content-disposition')
            if content_dispo:
                content_dispo = content_dispo.strip()
                file_name = extract_file_name(content_dispo)
        if directory:
            if os.path.isdir(directory):
                file_name = directory + "/" + file_name
        if os.path.isfile(file_name):
            if not overwrite:
                # find a free "<name>.<n>" suffix instead of clobbering
                cpt = 1
                while 1:
                    if not os.path.isfile(file_name + "." + str(cpt)):
                        file_name += "." + str(cpt)
                        break
                    cpt += 1
            else:
                self.log.warn("'%s' already exists. It was overwriten.",
                              file_name)
        stream = None
        pbar = None
        if progress_bar:
            widgets = [FileTransferSpeed(), ' <<<', Bar(), '>>> ',
                       Percentage(), ' ', ETA()]
            pbar = ProgressBar(widgets=widgets, maxval=file_size)
            # FileWithCallback feeds the progress bar as bytes are written
            stream = FileWithCallback(file_name, 'w', pbar.update,
                                      file_size, file_name)
            pbar.start()
        else:
            stream = file(file_name, 'w')
        # stream the body to disk chunk by chunk
        while 1:
            chunk = resultq.read(chunk_size)
            if not chunk:
                break
            stream.write(chunk)
        stream.flush()
        stream.close()
        if pbar:
            pbar.finish()
    # request end
    endtime = datetime.datetime.now()
    self.last_req_time = str(endtime - starttime)
    self.log.debug("download url : %(url)s : request time : %(time)s",
                   {"url": url,
                    "time": self.last_req_time})
    return (file_name, self.last_req_time)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_field(self, field, arg=None, value=None, extended=False, hidden=False, e_type=str, required=None):
    """Add a new field to the current ResourceBuilder.

    Keyword arguments:
    field    -- field name
    arg      -- attribute name in the arg object (argparse); derived from
                `field` (camelCase -> snake_case) when omitted
    value    -- a default for this field, used for resource creation
    extended -- if True, only shown in extended list mode
    hidden   -- if True, the field won't be exposed as an available key
    e_type   -- field data type (default str): int, float, str
    required -- True if the field is required for create and update
                methods; defaults to the builder-wide setting when None
    """
    effective_required = self._required if required is None else required
    if arg is None:
        # e.g. "creationDate" -> "creation_date"
        derived_arg = re.sub('(?!^)([A-Z]+)', r'_\1', field).lower()
    else:
        derived_arg = arg
    self._fields[field] = dict(
        field=field,
        arg=derived_arg,
        value=value,
        extended=extended,
        required=effective_required,
        e_type=e_type,
        hidden=hidden,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kill(self):
    """Kill instantiated process.

    :raises: `AttributeError` if instantiated process doesn't seem to
        satisfy `constraints
        <relshell.daemon_shelloperator.DaemonShellOperator>`_

    Closes the process's stdin, waits for it to exit (validating the exit
    code against the accepted set), removes temporary input files, and
    finally clears the process reference.
    """
    BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)
    BaseShellOperator._wait_process(self._process, self._batcmd.sh_cmd, self._success_exitcodes)
    BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)
    self._process = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def env_timestamp(name, required=False, default=empty):
    """Pull an environment variable and parse it as ``datetime.datetime``.

    The environment variable is expected to be a timestamp in the form
    of a float.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If
        ``True`` and the variable is not present, a ``KeyError`` is
        raised (by ``get_env_value``).
    :type required: bool
    :param default: The value to return if the environment variable is
        not present. (Providing a default alongside setting
        ``required=True`` will raise a ``ValueError``)
    """
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    value = get_env_value(name, required=required, default=empty)
    if value is empty:
        if default is not empty:
            return default
        raise ValueError(
            "`env_timestamp` requires either a default value to be specified, "
            "or for the variable to be present in the environment"
        )
    return datetime.datetime.fromtimestamp(float(value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def env_iso8601(name, required=False, default=empty):
    """Pull an environment variable and parse it as ``datetime.datetime``.

    The environment variable is expected to be an iso8601 formatted
    string.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If
        ``True`` and the variable is not present, a ``KeyError`` is
        raised (by ``get_env_value``).
    :type required: bool
    :param default: The value to return if the environment variable is
        not present. (Providing a default alongside setting
        ``required=True`` will raise a ``ValueError``)
    """
    try:
        import iso8601
    except ImportError:
        raise ImportError(
            'Parsing iso8601 datetime strings requires the iso8601 library'
        )
    if required and default is not empty:
        raise ValueError("Using `default` with `required=True` is invalid")
    value = get_env_value(name, required=required, default=empty)
    if value is empty:
        if default is not empty:
            return default
        raise ValueError(
            "`env_iso8601` requires either a default value to be specified, or "
            "for the variable to be present in the environment"
        )
    return iso8601.parse_date(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_es_workers(self, **kwargs):
    """Creates index worker instances for each class to index.

    kwargs:
    -------
        idx_only_base[bool]: True will only index the base class
    """
    def make_es_worker(search_conn, es_index, es_doc_type, class_name):
        """
        Returns a new es_worker instance

        args:
        -----
            search_conn: the connection to elasticsearch
            es_index: the name of the elasticsearch index
            es_doc_type: the name of the elasticsearch doctype
            class_name: name of the rdf class that is being indexed
        """
        # shallow copy so each worker carries its own index/doctype
        new_esbase = copy.copy(search_conn)
        new_esbase.es_index = es_index
        new_esbase.doc_type = es_doc_type
        # NOTE(review): class_name arrives as ``__name__`` -- confirm it
        # is a uri-aware type providing ``.pyuri`` (a plain str would
        # raise AttributeError here).
        log.info("Indexing '%s' into ES index '%s' doctype '%s'",
                 class_name.pyuri,
                 es_index,
                 es_doc_type)
        return new_esbase

    def additional_indexers(rdf_class):
        """
        returns additional classes to index based off of the es definitions
        """
        rtn_list = rdf_class.es_indexers()
        rtn_list.remove(rdf_class)  # the base class gets its own worker
        return rtn_list

    # worker for the base class itself
    self.es_worker = make_es_worker(self.search_conn,
                                    self.es_index,
                                    self.es_doc_type,
                                    self.rdf_class.__name__)
    if not kwargs.get("idx_only_base"):
        # one worker per additional indexer class, keyed by class name
        self.other_indexers = {item.__name__: make_es_worker(
                self.search_conn,
                item.es_defs.get('kds_esIndex')[0],
                item.es_defs.get('kds_esDocType')[0],
                item.__name__)
            for item in additional_indexers(self.rdf_class)}
    else:
        self.other_indexers = {}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _index_sub(self, uri_list, num, batch_num):
    """Converts a list of uris to elasticsearch json objects.

    args:
        uri_list: list of uris to convert
        num: the ending count within the batch
        batch_num: the batch number
    """
    bname = '%s-%s' % (batch_num, num)
    log.debug("batch_num '%s' starting es_json conversion",
              bname)
    # fetch triple data for every uri in one triplestore query
    qry_data = get_all_item_data([item[0] for item in uri_list],
                                 self.tstore_conn,
                                 rdfclass=self.rdf_class)
    log.debug("batch_num '%s-%s' query_complete | count: %s",
              batch_num,
              num,
              len(qry_data))
    # path = os.path.join(CFG.dirs.cache, "index_pre")
    # if not os.path.exists(path):
    #     os.makedirs(path)
    # with open(os.path.join(path, bname + ".json"), "w") as fo:
    #     fo.write(json.dumps(qry_data))
    data = RdfDataset(qry_data)
    del qry_data  # free the raw query payload early
    log.debug("batch_num '%s-%s' RdfDataset Loaded", batch_num, num)
    for value in uri_list:
        try:
            self.batch_data[batch_num]['main'].append(\
                    data[value[0]].es_json())
            self.count += 1
        except KeyError:
            # uri not present in the loaded dataset; skip it
            pass
    for name, indexer in self.other_indexers.items():
        # NOTE(review): ``name`` is a dict key built from ``__name__``;
        # confirm it is a uri-aware string providing ``.pyuri`` -- a
        # plain str would raise AttributeError here.
        for item in data.json_qry("$.:%s" % name.pyuri):
            val = item.es_json()
            if val:
                self.batch_data[batch_num][name].append(val)
                self.batch_uris[batch_num].append(item.subject)
    del data
    del uri_list
    log.debug("batch_num '%s-%s' converted to es_json", batch_num, num)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_idx_status(self, rdf_class):
    """Removes all of the index status triples from the datastore.

    Deletes kds:esIndexTime / kds:esIndexError triples for every subject
    typed as rdf_class or any of its subclasses.

    Args:
    -----
        rdf_class: The class of items to remove the status from
    """
    sparql_template = """
        DELETE
        {{
            ?s kds:esIndexTime ?esTime .
            ?s kds:esIndexError ?esError .
        }}
        WHERE
        {{
            VALUES ?rdftypes {{\n\t\t{} }} .
            ?s a ?rdftypes .
            OPTIONAL {{
                ?s kds:esIndexTime ?esTime
            }}
            OPTIONAL {{
                ?s kds:esIndexError ?esError
            }}
            FILTER(bound(?esTime)||bound(?esError))
        }}
        """
    # target the class and all of its subclasses
    rdf_types = [rdf_class.uri] + [item.uri
                                   for item in rdf_class.subclasses]
    sparql = sparql_template.format("\n\t\t".join(rdf_types))
    log.warn("Deleting index status for %s", rdf_class.uri)
    return self.tstore_conn.update_query(sparql)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_es_ids(self):
    """Reads all the elasticsearch ids for an index.

    Scans the index (fetching only the 'uri' field, sorted by 'uri') and
    returns the document meta ids as a list.
    """
    search = self.search.source(['uri']).sort(['uri'])
    es_ids = [item.meta.id for item in search.scan()]
    return es_ids
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_index(self, rdf_class):
    """Reconcile the elasticsearch index against the triplestore.

    Compares the set of elasticsearch document ids with the set of uris
    in the triplestore; elasticsearch records that are not in the
    triplestore are deleted.

    Args:
    -----
        rdf_class: the rdf class being validated (kept for interface
            compatibility; the comparison uses the instance's workers)

    Returns the bulk-save results when stale records were deleted,
    otherwise None.
    """
    es_ids = set(self.get_es_ids())
    tstore_ids = set(item[1]
                     for item in self.get_uri_list(no_status=True))
    # ids present in elasticsearch but no longer in the triplestore
    stale_ids = es_ids - tstore_ids
    if stale_ids:
        # NOTE(review): removed a leftover `pdb.set_trace()` breakpoint
        # that would hang any non-interactive run reaching this branch.
        action_list = self.es_worker.make_action_list(stale_ids,
                                                     action_type="delete")
        return self.es_worker.bulk_save(action_list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _disable_prometheus_process_collector(self) -> None:
    """Unregister the prometheus process collector.

    There is a bug in SDC's Docker implementation and intolerant
    prometheus_client code, due to which its process_collector will fail.
    See https://github.com/prometheus/client_python/issues/80
    """
    logger.info("Removing prometheus process collector")
    try:
        core.REGISTRY.unregister(PROCESS_COLLECTOR)
    except KeyError:
        # already unregistered -- nothing to do
        logger.debug("PROCESS_COLLECTOR already removed from prometheus")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connection(self, handshake=None):
    """Connects if necessary, returns existing one if it can.

    :param handshake: A function to be called with the client to complete
        the handshake.
    :returns: thrift connection, deferred if necessary
    """
    state = self._state
    if state == _State.CONNECTED:
        return succeed(self._current_client)
    if state == _State.DISCONNECTING:
        return fail(ClientDisconnecting())
    if state == _State.NOT_CONNECTED:
        # register the waiter first so the connect can fire it
        deferred = self._notify_on_connect()
        self._connect(handshake)
        return deferred
    # only remaining state: a connection attempt is already in flight
    assert state == _State.CONNECTING
    return self._notify_on_connect()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_single_char(func):
    """Decorator that ensures the decorated function's first positional
    argument is a single character, i.e. a string of length one.

    A ValueError is raised otherwise.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        first = args[0]
        if isinstance(first, str) and len(first) == 1:
            return func(*args, **kwargs)
        raise ValueError((
            'This function should be invoked with a string of length one '
            'as its first argument'))
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vowel(char):
    """Check whether the character is a vowel letter."""
    # non-letters are never vowels; strict lookup against the chart
    return is_letter(char, strict=True) and char in chart.vowels
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_suprasegmental(char, strict=True):
    """Check whether the character is a suprasegmental according to the
    IPA spec. This includes tones, word accents, and length markers.

    In strict mode return True only if the diacritic is part of the IPA
    spec.
    """
    if char in chart.suprasegmentals:
        return True
    if char in chart.lengths:
        return True
    # tones honour the strict flag themselves
    return is_tone(char, strict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_substitutes(string):
    """Return the given string with all known common substitutes replaced
    with their IPA-compliant counterparts.
    """
    result = string
    for substitute, ipa_char in chart.replacements.items():
        result = result.replace(substitute, ipa_char)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_ipa(self, file_path):
    """Populate the instance's set properties using the specified file.

    The file is divided into sections by '#'-prefixed headers; each
    following non-empty line contributes its first tab-separated column
    to the set mapped to that section. Lines under an unrecognised
    header are ignored.
    """
    sections = {
        '# consonants (pulmonic)': self.consonants,
        '# consonants (non-pulmonic)': self.consonants,
        '# other symbols': self.consonants,
        '# tie bars': self.tie_bars,
        '# vowels': self.vowels,
        '# diacritics': self.diacritics,
        '# suprasegmentals': self.suprasegmentals,
        '# lengths': self.lengths,
        '# tones and word accents': self.tones }
    target = None
    with open(file_path, encoding='utf-8') as source:
        for raw_line in source:
            line = raw_line.strip()
            if line.startswith('#'):
                # switch sections; unknown headers disable collection
                target = sections.get(line)
            elif line and target is not None:
                target.add(line.split('\t')[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_replacements(self, file_path):
    """Populate self.replacements using the specified file.

    Each non-empty line is expected to hold a substitute string and its
    IPA-compliant replacement, separated by a tab.
    """
    with open(file_path, encoding='utf-8') as source:
        for raw_line in source:
            stripped = raw_line.strip()
            if not stripped:
                continue  # skip blank lines
            columns = stripped.split('\t')
            self.replacements[columns[0]] = columns[1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort_by_name(names):
    """Sort by last name, uniquely."""
    def surname_first(full_name):
        # move the final word to the front so surnames compare first;
        # single-word names are compared as-is
        words = full_name.split(' ')
        if len(words) > 1:
            full_name = ' '.join([words[-1]] + words[:-1])
        return full_name.upper()
    return sorted(set(names), key=surname_first)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_attribute(self, key, value):
    """Store blame info we are interested in.

    Keys we don't care about are ignored; '-' in key names becomes '_'
    and '*-time' values are converted to integers.
    """
    if key in ('summary', 'filename', 'previous'):
        return
    attr_name = key.replace('-', '_')
    if key.endswith('-time'):
        value = int(value)
    setattr(self, attr_name, value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_git_file(cls, path, name):
    """Determine if file is known by git.

    Runs ``git ls-files --error-unmatch`` on the file and reports whether
    git exited successfully.

    NOTE(review): changes the process-wide working directory as a side
    effect; other methods in this file appear to rely on per-call chdir
    -- confirm before refactoring to Popen's ``cwd=`` argument.
    """
    os.chdir(path)
    p = subprocess.Popen(['git', 'ls-files', '--error-unmatch', name],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    return p.returncode == 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_modules(self):
    """Generator to look for git files in tree. Will handle all lines.

    Walks self.root for files matching self.filter and yields a
    (full_path, line_ranges) tuple for each git-tracked file; the empty
    line-range list means "blame all lines".
    """
    for path, dirlist, filelist in os.walk(self.root):
        for name in fnmatch.filter(filelist, self.filter):
            if self.is_git_file(path, name):
                yield (os.path.join(path, name), [])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_blame_info(cls, matches):
    """Runs git blame on files, for the specified sets of line ranges.

    If no line range tuples are provided, it will do all lines.

    Yields the raw ``git blame --line-porcelain`` output for each file;
    files whose blame fails are reported on stdout and skipped.
    """
    old_area = None
    for filename, ranges in matches:
        area, name = os.path.split(filename)
        if not area:
            area = '.'
        # print a directory header whenever the directory changes
        if area != old_area:
            print("\n\n%s/\n" % area)
            old_area = area
        print("%s " % name, end="")
        filter = cls.build_line_range_filter(ranges)
        command = ['git', 'blame', '--line-porcelain'] + filter + [name]
        os.chdir(area)
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            print(" <<<<<<<<<< Unable to collect 'git blame' info:", err)
        else:
            yield out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unique_authors(self, limit):
    """Unique list of authors, but preserving order.

    A limit of zero means "no limit"; otherwise only the first `limit`
    sorted commits are considered.
    """
    if limit == 0:
        limit = None
    seen = set()
    authors = []
    for commit in self.sorted_commits[:limit]:
        if commit.author not in seen:
            seen.add(commit.author)
            authors.append(commit.author)
    return authors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show(self, commit):
    """Display one commit line.

    The output will be:
        <uuid> <#lines> <author> <short-commit-date>
    If verbose flag set, the output will be:
        <uuid> <#lines> <author+email> <long-date> <committer+email>
    """
    commit_date = date_to_str(commit.committer_time, commit.committer_tz,
                              self.verbose)
    if self.verbose:
        author = "%s %s" % (commit.author, commit.author_mail)
        author_width = 50
        committer = " %s %s" % (commit.committer, commit.committer_mail)
    else:
        author = commit.author
        author_width = 25
        committer = ''
    return " {} {:>5d} {:{}s} {}{}".format(
        commit.uuid[:8], commit.line_count, author, author_width,
        commit_date, committer)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_user_commits(cls, commits):
    """Merge all the commits for the user.

    Aggregate line counts, and use the most recent commit (by date/time)
    as the representative commit for the user.
    """
    representative = None
    for candidate in commits:
        if not representative:
            representative = candidate
        elif candidate.committer_time > representative.committer_time:
            # newer commit takes over, carrying the accumulated count
            candidate.line_count += representative.line_count
            representative = candidate
        else:
            representative.line_count += candidate.line_count
    return representative
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(self):
    """Sort by commit size, per author.

    Merges each author's commits (matched on author email) into a single
    representative commit, then sorts those by aggregated line count,
    descending. The result is cached on self.sorted_commits and returned.
    """
    by_mail = operator.attrgetter('author_mail')
    # itertools.groupby requires its input sorted by the grouping key,
    # so sort explicitly on author email (the previous code sorted the
    # commits by their natural order, which only groups correctly if
    # that order happens to match author email). Also dropped a dead
    # `if group:` check -- a groupby group iterator is always truthy.
    users = []
    for _, group in itertools.groupby(sorted(self.commits, key=by_mail),
                                      by_mail):
        users.append(self.merge_user_commits(group))
    # Finally sort by the (aggregated) commits' line counts
    self.sorted_commits = sorted(users,
                                 key=operator.attrgetter('line_count'),
                                 reverse=True)
    return self.sorted_commits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_ranges(cls, lines):
    """Convert list of lines into list of line range tuples.

    Only will be called if there is one or more entries in the list.
    Single lines will be converted into a tuple with the same start and
    end line; runs of consecutive line numbers collapse into one range.

    Unlike the previous version, the caller's list is no longer mutated
    (it used to pop the first element off).
    """
    start_line = last_line = lines[0]
    ranges = []
    for line in lines[1:]:
        if line == last_line + 1:
            # still consecutive -- extend the current range
            last_line = line
        else:
            # gap found -- close the current range and start a new one
            ranges.append((start_line, last_line))
            start_line = last_line = line
    ranges.append((start_line, last_line))
    return ranges
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def determine_coverage(cls, coverage_file):
    """Scan the summary section of report looking for coverage data.

    Will see CSS class with "stm mis" (missing coverage), or "stm par"
    (partial coverage), and can extract line number. Will get file name
    from title tag.

    Returns a (source_file, line_ranges) tuple; ('', []) is returned for
    files reported as 100% covered.
    """
    lines = []
    source_file = 'ERROR'  # replaced once the title tag is seen
    for line in coverage_file:
        m = title_re.match(line)
        if m:
            if m.group(2) == '100':
                return ('', [])  # fully covered -- nothing to report
            source_file = m.group(1)
            continue
        m = source_re.match(line)
        if m:
            lines.append(int(m.group(1)))
            continue
        if end_re.match(line):
            break  # end of the summary section
    # NOTE(review): make_ranges expects at least one collected line;
    # confirm reports below 100% always contain uncovered-line entries.
    line_ranges = cls.make_ranges(lines)
    return (source_file, line_ranges)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_modules(self):
    """Generator to obtain lines of interest from coverage report files.

    Will verify that the source file is within the project tree, relative
    to the coverage directory.

    Yields (absolute_source_path, line_ranges) tuples; raises
    SourceNotFound when a report references a missing file.
    """
    coverage_dir = os.path.join(self.root, 'cover')
    for name in fnmatch.filter(os.listdir(coverage_dir), "*.html"):
        if name == 'index.html':
            continue  # summary page, not a module report
        with open(os.path.join(coverage_dir, name)) as cover_file:
            src_file, line_ranges = self.determine_coverage(cover_file)
        if not src_file:
            continue  # module was fully covered
        src_file = os.path.abspath(os.path.join(self.root, src_file))
        if os.path.isfile(src_file):
            yield (src_file, line_ranges)
        else:
            raise SourceNotFound(
                "Source file %(file)s not found at %(area)s" %
                {'file': os.path.basename(src_file),
                 'area': os.path.dirname(src_file)})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sort(self):
    """Consolidate adjacent lines, if same commit ID.

    Will modify line number to be a range, when two or more lines with
    the same commit ID.

    Note: consumes the first entry of self.commits (via pop) and caches
    the result in self.sorted_commits.
    """
    self.sorted_commits = []
    if not self.commits:
        return self.sorted_commits
    # seed with the first commit; extend its range while successive
    # commits share the same uuid on consecutive line numbers
    prev_commit = self.commits.pop(0)
    prev_line = prev_commit.line_number
    prev_uuid = prev_commit.uuid
    for commit in self.commits:
        if (commit.uuid != prev_uuid or
                commit.line_number != (prev_line + 1)):
            # run ended: record the completed range, start a new run
            prev_commit.lines = self.line_range(prev_commit.line_number,
                                                prev_line)
            self.sorted_commits.append(prev_commit)
            prev_commit = commit
        prev_line = commit.line_number
        prev_uuid = commit.uuid
    # Take care of last commit
    prev_commit.lines = self.line_range(prev_commit.line_number, prev_line)
    self.sorted_commits.append(prev_commit)
    return self.sorted_commits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(url, content, **args):
    """Put an object into a ftps URL."""
    # FTPSResource is a context manager that opens/closes the FTPS connection;
    # extra keyword args (credentials, ports, ...) are passed straight through.
    with FTPSResource(url, **args) as resource:
        resource.write(content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yaml_str_join(l, n):
    '''
    YAML loader to join strings

    The keywords are as following:

    * `hostname`: Your hostname (from :func:`util.system.get_hostname`)
    * `timestamp`: Current timestamp (from :func:`util.system.get_timestamp`)

    :returns:
        A `non character` joined string |yaml_loader_returns|

    .. note::
        Be careful with timestamps when using a `config` in :ref:`settings`.

    .. seealso:: |yaml_loader_seealso|
    '''
    # Imported lazily so the YAML loader can be registered without pulling
    # in the photon utility modules at import time.
    from photon.util.system import get_hostname, get_timestamp
    s = l.construct_sequence(n)
    # Substitute the recognized keywords in-place; all other items pass through.
    for num, seq in enumerate(s):
        if seq == 'hostname':
            s[num] = '%s' % (get_hostname())
        elif seq == 'timestamp':
            s[num] = '%s' % (get_timestamp())
    return ''.join([str(i) for i in s])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yaml_loc_join(l, n):
    '''
    YAML loader to join paths

    The keywords come directly from :func:`util.locations.get_locations`.
    See there!

    :returns:
        A `path seperator` (``/``) joined string |yaml_loader_returns|

    .. seealso:: |yaml_loader_seealso|
    '''
    # Imported lazily to avoid circular imports during loader registration.
    from photon.util.locations import get_locations
    locations = get_locations()
    s = l.construct_sequence(n)
    # Replace any sequence item that names a known location with its path.
    for num, seq in enumerate(s):
        if seq in locations:
            s[num] = '%s' % (locations[seq])
    return _path.join(*s)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dict_merge(o, v):
    '''
    Recursively climbs through dictionaries and merges them together.

    :param o:
        The first dictionary
    :param v:
        The second dictionary
    :returns:
        A dictionary (who would have guessed?)

    .. note::
        Make sure `o` & `v` are indeed dictionaries,
        bad things will happen otherwise!
    '''
    # A non-dict second operand simply replaces whatever was there before.
    if not isinstance(v, dict):
        return v
    merged = _deepcopy(o)
    for key, value in v.items():
        current = merged.get(key)
        # Recurse only when the existing value is a non-empty dict;
        # otherwise the incoming value wins outright (deep-copied so the
        # result never aliases the inputs).
        if isinstance(current, dict) and current:
            merged[key] = dict_merge(current, value)
        else:
            merged[key] = _deepcopy(value)
    return merged
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_list(i, use_keys=False):
    '''
    Converts items to a list.

    :param i: Item to convert

        * If `i` is ``None``, the result is an empty list
        * If `i` is 'string', the result won't be \
        ``['s', 't', 'r',...]`` rather more like ``['string']``
        * If `i` is a nested dictionary, the result will be a flattened list.

    :param use_keys:
        If i is a dictionary, use the keys instead of values

    :returns:
        All items in i as list
    '''
    from photon.util.system import shell_notify
    # Any falsy input (None, '', {}, [], 0) maps to an empty list.
    if not i:
        return []
    if isinstance(i, str):
        return [i]
    if isinstance(i, list):
        return i
    if isinstance(i, dict):
        res = list()
        # Flatten one dict level per recursion step; nested dicts recurse.
        for e in i.keys() if use_keys else i.values():
            res.append(to_list(e)) if isinstance(e, dict) else res.append(e)
        return res
    # Unhandled type: warn (and, given state=True, presumably abort) via
    # shell_notify -- note there is no explicit return on this path.
    shell_notify('type for %s uncovered' % (i), state=True, more=type(i))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def this(obj, **kwargs):
    """Prints series of debugging steps to user. Runs through pipeline of functions and print results of each. """
    # "verbose" only controls the banner lines; inspector output always prints.
    verbose = kwargs.get("verbose", True)
    if verbose:
        print('{:=^30}'.format(" whatis.this? "))
    # `pipeline` is a module-level sequence of inspector functions; each may
    # return a printable summary or None (None means: nothing to report).
    for func in pipeline:
        s = func(obj, **kwargs)
        if s is not None:
            print(s)
    if verbose:
        print('{:=^30}\n'.format(" whatis.this? "))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_repeat_masker_header(pairwise_alignment):
    """generate header string of repeatmasker formated repr of self."""
    res = ""
    # Fixed-order space-separated fields: score, % substitutions, % indels
    # in each sequence, then s1 name/coordinates, then s2 (strand-dependent).
    res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
    # Fall back to a placeholder name when the sequence is unnamed.
    res += (pairwise_alignment.s1.name
            if (pairwise_alignment.s1.name != "" and
                pairwise_alignment.s1.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    res += str(pairwise_alignment.s1.start) + " "
    res += str(pairwise_alignment.s1.end - 1) + " "
    res += "(" + str(pairwise_alignment.s1.remaining) + ") "
    # "C" flags a match against the reverse complement of s2.
    res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
    res += (pairwise_alignment.s2.name
            if (pairwise_alignment.s2.name != "" and
                pairwise_alignment.s2.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    # For reverse-complement matches RepeatMasker prints (remaining) first
    # and swaps start/end, so the layout depends on strand.
    res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
            if not pairwise_alignment.s2.is_positive_strand()
            else str(pairwise_alignment.s2.start))
    res += " "
    # Note here that we need to convert between our internal representation
    # for coordinates and the repeat-masker one; internally, we always store
    # coordinates as exclusive of the final value with start < end;
    # repeatmasker gives the larger coordinate as the 'start' when the match
    # is to the reverse complement, so we have to swap start/end, and its
    # coordinates are inclusive of end, so we have to subtract 1 from end.
    res += str(pairwise_alignment.s2.end - 1) + " "
    res += (str(pairwise_alignment.s2.start)
            if not pairwise_alignment.s2.is_positive_strand()
            else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
    # Trailing fields: an unparsed RepeatMasker column plus the alignment ID.
    res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
    res += str(pairwise_alignment.meta[RM_ID_KEY])
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_is_alignment_line(parts, s1_name, s2_name):
    """ return true if the tokenized line is a repeatmasker alignment line. :param parts: the line, already split into tokens around whitespace :param s1_name: the name of the first sequence, as extracted from the header of the element this line is in :param s2_name: the name of the second sequence, as extracted from the header of the element this line is in """
    # Alignment lines always carry at least a name token and a sequence token.
    if len(parts) < 2:
        return False
    if _rm_name_match(parts[0], s1_name):
        return True
    # Second-sequence lines may be prefixed with "C" when the match is to
    # the reverse complement, pushing the name into the second column.
    if (_rm_name_match(parts[0], s2_name) or
            (parts[0] == "C" and _rm_name_match(parts[1], s2_name))):
        return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_is_header_line(parts, n):
""" determine whether a pre-split string is a repeat-masker alignment header. headers have no special structure or symbol to mark them, so this is based only on the number of elements, and what data type they are. """ |
if (n == 15 and parts[8] == "C"):
return True
if (n == 14 and parts[0].isdigit()):
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_get_names_from_header(parts):
""" get repeat and seq. name from repeatmasker alignment header line. An example header line is:: 239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4 the genomic sequence name is always at position 4 (zero-based index); the name of the repeat is at position 9 if matching the reverse complement of the consensus sequence for the repeat and position 8 otherwise :param parts: the header line, as a tokenized list. :return: tuple of (name of genomic sequence, name of repeat sequence) """ |
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
return (parts[4], parts[8]) if len(parts) == 14 else (parts[4], parts[9]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_get_repeat_coords_from_header(parts):
""" extract the repeat coordinates of a repeat masker match from a header line. An example header line is:: 239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4 239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4 if the match is to the reverse complement, the start and end coordinates are at positions 11 and 12 (zero-based indexes), otherwise they're at positions 9 and 10. In the later case, the 'start' is the earlier number and the end is the larger one. In reverse complement matches, RM lists the 'start' as the larger number and the end as the smaller one. We swap these around to match the Pyokit convention of start < end always and also adjust the end so it is not inclusive of the last position :param parts: the header line, as a tokenized list. :return: tuple of (start, end) """ |
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
if len(parts) == 14:
s = int(parts[9])
e = int(parts[10]) + 1
else:
s = int(parts[12])
e = int(parts[11]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_name_match(s1, s2):
""" determine whether two sequence names from a repeatmasker alignment match. :return: True if they are the same string, or if one forms a substring of the other, else False """ |
m_len = min(len(s1), len(s2))
return s1[:m_len] == s2[:m_len] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rm_extract_sequence_and_name(alig_str_parts, s1_name, s2_name):
    """ parse an alignment line from a repeatmasker alignment and return the name of the sequence it si from and the sequence portion contained in the line. :param alig_str_parts: the alignment string, split around whitespace as list :param s1_name: the name of the first sequence in the alignment this line is from :param s2_name: the name of the second sequence in the alignment this line is from :return: a tuple of name and sequence string; name will always be either s1_name or s2_name :raise AlignmentIteratorError: if the line doesn't have the expected number of elements, or the name does not match either of s1_name or s2_name """
    # first, based on the number of parts we have we'll guess whether its a
    # reverse complement or not
    if len(alig_str_parts) == 4:
        # expect the first element to amtch something..
        # layout: <name> <start> <sequence> <end>
        nm = alig_str_parts[0]
        seq = alig_str_parts[2]
    elif len(alig_str_parts) == 5:
        # expect the second element to match something...
        # layout: C <name> <start> <sequence> <end>  (reverse complement)
        nm = alig_str_parts[1]
        seq = alig_str_parts[3]
    else:
        raise AlignmentIteratorError("failed parsing alignment line '" +
                                     " ".join(alig_str_parts) + "'; reason: " +
                                     "expected this line to have 4 or 5 " +
                                     "elements, but it has " +
                                     str(len(alig_str_parts)))
    # Map the (possibly truncated) name on the line back to one of the two
    # full names taken from the header; prefix matching handles truncation.
    if _rm_name_match(nm, s1_name):
        return s1_name, seq
    elif _rm_name_match(nm, s2_name):
        return s2_name, seq
    else:
        raise AlignmentIteratorError("failed parsing alignment line '" +
                                     " ".join(alig_str_parts) + "'; reason: " +
                                     "extracted alignment name (" + nm + ") " +
                                     "did not match either sequence name from " +
                                     "header line (" + s1_name + " or " +
                                     s2_name + ")")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_file(path):
    """ Scan `path` for viruses using ``clamd`` antivirus daemon. Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: ValueError: When the server is not running. AssertionError: When the internal file doesn't exists. """
    path = os.path.abspath(path)
    assert os.path.exists(path), "Unreachable file '%s'." % path
    # Prefer the local unix socket; fall back to the network socket if the
    # daemon is not listening there.
    try:
        cd = pyclamd.ClamdUnixSocket()
        cd.ping()
    except pyclamd.ConnectionError:
        cd = pyclamd.ClamdNetworkSocket()
        try:
            cd.ping()
        except pyclamd.ConnectionError:
            raise ValueError(
                "Couldn't connect to clamd server using unix/network socket."
            )
        # NOTE(review): this re-creates a unix-socket client even though the
        # unix socket just failed and the network socket succeeded -- the
        # following ping will likely fail again; verify this is intended.
        cd = pyclamd.ClamdUnixSocket()
    assert cd.ping(), "clamd server is not reachable!"
    # scan_file returns None for a clean file; normalize to an empty dict.
    result = cd.scan_file(path)
    return result if result else {}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_best_local_timezone():
    """ Compares local timezone offset to pytz's timezone db, to determine a matching timezone name to use when TIME_ZONE is not set. """
    # Fast path: tzlocal already knows a zone name pytz recognizes.
    zone_name = tzlocal.get_localzone().zone
    if zone_name in pytz.all_timezones:
        return zone_name
    # Otherwise fall back to matching on the C library's offset/name pair,
    # taking DST into account.
    if time.daylight:
        local_offset = time.altzone
        localtz = time.tzname[1]
    else:
        local_offset = time.timezone
        localtz = time.tzname[0]
    # time.timezone/altzone are seconds WEST of UTC, hence the negation.
    local_offset = datetime.timedelta(seconds=-local_offset)
    for zone_name in pytz.all_timezones:
        timezone = pytz.timezone(zone_name)
        if not hasattr(timezone, '_tzinfos'):
            # Some zones (e.g. fixed-offset ones) lack the internal table.
            continue
        # NOTE(review): iterating pytz's private _tzinfos mapping yields
        # (utcoffset, dstoffset, tzname) tuples -- relies on pytz internals.
        for utcoffset, daylight, tzname in timezone._tzinfos:
            if utcoffset == local_offset and tzname == localtz:
                return zone_name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def topic_quick_links(context, topic, latest, last_seen_time):
    """ Creates topic listing page links for the given topic, with the given number of posts per page. Topics with between 2 and 5 pages will have page links displayed for each page. Topics with more than 5 pages will have page links displayed for the first page and the last 3 pages. """
    output_text = u''
    pages = topic.page_count
    # Derive the page count from the post count when it was never cached.
    if not pages or pages == 0:
        hits = topic.post_count - 1
        if hits < 1:
            hits = 1
        pages = hits // PAGINATE_BY + 1
    # determine if we need to show new link.
    if latest and latest.post_date_int > last_seen_time:
        output_text += '<a href="{0}#first-new-post" class="new">new</a>'.format(topic.last_url)
    # If only one post (or none) only return new link, if anything.
    if topic.post_count < 2:
        output_text += '1 post'
    else:
        # We have more than one post. Create last link
        if latest:
            last_link = '<a href="{0}#post-{1}" title="latest post">'.format(topic.last_url, latest.id)
        else:
            last_link = ''
        # if only one page, just show the number of posts wrapped in a link
        if pages < 2:
            output_text += '{0}{1} posts</a>'.format(last_link, topic.post_count)
        else:
            page_link = u'<a href="%spage%%s/">%%s</a>' % topic.get_short_url()
            if pages < 4:
                # Few pages: link every page individually.
                page_links = u' '.join([page_link % (page, page) for page in range(1, pages + 1)])
            else:
                # Many pages: first page, an ellipsis, then the last two.
                page_links = u' '.join(
                    [page_link % (1, 1), u'<small>…</small>'] +
                    [page_link % (page, page) for page in range(pages - 1, pages + 1)]
                )
            output_text += '{0}{1}►</a>'.format(page_links, last_link)
    # Returned dict becomes the inclusion-tag template context.
    return {
        'output_text': output_text,
        'topic': topic,
        'forum_slug': context['forum_slug'],
        'user': context['user'],
        'perms': context['perms']
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ignore_user_agent(user_agent):
    """ compare the useragent from the broswer to the ignore list This is popular if you want a mobile device to not trigger as mobile. For example iPad."""
    if not user_agent:
        return False
    # Case-insensitive substring match against every configured ignore entry.
    haystack = user_agent.lower()
    for entry in MOBI_USER_AGENT_IGNORE_LIST:
        if entry and entry.lower() in haystack:
            return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_request(request):
    """Adds a "mobile" attribute to the request which is True or False depending on whether the request should be considered to come from a small-screen device such as a phone or a PDA"""
    if 'HTTP_X_OPERAMINI_FEATURES' in request.META:
        # Then it's running opera mini. 'Nuff said.
        # Reference from:
        # http://dev.opera.com/articles/view/opera-mini-request-headers/
        request.mobile = True
        return None
    if 'HTTP_ACCEPT' in request.META:
        s = request.META['HTTP_ACCEPT'].lower()
        if 'application/vnd.wap.xhtml+xml' in s:
            # Then it's a wap browser
            request.mobile = True
            return None
    if 'HTTP_USER_AGENT' in request.META:
        # This takes the most processing. Surprisingly enough, when I
        # Experimented on my own machine, this was the most efficient
        # algorithm. Certainly more so than regexes.
        # Also, Caching didn't help much, with real-world caches.
        s = request.META['HTTP_USER_AGENT'].lower()
        # `search_strings` is a module-level list of mobile UA fragments.
        for ua in search_strings:
            if ua in s:
                # check if we are ignoring this user agent: (IPad)
                if not ignore_user_agent(s):
                    request.mobile = True
                    if MOBI_DETECT_TABLET:
                        request.tablet = _is_tablet(s)
                    return None
    # Otherwise it's not a mobile
    request.mobile = False
    request.tablet = False
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_by_builder(self, builder: Builder, stats: BuildProcessStats):
    """ run one builder, return statistics about the run

    If the builder's signature is already in the cache, verify each recorded
    target on disk and restore any bad/missing files from the cache instead
    of rebuilding.  Otherwise run the builder and record its outputs.

    :param builder: the builder to run (or verify from cache).
    :param stats: accumulator updated in place with per-file/builder results.
    """
    logger = logging.getLogger(__name__)
    target_signature = builder.get_signature()
    assert target_signature is not None, "builder signature is None"
    if self.cache.list_sig_ok(target_signature):
        # Cache hit: verify targets on disk against recorded sha1 sums.
        logger.info("verifying [{}]".format(builder.get_name()))
        file_bad = 0
        file_correct = 0
        file_missing = 0
        file_total = 0
        list_filename = self.cache.get_list_filename(target_signature)
        for object_name, signature in Cache.iterate_objects(list_filename):
            filename = self.cache.get_object_filename(signature)
            if os.path.isfile(object_name):
                object_name_signature = sha1_file(object_name)
                if object_name_signature != signature:
                    # logger.info("file [{}] is incorrect. Getting from cache.".format(object_name))
                    copy_mkdir(filename, object_name)
                    stats.add_copy_sha1(filename, object_name)
                    file_bad += 1
                else:
                    # logger.info("file [{}] is up to date".format(object_name))
                    stats.add_nop(filename, object_name)
                    file_correct += 1
            else:
                # logger.info("file [{}] is missing. Getting from cache.".format(object_name))
                copy_mkdir(filename, object_name)
                stats.add_copy_missing(filename, object_name)
                file_missing += 1
            file_total += 1
        if file_bad > 0 or file_missing > 0:
            logger.info("Retrieved {} files from cache (bad/correct/missing = {}/{}/{}".format(
                file_total, file_bad, file_correct, file_missing))
        else:
            logger.info("ok [{}]".format(builder.get_name()))
    else:
        # Cache miss: actually run the builder and record what it produced.
        # this is one of the rare cases in which really want to catch all exceptions.
        # noinspection PyBroadException
        try:
            logger.info("running [{}]".format(builder.get_name()))
            builder.build()
            logger.info("ok [{}]".format(builder.get_name()))
            stats.add_builder_ok(builder)
            # first lets build a list of what was constructed
            targets = builder.get_targets()
            targets.extend(builder.get_targets_post_build())
            # Record "<target> <sha1>" per line, and store each target's
            # contents in the cache keyed by its content signature.
            content = ""
            for target in targets:
                signature = sha1_file(target)
                content += target + " " + signature + "\n"
                self.cache.save_object_by_signature(signature, target)
            self.cache.save_list_by_signature(target_signature, content)
        except Exception as e:
            # A builder failure is recorded in stats but does not abort
            # the overall build process.
            logger.info("failed [{}]".format(builder.get_name()))
            logger.info("exception [{}]".format(e))
            stats.add_builder_fail(builder, e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def anomalous_score(self):
    """Anomalous score of this reviewer. Initial anomalous score is :math:`1 / |R|` where :math:`R` is a set of reviewers. """
    # NOTE(review): a stored score of exactly 0 is falsy and would fall
    # back to the 1/|R| default -- confirm that is intended behavior.
    return self._anomalous if self._anomalous else 1. / len(self._graph.reviewers)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(self):
    """Summary of reviews for this product. Initial summary is computed by .. math:: \\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r), where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`. """
    # Return the cached summary when one has been set explicitly.
    if self._summary:
        return self._summary
    # Otherwise build an initial summary from all reviews of this product.
    reviewers = self._graph.retrieve_reviewers(self)
    return self._summary_cls(
        [self._graph.retrieve_review(r, self) for r in reviewers])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summary(self, v):
    """Set summary. Args: v: A new summary. It could be a single number or lists. """
    # Iterables are wrapped as-is; scalars are coerced to float first.
    if hasattr(v, "__iter__"):
        self._summary = self._summary_cls(v)
    else:
        self._summary = self._summary_cls(float(v))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_summary(self, w):
    """Update summary. The new summary is a weighted average of reviews i.e. .. math:: \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)} {\\sum_{r \\in R} \\mbox{weight}(r)}, where :math:`R` is a set of reviewers reviewing this product, :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are the review and weight of the reviewer :math:`r`, respectively. Args: w: A weight function. Returns: absolute difference between old summary and updated one. """
    old = self.summary.v  # pylint: disable=no-member
    reviewers = self._graph.retrieve_reviewers(self)
    reviews = [self._graph.retrieve_review(
        r, self).score for r in reviewers]
    weights = [w(r.anomalous_score) for r in reviewers]
    # Guard against a zero total weight (np.average would divide by zero);
    # fall back to the unweighted mean in that case.
    if sum(weights) == 0:
        self.summary = np.mean(reviews)
    else:
        self.summary = np.average(reviews, weights=weights)
    return abs(self.summary.v - old)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new_reviewer(self, name, anomalous=None):
    """Create a new reviewer. Args: name: name of the new reviewer. anomalous: initial anomalous score. (default: None) Returns: A new reviewer instance. """
    n = self._reviewer_cls(
        self, name=name, credibility=self.credibility, anomalous=anomalous)
    # Register the reviewer both as a graph node and in the flat list.
    self.graph.add_node(n)
    self.reviewers.append(n)
    return n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new_product(self, name):
    """Create a new product. Args: name: name of the new product. Returns: A new product instance. """
    n = self._product_cls(self, name, summary_cls=self._summary_cls)
    # Register the product both as a graph node and in the flat list.
    self.graph.add_node(n)
    self.products.append(n)
    return n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_review(self, reviewer, product, review, date=None):
    """Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """
    # Validate argument types up front so graph edges stay homogeneous.
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    elif not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    # Wrap the raw score in a review object and attach it to the edge.
    r = self._review_cls(review, date=date)
    self.graph.add_edge(reviewer, product, review=r)
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve_products(self, reviewer):
    """Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed. """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    # Edges point reviewer -> product, so successors are the products.
    return list(self.graph.successors(reviewer))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve_reviewers(self, product):
    """Retrieve reviewers who reviewed a given product. Args: product: A product specifying reviewers. Returns: A list of reviewers who review the product. Raises: TypeError: when given product isn't instance of specified product class when this graph is constructed. """
    if not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    # Edges point reviewer -> product, so predecessors are the reviewers.
    return list(self.graph.predecessors(product))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retrieve_review(self, reviewer, product):
    """Retrieve review that the given reviewer put the given product. Args: reviewer: An instance of Reviewer. product: An instance of Product. Returns: A review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. KeyError: When the reviewer does not review the product. """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    elif not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    try:
        return self.graph[reviewer][product]["review"]
    except TypeError:
        # NOTE(review): a missing edge in networkx normally raises KeyError,
        # not TypeError -- verify this except clause matches the graph
        # library actually in use; a raw KeyError would propagate unchanged.
        raise KeyError(
            "{0} does not review {1}.".format(reviewer, product))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _weight_generator(self, reviewers):
"""Compute a weight function for the given reviewers. Args: reviewers: a set of reviewers to compute weight function. Returns: a function computing a weight for a reviewer. """ |
scores = [r.anomalous_score for r in reviewers]
mu = np.average(scores)
sigma = np.std(scores)
if sigma:
def w(v):
"""Compute a weight for the given reviewer.
Args:
v: anomalous score of a reviewer.
Returns:
weight of the given anomalous score.
"""
try:
exp = math.exp(self.alpha * (v - mu) / sigma)
return 1. / (1. + exp)
except OverflowError:
return 0.
return w
else:
# Sigma = 0 means all reviews have same anomalous scores.
# In this case, all reviews should be treated as same.
return lambda v: 1. |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_credibilities(self, output):
    """Dump credibilities of all products. Args: output: a writable object. """
    # Emit one JSON object per line (JSON Lines format).
    for p in self.products:
        json.dump({
            "product_id": p.name,
            "credibility": self.credibility(p)
        }, output)
        output.write("\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_dictionaries(a, b):
    """Merge two dictionaries; duplicate keys get value from b.

    Neither input is modified; a new (shallow-copied) dict is returned.

    :param a: first dictionary.
    :param b: second dictionary; its values win on duplicate keys.
    :return: a new dict containing keys from both inputs.
    """
    # Idiomatic shallow merge: copy a, then overlay b (b wins on conflicts).
    res = dict(a)
    res.update(b)
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __build_sequence(parts):
    """Build a sequence object using the pre-tokenized parts from a MAF line. s -- a sequence line; has 6 fields in addition to 's': * source sequence, * start coord. of seq., zero-based. If -'ve strand, rel to start of rev. comp. * ungapped length of the sequence * strand * src size -- the full length of the source sequence * the sequence itself """
    strand = parts[4]
    seq_length = int(parts[3])
    total_seq_len = int(parts[5])
    # MAF gives minus-strand starts relative to the reverse complement;
    # convert to forward-strand coordinates so start < end always holds.
    start = (int(parts[2]) if strand == "+"
             else total_seq_len - int(parts[2]) - seq_length)
    end = start + seq_length
    # Bases left in the source sequence after this block's end.
    remain = total_seq_len - end
    return Sequence(parts[1], parts[6], start, end, strand, remain)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __annotate_sequence_with_context(seq, i_line_parts):
    """Extract meta data from pre-tokenized maf i-line and populate sequence. i -- always come after s lines, and contain information about the context of the sequence. Five fields are given, not counting the 'i' * source sequence (must match s line before this) * left status (see below) * left count; num of bases in source sequence between start of the block and end of previous block (0 if this is the first) * right status (see below) * right count; num of bases in source after end of this block before start of next status (left/right) is a single char and can be: * C -- the sequence before or after is contiguous with this block. * I -- there are bases between the bases in this block and the one before or after it. * N -- this is the first sequence from this src chrom or scaffold. * n -- this is the first sequence from this src chrom or scaffold but it is bridged by another alignment from a different chrom or scaffold. * M -- there is missing data before or after this block (Ns in the sequence). * T -- the sequence in this block has been used before in a previous block (likely a tandem duplication) """
    # The i-line must refer to the same source sequence as the preceding
    # s-line; anything else indicates a malformed file.
    if i_line_parts[1] != seq.name:
        raise MAFError("Trying to populate meta data for sequence " + seq.name +
                       " with i-line information for " +
                       str(i_line_parts[1]) + "; maflormed MAF file?")
    if len(i_line_parts) != 6:
        raise MAFError("i-line with " + str(len(i_line_parts)) + "; expected 6.")
    # Store the four context fields (status chars and base counts) as meta data.
    seq.meta_data[LEFT_STATUS_KEY] = i_line_parts[2]
    seq.meta_data[LEFT_COUNT_KEY] = int(i_line_parts[3])
    seq.meta_data[RIGHT_STATUS_KEY] = i_line_parts[4]
    seq.meta_data[RIGHT_COUNT_KEY] = int(i_line_parts[5])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __annotate_sequence_with_quality(seq, q_line_parts):
    """Extract meta data from pre-tokenized maf q-line and populate sequence. q -- quality information about an aligned base in a species. Two fields after the 'q': the source name and a single digit for each nucleotide in its sequence (0-9 or F, or - to indicate a gap). """
    # The q-line must refer to the same source sequence as the preceding s-line.
    if q_line_parts[1] != seq.name:
        raise MAFError("trying to populate meta data for sequence " + seq.name +
                       " with q-line information for " +
                       str(q_line_parts[1]) + "; maflormed MAF file?")
    # One quality character per aligned base (gaps included), so the quality
    # string must be exactly as long as the (gapped) sequence itself.
    if len(q_line_parts[2]) != len(seq):
        raise MAFError("trying to populate quality meta data for sequence with " +
                       "length " + str(len(seq)) + " using quality line with " +
                       "length " + str(len(q_line_parts[2])) + "; malformed " +
                       "MAF file?")
    seq.meta_data[QUALITY_META_KEY] = q_line_parts[2]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_frontmatter(file_name, title, makenew=False):
    """Prepend Jekyll YAML frontmatter (a title block) to a Markdown file.

    Parameters
    ==========
    file_name : String
        Relative path of the Markdown file to annotate.
    title : String
        Title of the page that will go into the Jekyll project.
    makenew : Boolean (OPTIONAL)
        When True, write the result next to the original as
        ``<name>_added_frontmatter.md`` instead of editing in place.
    """
    frontmatter = '---\n' + 'title: ' + title + '\n' + '---\n'
    with open(file_name, "r+") as source:
        if makenew:
            # Leave the original untouched and emit an annotated copy.
            target_name = file_name[:-3] + '_added_frontmatter.md'
            with open(target_name, 'w') as target:
                target.write(frontmatter)
                target.write(source.read())
        else:
            # Rewrite the original with the frontmatter prepended; the new
            # content is strictly longer, so no truncate is needed.
            body = source.read()
            source.seek(0)
            source.write(frontmatter + body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summarize(self, text, n):
    """Return a list of the n highest-scoring sentences of *text*."""
    sentences = sent_tokenize(text)
    assert n <= len(sentences)
    tokenized = [word_tokenize(sentence.lower()) for sentence in sentences]
    self._freq = self._compute_frequencies(tokenized)
    # Score each sentence by summing the frequencies of its known words.
    scores = defaultdict(int)
    for index, words in enumerate(tokenized):
        for word in words:
            if word in self._freq:
                scores[index] += self._freq[word]
    chosen = self._rank(scores, n)
    return [sentences[index] for index in chosen]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """ |
return nlargest(n, ranking, key=ranking.get) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_certifier():
    """Decorator that turns a raw check function into a certifier.

    Certifier functions support partial application: calling the wrapped
    function with a value as its first argument certifies it immediately
    and returns the value; calling it with no value returns a callable
    that certifies at a later time.
    """
    def decorator(func):
        @six.wraps(func)
        def wrapper(value=_undefined, **kwargs):
            def certify(val):
                # Certification is a no-op when globally disabled.
                if is_enabled():
                    exec_func(func, val, **kwargs)
                return val
            if value is _undefined:
                # No value supplied: defer certification.
                return certify
            return certify(value)
        return wrapper
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def certify_required(value, required=False):
    """Certify that a value is present if required.

    :param object value: The value that is to be certified.
    :param bool required: Is the value required?
    :raises CertifierParamError: ``required`` is not a bool.
    :raises CertifierValueError: Required value is `None`.
    """
    # Validate our own keyword argument first.
    if not isinstance(required, bool):
        raise CertifierParamError(
            'required',
            required,
        )
    if value is not None:
        # Value present: nothing further to certify here.
        return
    if required:
        raise CertifierValueError(
            message="required value is None",
        )
    # Absent but optional: certification trivially succeeds.
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def certify_parameter(certifier, name, value, kwargs=None):
    """Internal certifier for kwargs passed to Certifiable public methods.

    :param callable certifier: the certifier to run against *value*.
    :param str name: the name of the parameter being checked.
    :param object value: the value of the parameter.
    :param dict kwargs: extra keyword arguments for the certifier.
    :raises CertifierParamError: the parameter failed certification.
    """
    extra = kwargs if kwargs else {}
    try:
        certifier(value, **extra)
    except CertifierError as err:
        # Re-raise as a parameter error, preserving the original cause.
        six.raise_from(CertifierParamError(name, value), err)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enable_from_env(state=None):
    """Enable certification for this thread from ``CERTIFIABLE_STATE``.

    :param bool state: fallback status when the variable is unset or
        not a valid integer.
    :return: The new state.
    :rtype: bool
    """
    try:
        raw = os.environ.get(
            ENVVAR,
            state,
        )
        new_state = bool(int(raw))
    except Exception:  # pylint: disable=broad-except
        # Unset or non-numeric: fall back to the supplied default.
        new_state = bool(state)
    return enable(new_state)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compete(source_x, source_o, timeout=None, memlimit=None, cgroup='tictactoe', cgroup_path='/sys/fs/cgroup'):
    """Fight two player source files against each other.

    Returns either:
      * ('ok', 'x' | 'draw' | 'o', GAMEPLAY)
      * ('error', GUILTY, REASON, GAMEPLAY)

    REASON   := utf8-encoded error string (can be up to 65k chars)
    GAMEPLAY := [ NUM ]
    GUILTY   := 'x' | 'o' (during whose turn the error occurred)
    NUM      := 1..81 | 0

    NUM=0 means the move resulted in error (then REASON is non-empty).
    GAMEPLAY is never more than 255 characters long:
    len(",".join(map(str, range(1, 81)))) == 230
    """
    gameplay = []  # moves so far, encoded as numbers 1..81
    for xo, moveresult, log in run_interactive(source_x, source_o, timeout,
                                               memlimit, cgroup, cgroup_path):
        if moveresult[0] == 'error':
            # A trailing 0 marks the erroneous move; xo is the player at fault.
            return 'error', xo, moveresult[1], gameplay + [0]
        elif moveresult[0] == 'state_coords':
            gameplay.append(coords_to_num(moveresult[1][1]))
            state = moveresult[1][0]
            if state == 'draw' or state == 'x' or state == 'o':
                # Terminal state reached: report winner (or draw) and moves.
                return 'ok', state, gameplay
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fix_key(key):
    '''Normalize keys to Unicode strings.'''
    if isinstance(key, str):
        # The default system encoding may be ascii, so decode
        # explicitly as UTF-8.
        return unicode(key, 'utf-8')
    if isinstance(key, unicode):
        return key
    raise TypeError(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def truncate_most_common(self, truncation_length):
    '''
    Sort the counter and keep only the most common items, up to
    ``truncation_length``, in place.

    :type truncation_length: int
    '''
    keep_keys = set(v[0] for v in self.most_common(truncation_length))
    # Snapshot the keys first: popping while iterating the live keys()
    # view raises "dictionary changed size during iteration" on Python 3.
    for key in list(self.keys()):
        if key not in keep_keys:
            self.pop(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rst(value_rule):
    '''Given the data and type information, generate a list of strings for
    insertion into a RST document.

    :param value_rule: mapping-like rule object; ``has('type')`` reports
        whether an explicit type is present and ``value_rule['type'].value``
        names it.  Defaults to 'string' when absent.
    :return: list of RST lines (empty for the 'ignore' type).
    '''
    if value_rule.has('type'):
        value_type = value_rule['type'].value
    else:
        value_type = 'string'
    lines = []
    if value_type != 'ignore':
        lines.append('A *' + value_type + '* value is expected.')
        lines.append('')
    # NOTE: the original dispatched on ~25 specific type names (label,
    # price, qty, percent, unit, ...) but every branch was a no-op pass;
    # that dead chain was removed.  Add type-specific documentation here
    # when a type actually needs extra text.
    return lines
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def circle_touching_line(center, radius, start, end):
    """Return True if the given circle intersects the given line segment.

    This tests the finite segment from *start* to *end*, not the infinite
    line through them.

    :param center: Center of the circle.
    :type center: Vector
    :param radius: Radius of the circle.
    :type radius: float
    :param start: The first end of the line segment.
    :type start: Vector
    :param end: The second end of the line segment.
    :type end: Vector
    """
    # Substitute the segment's parametric form P(t) = A + t*(B - A) into
    # the circle equation and solve the resulting quadratic in t.
    dx = end.x - start.x
    dy = end.y - start.y
    a = dx ** 2 + dy ** 2
    b = 2 * dx * (start.x - center.x) + 2 * dy * (start.y - center.y)
    c = (center.x ** 2 + center.y ** 2 + start.x ** 2 + start.y ** 2
         - 2 * (center.x * start.x + center.y * start.y) - radius ** 2)
    discriminant = b ** 2 - 4 * a * c
    if discriminant < 0:
        # The infinite line misses the circle entirely.
        return False
    if discriminant == 0:
        lo = hi = -b / float(2 * a)
    else:
        lo = (-b - math.sqrt(discriminant)) / float(2 * a)
        hi = (-b + math.sqrt(discriminant)) / float(2 * a)
    # The intersection is on the segment only if some root lies in [0, 1].
    if lo < 0 and hi < 0:
        return False
    if lo > 1 and hi > 1:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate(self, overwrite=False):
    """Generate a config file for an upstart service.

    Delegates shared generation work to the parent class, then renders
    the upstart-specific ``.conf`` file from its template.

    :param overwrite: passed through to the parent ``generate``.
    :return: the list of generated files (``self.files``).
    """
    super(Upstart, self).generate(overwrite=overwrite)
    # Upstart jobs are single '.conf' files derived from the template.
    svc_file_template = self.template_prefix + '.conf'
    self.svc_file_path = self.generate_into_prefix + '.conf'
    self.generate_file_from_template(svc_file_template, self.svc_file_path)
    return self.files
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch(self, default_path):
    """Internal fetch helper.

    Differs from :meth:`.fetch` in that it accepts a default path, used
    when no explicit path has been set on this query.
    """
    path = self._path or default_path
    # POST when there is a body to send, otherwise GET.
    method = 'POST' if self._post_params else 'GET'
    url = '/'.join(['http:/', self.spacegdn.endpoint, path])
    raw = requests.request(method, url, params=self._get_params,
                           data=self._post_params, headers=self._headers)
    wrapped = Response()
    # Only parse a JSON body on success; otherwise record a None payload.
    body = raw.json() if raw.ok else None
    wrapped.add(body, raw.status_code, raw.reason)
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tokens(self, si, k):
    '''Dereference the token pointers stored under key `k`.

    `si` is a stream item and `k` is a key in this feature.  Each stored
    sequence of (tag id, sentence id, token id) pointers is translated
    into the corresponding sequence of `Token` objects from `si`.
    '''
    for pointer_seq in self[k]:
        resolved = []
        for tagid, sid, tid in pointer_seq:
            resolved.append(si.body.sentences[tagid][sid].tokens[tid])
        yield resolved
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lines(fname):
    """Yield (line number, stripped line) pairs for file `fname`."""
    for raw in fileinput.input(fname):
        lineno = fileinput.filelineno()
        yield lineno, raw.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tonicdns_client(uri, method, token='', data='', keyword='', content='', raw_flag=False):
    """TonicDNS API client.

    :param uri: TonicDNS API URI
    :param method: TonicDNS API request method
    :param token: TonicDNS API authentication token ('' to authenticate)
    :param data: POST data for the TonicDNS API
    :param keyword: processing keyword for the response
    :param content: data-exist flag / request content
    :param raw_flag: True returns response data, False pretty-prints
    """
    res = request(uri, method, data, token)
    if token:
        if keyword == 'serial':
            # SOA serial processing: returns (current, new) SOA records.
            args = {"token": token, "keyword": keyword, "content": content}
            cur_soa, new_soa = response(uri, method, res, **args)
            return cur_soa, new_soa
        else:
            if content is None:
                # NOTE(review): this branch immediately calls
                # content.get('domain') on a value just tested to be None,
                # which would raise AttributeError.  The condition looks
                # inverted (perhaps it should be `if content:`) -- confirm
                # against the callers before changing.
                args = {"token": token, "keyword": keyword,
                        "content": content.get('domain')}
                response(uri, method, res, **args)
            else:
                # get sub command
                args = {"token": token, "keyword": keyword,
                        "raw_flag": raw_flag}
                data = response(uri, method, res, **args)
                return data
    else:
        # No token yet: this request performs authentication.
        args = {"token": token, "keyword": keyword}
        token = response(uri, method, res, **args)
        return token
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(uri, method, data, token=''):
    """Send a request to the TonicDNS API and return the response object.

    :param uri: TonicDNS API URI
    :param method: HTTP method to use (overrides the handler's default)
    :param data: payload, JSON-encoded and sent as UTF-8 bytes
    :param token: TonicDNS API authentication token, if already obtained

    Exits the process with status 1 on any URL/HTTP error.
    """
    socket.setdefaulttimeout(__timeout__)
    opener = urllib.build_opener(urllib.HTTPHandler)
    # encoding json
    encoded = json.JSONEncoder(object).encode(data)
    # encoding utf8
    data_utf8 = encoded.encode('utf-8')
    req = urllib.Request(uri, data=data_utf8)
    # When encoded(=data) is False, retrieve data as GET method.
    if encoded:
        req.add_header('Content-Type', 'application/json')
    if token:
        req.add_header('x-authentication-token', token)
    req.get_method = lambda: method
    try:
        res = opener.open(req)
        return res
    # BUG FIX: HTTPError is a subclass of URLError, so it must be caught
    # first; the original order made the HTTPError handler unreachable.
    except urllib.HTTPError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
    except urllib.URLError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response(uri, method, res, token='', keyword='', content='', raw_flag=False):
    """Process the response of a tonicdns_client request.

    :param uri: TonicDNS API URI
    :param method: TonicDNS API request method
    :param res: response object of the request
    :param token: TonicDNS API token ('' when this was the auth request)
    :param keyword: processing keyword ('serial', a search keyword, or '')
    :param content: JSON data (dict with 'domain') for serial handling
    :param raw_flag: True returns the parsed data, False pretty-prints
    :return: auth token, (current, new) SOA records, parsed data, or None
        when output is printed directly.
    """
    if method == 'GET' or (method == 'PUT' and not token):
        # response body
        data = res.read()
        data_utf8 = data.decode('utf-8')
        if token:
            datas = json.loads(data_utf8)
        else:
            # Authentication request: extract and return the token hash.
            token = json.loads(data_utf8)['hash']
            return token
        if keyword == 'serial':
            # filtering with keyword
            record = search_record(datas, 'SOA')[0]
            # if SOA record, remove priority unnecessary
            del record['priority']
            # override ttl
            record['ttl'] = int(record['ttl'])
            c = JSONConverter(content['domain'])
            new_record = c.get_soa(record, content)
            return record, new_record
        elif keyword:
            # '--search' option of 'get' subcommand
            records = search_record(datas, keyword)
            datas.update({"records": records})
        if uri.split('/')[3] == 'template':
            # 'tmpl_get' subcommand
            if len(uri.split('/')) == 5:
                # when specify template identifier
                #print_formatted(datas)
                utils.pretty_print(datas)
            else:
                # when get all templates
                for data in datas:
                    #print_formatted(data)
                    # NOTE(review): this prints the whole `datas` list on
                    # every iteration; the commented-out call above used
                    # the per-item `data` -- suspected bug, confirm intent.
                    utils.pretty_print(datas)
        else:
            # 'get' subcommand
            if raw_flag:
                return datas
            else:
                #print_formatted(datas)
                if len(uri.split('zone/')) > 1:
                    domain = uri.split('zone/')[1]
                else:
                    domain = ''
                utils.pretty_print(datas, keyword, domain)
    else:
        # response non JSON data
        data = res.read()
        print(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_record(datas, keyword):
    """Search records matching a keyword.

    :param datas: dict holding a 'records' list of record dicts
    :param keyword: search keyword.  A plain keyword matches substrings
        of 'name' or 'content', or equals 'type'.  A comma-separated
        keyword is split into name,type[,content] and all given parts
        must match.
    :return: list of matching record dicts
    """
    key_name = key_type = key_content = False
    if ',' in keyword:
        parts = keyword.split(',')
        if len(parts) == 3:
            key_content = parts[2]
        key_name = parts[0]
        key_type = parts[1]
    matches = []
    for record in datas['records']:
        if key_name and key_type:
            name_ok = record['name'].find(key_name) > -1
            type_ok = record['type'] == key_type
            if key_content:
                if name_ok and type_ok and record['content'].find(key_content) > -1:
                    matches.append(record)
            elif name_ok and type_ok:
                matches.append(record)
        elif (record['name'].find(keyword) >= 0 or
              record['content'].find(keyword) >= 0 or
              record['type'] == keyword):
            matches.append(record)
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_formatted(datas):
    """Pretty-print zone, record, or template data to stdout.

    :param datas: either a list of zone dicts (all-zones listing), a dict
        with a 'records' list (single zone), or a dict with an
        'identifier' (template).  Exits with status 1 when *datas* is
        empty, and with status 0 after an all-zones listing.
    """
    if not datas:
        print("No data")
        exit(1)
    if isinstance(datas, list):
        # get all zones
        # API /zone without :identifier
        hr()
        print('%-20s %-8s %-12s'
              % ('name', 'type', 'notified_serial'))
        hr()
        for record in datas:
            # print 'NAME'
            utils.print_inline("%(name)-20s" % record)
            # print 'TYPE' of SOA record
            utils.print_inline("%(type)-8s" % record)
            if record.get('notified_serial'):
                print("%(notified_serial)s" % record)
            else:
                print('')
        exit(0)
    elif datas.get('records'):
        print("domain: %(name)s" % datas)
        # Only MASTER zones carry a notified serial worth showing.
        if datas.get('type') == 'MASTER' and datas.get('notified_serial'):
            print("serial: %(notified_serial)s" % datas)
        print("DNS : %(type)s" % datas)
        # print header
        hr()
        print('%-33s %-5s %-25s %-5s %-3s'
              % ('name', 'type', 'content', 'ttl', 'prio'))
        hr()
        for record in datas.get('records'):
            # print 'NAME'
            utils.print_inline("%(name)-33s" % record)
            # print 'TYPE' of SOA record (ends the line)
            if record.get('type') == 'SOA':
                print("%(type)-5s" % record)
            # print 'TYPE' of non SOA record (stays inline)
            else:
                utils.print_inline("%(type)-5s" % record)
            # print 'CONTENT' of SOA record (indented continuation line)
            if record.get('type') == 'SOA':
                utils.print_inline(">\t\t%(content)-25s " % record)
            # print 'CONTENT' of non SOA record
            else:
                utils.print_inline("%(content)-25s" % record)
            # print TTL, and PRIORITY for MX, SRV record
            if record.get('priority'):
                utils.print_inline("%(ttl)5s" % record)
                print("%(priority)2s" % record)
            # print ttl for non-priority record
            else:
                print("%(ttl)5s " % record)
        hr()
    elif datas.get('identifier'):
        # for template
        print("identifier : %(identifier)s" % datas)
        print("description: %(description)s" % datas)
        hr()
        print('%-33s %-5s %-25s %-5s %-3s'
              % ('name', 'type', 'content', 'ttl', 'prio'))
        for record in datas.get('entries'):
            # print 'NAME'
            utils.print_inline("%(name)-33s" % record)
            # print 'TYPE' for SOA (ends the line)
            if record.get('type') == 'SOA':
                print("%(type)-5s" % record)
            # print 'TYPE' for non SOA (stays inline)
            else:
                utils.print_inline("%(type)-5s" % record)
            # print 'CONTENT' for SOA
            if record.get('type') == 'SOA':
                utils.print_inline("> %(content)-25s " % record)
            # print 'CONTENT' for non SOA
            else:
                utils.print_inline("%(content)-24s" % record)
            # print 'TTL', and 'PRIORITY'
            if record.get('priority') is not None:
                utils.print_inline("%(ttl)5s" % record)
                print("%(priority)2s" % record)
            # print plain TTL otherwise
            else:
                print("%(ttl)5s " % record)
        hr()
    else:
        print("No match records")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute_deferred_effects(self, pos):
    """ Run deferred effects whose POS prefix matches `pos` and return
    their summed cost.

    An effect registered for the 'NN' prefix fires for 'NN', 'NNS', and
    any other tag starting with 'NN'.  Fired effects are removed from
    the deferred list.
    """
    total = 0
    fired = []
    for entry in self.__dict__['deferred_effects']:
        trigger_pos, effect = entry
        if pos.startswith(trigger_pos):
            total += effect(self)
            fired.append(entry)
    # Remove after the loop: Python cannot delete from a list that is
    # being iterated over without corrupting the iteration.
    for entry in fired:
        self.__dict__['deferred_effects'].remove(entry)
    return total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_environment_variable(self, key, val):
    """ Set an environment variable, unless it is already bound to a
    different value (in which case raise Contradiction). """
    current = self.get_environment_variable(key)
    if current in (None, val):
        # Unset, or already set to the same value: safe to (re)assign.
        self.__dict__['environment_variables'][key] = val
    else:
        raise Contradiction("Could not set environment variable %s" % (key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_breadth_first(self, root=None):
    """ Traverses the belief state's structure breadth-first.

    Yields `root` itself first (defaulting to `self`), then recursively
    expands each yielded DictCell node into its sub-parts, layer by
    layer.  Traversal stops when an expansion pass produces no nodes
    beyond the one being expanded (``last == node``).
    """
    if root == None:
        root = self
    yield root
    last = root
    # Re-iterate the sequence just produced; every DictCell node in it is
    # expanded into its children, which form the next layer.
    for node in self.iter_breadth_first(root):
        if isinstance(node, DictCell):
            # recurse
            for subpart in node:
                yield subpart
                last = subpart
        if last == node:
            # No new nodes were generated past this one: traversal done.
            return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_path(self, test_function=None, on_targets=False):
    """ General helper method that iterates breadth-first over the
    referential_domain's cells and yields each key path (list of names)
    for which `test_function(name, structure)` is True.

    :param test_function: predicate taking (name, structure); defaults
        to accepting every cell.
    :param on_targets: when True, search the first target instance of
        the referential domain instead of `self`; yielded paths are
        prefixed with 'target'.
    """
    assert self.has_referential_domain(), "need context set"
    if not test_function:
        test_function = lambda x, y: True

    def find_path_inner(part, prefix):
        # Yield the path to `part` when it matches, then descend into
        # any DictCell sub-structures, extending the path prefix.
        name, structure = part
        if test_function(name, structure):
            yield prefix + [name]
        if isinstance(structure, DictCell):
            for sub_structure in structure:
                for prefix2 in find_path_inner(sub_structure,
                                               prefix[:] + [name]):
                    yield prefix2

    prefix = []
    if on_targets:
        # apply search to the first target
        results = []
        for _, instance in self.iter_singleton_referents():
            for part in instance:
                for entry in find_path_inner(part, prefix[:]):
                    results.append(['target'] + entry)
            while results:
                yield results.pop()
            break  # only use first instance
    else:
        # apply search to self
        for part in self:
            for entry in find_path_inner(part, prefix[:]):
                yield entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_nth_unique_value(self, keypath, n, distance_from, open_interval=True):
    """ Return the value at index `n` of the ordered unique values for
    `keypath`, raising Contradiction when `n` is out of bounds. """
    ordered = self.get_ordered_values(keypath, distance_from, open_interval)
    if not 0 <= n < len(ordered):
        raise Contradiction("n-th Unique value out of range: " + str(n))
    return ordered[n]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.