text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isBirthday(self):
    """Return True when today matches the user's birth month and day."""
    if not self.birthday:
        return False
    born = self.birthdate()
    now = date.today()
    return (born.month, born.day) == (now.month, now.day)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
    """Refresh this member's backing dictionary from LDAP.

    No-op when there is no LDAP connection; otherwise re-query LDAP for
    this member's uid and replace ``self.memberDict`` with the result.
    """
    if self.ldap:
        self.memberDict = self.ldap.member(self.uid)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consume(self, stream, source=None, chunksize=1):
    """Consume the given stream and return processing stats.

    :param stream: iterable of items to process
    :param source: optional label recorded in the stats under 'source'
    :param chunksize: chunk size forwarded to multiprocessing imap
    :rtype: dict keyed by the PROCESSING_* counter constants
    """
    stats = {
        PROCESSING_TOTAL: 0,
        PROCESSING_SKIPPED: 0,
        PROCESSING_SUCCESS: 0,
        PROCESSING_ERROR: 0
    }
    if source:
        stats['source'] = source
    def skip_unless(r):
        # Pass truthy items through; count falsy ones as skipped+total.
        # NOTE(review): with a process pool, the filter stages wrapped
        # around pool.imap_unordered run in worker processes, so counter
        # increments made there do not propagate back -- confirm skip
        # counts are accurate in the multiprocessing path.
        if r:
            return r
        stats[PROCESSING_SKIPPED] += 1
        stats[PROCESSING_TOTAL] += 1
    rs = ifilter(skip_unless, stream)
    if self.processes:
        # Parallel pipeline: each procedure stage feeds the next via imap.
        pool = multiprocessing.Pool(processes=self.processes)
        for f in self.procedures:
            rs = pool.imap_unordered(f, ifilter(skip_unless, rs),
                                     chunksize=chunksize)
    else:
        # Sequential (lazy) pipeline; Python 2 imap/ifilter.
        for f in self.procedures:
            rs = imap(f, ifilter(skip_unless, rs))
    start = time.time()
    i = 0
    try:
        while 1:
            processed = next(rs)
            # Convention: None => skipped, False => error, else success.
            if processed is None:
                stats[PROCESSING_SKIPPED] += 1
            elif processed is False:
                stats[PROCESSING_ERROR] += 1
            else:
                stats[PROCESSING_SUCCESS] += 1
                self.collect(processed)
            i += 1
            stats[PROCESSING_TOTAL] += 1
            if i % self.reporting_interval == 0:
                logging.info(" ===> Processed %dth item <=== ", i)
    except StopIteration:
        # Normal end of stream.
        pass
    except KeyboardInterrupt:
        logging.info("Stopped by user interruption at %dth item.", i)
        raise
    except:
        # Log any other failure before re-raising (Python 2 style access
        # to the current exception).
        e = sys.exc_info()[1]
        logging.error(e)
        raise
    finally:
        # Always tear down the pool and record elapsed time.
        if self.processes:
            pool.close()
            pool.join()
        stats[PROCESSING_TIME] = time.time() - start
        logging.info(
            'STATS: total=%d, skipped=%d, success=%d, error=%d on %f[sec]'
            ' from "%s"',
            stats[PROCESSING_TOTAL], stats[PROCESSING_SKIPPED],
            stats[PROCESSING_SUCCESS], stats[PROCESSING_ERROR],
            stats[PROCESSING_TIME], stats.get('source', 'unknown'))
    return stats
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reader(self, fp, encoding):
    """Wrap an opened file in the reader appropriate for its suffix.

    Supports ``.gz`` (reopened via gzip), ``.json`` (parsed eagerly),
    ``.csv``/``.tsv`` (csvreader), or a forced delimiter; anything else
    is returned untouched.

    :param fp: opened file
    :param encoding: encoding of the opened file
    :returns: a file-like/iterable object suited to the file type
    """
    suffix = os.path.splitext(fp.name)[1]
    if suffix == '.gz':
        fp.close()
        return gzip.open(fp.name)
    if suffix == '.json':
        return json.load(fp)
    if suffix == '.csv' or self.delimiter:
        return csvreader(fp, encoding, delimiter=self.delimiter or ',')
    if suffix == '.tsv':
        return csvreader(fp, encoding, delimiter='\t')
    return fp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self, files, encoding, chunksize=1):
    """Consume the given files (or stdin when none) and collect stats.

    :param files: list of opened file objects; falsy => read sys.stdin
    :param encoding: encoding of the opened files
    :param chunksize: chunk size forwarded to the streamer
    :rtype: list of per-source stats dicts
    """
    stats = []
    if files:
        logging.info("Input file count: %d", len(files))
        for fp in files:
            stream = self.reader(fp, encoding)
            parsed = self.streamer.consume(stream,
                                           source=fp.name, chunksize=chunksize)
            stats.append(parsed)
            # reader() may already have closed the original handle (.gz case).
            if not fp.closed:
                fp.close()
    else:
        stream = sys.stdin
        if self.delimiter:
            stream = csvreader(stream, encoding, delimiter=self.delimiter)
        parsed = self.streamer.consume(stream, chunksize=chunksize)
        stats.append(parsed)
    return stats
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(parser=None):
    """Initialize the module's global parser and subparsers.

    :param parser: optional pre-built argparse parser to reuse; when
        None, a plain ``argparse.ArgumentParser`` is created.
    """
    global p, subparsers
    if parser is None:
        p = argparse.ArgumentParser()
    else:
        p = parser
    # The unused local alias ``arg = p.add_argument`` was dead code and
    # has been removed.
    subparsers = p.add_subparsers()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self):
    """Authenticate with the PA OAuth system and yield the token.

    Generator used by callers as a context manager
    (``with self.auth.authenticate() as token``); presumably decorated
    with ``contextlib.contextmanager`` where defined -- TODO confirm.
    Re-authenticates when no token is cached or the cached one expired.
    """
    if self._auth_token is None or self._token_expiry < time.time():
        self._perform_auth()
    yield self._auth_token
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _query_api(self, method, url, fields=None, extra_headers=None, req_body=None):
    """Abstract HTTP queries to the API.

    :param method: HTTP verb, e.g. 'GET' or 'POST'
    :param url: fully built endpoint URL
    :param fields: optional fields passed to the urllib3-style client
    :param extra_headers: optional dict merged over the default headers
    :param req_body: optional raw request body
    :returns: decoded JSON response
    :raises ApiQueryError: on any non-200 response
    """
    with self.auth.authenticate() as token:
        logging.debug('PA Authentication returned token %s', token)
        headers = {
            'Authorization': 'Bearer %s' % (token,),
            'Realm': self.auth_realm
        }
        if extra_headers is not None:
            headers.update(extra_headers)
        logging.info('[%s] %s', method, url)
        if req_body is not None:
            response = self.http.request(method, url, fields, headers, body=req_body)
        else:
            response = self.http.request(method, url, fields, headers)
        if response.status != 200:
            # Log the failing payload instead of printing it to stdout
            # (the bare print() was a debug leftover).
            logging.warning('Got non-200 HTTP status from API: %d', response.status)
            logging.debug('API response body: %s', response.data)
            raise ApiQueryError("Failed to get API data", response.status)
        return json.loads(response.data.decode())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_jobtemplates(self):
    """Retrieve the list of jobTemplates for the current realm.

    :returns: the 'results' list from the API response; page size is
        capped at ``self.PAGE_SIZE``.
    """
    endpoint = self._build_url('jobTemplates', {
        'paginationPageSize': self.PAGE_SIZE
    })
    data = self._query_api('GET', endpoint)
    return data['results']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_job_template(self, template):
    """Create a job template.

    :param template: JSON-serializable dict describing the template
    :returns: the 'results' entry of the API response
    """
    endpoint = self._build_url('jobTemplates')
    data = self._query_api('POST',
                           endpoint,
                           None,
                           {'Content-Type': 'application/json'},
                           json.dumps(template))
    return data['results']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_job(self, job_template_uri):
    """Create a job from an existing job template.

    :param job_template_uri: URI of the template the job is based on
    :returns: the 'results' entry of the API response
    """
    endpoint = self._build_url('jobs')
    data = self._query_api('POST',
                           endpoint,
                           None,
                           {'Content-Type': 'application/json'},
                           json.dumps({'jobTemplateUri': job_template_uri}))
    return data['results']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_to_range(values, minimum=0.0, maximum=1.0):
    """Linearly rescale *values* so they span [minimum, maximum].

    :param values: non-empty sequence of numbers
    :param minimum: lower bound of the target range
    :param maximum: upper bound of the target range
    :returns: list of rescaled numbers

    When every input value is identical the scale is undefined; all
    outputs map to *minimum* instead of raising ZeroDivisionError.
    """
    normalized_values = []
    minimum_value = min(values)
    maximum_value = max(values)
    # float() avoids integer floor division under Python 2 when callers
    # pass integer values/bounds.
    denominator = float(maximum_value - minimum_value)
    if denominator == 0.0:
        return [minimum for _ in values]
    for value in values:
        numerator = value - minimum_value
        value_normalized = (maximum - minimum) * numerator / denominator + minimum
        normalized_values.append(value_normalized)
    return normalized_values
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_element_combinations_variadic(elements_specification):
    """Expand a per-position element specification into all combinations.

    For each prefix length of *elements_specification* (a list of lists
    of candidate elements), yield every Cartesian combination of that
    prefix, e.g. [[10, 20], [30, 40]] gives
    [10], [20], [10, 30], [10, 40], [20, 30], [20, 40].

    :returns: list of lists, shortest combinations first
    """
    combinations = []
    for depth in range(1, len(elements_specification) + 1):
        for combo in itertools.product(*elements_specification[:depth]):
            combinations.append(list(combo))
    return combinations
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correlation_linear(values_1, values_2, printout=None):
    """Compute the Pearson product-moment correlation of two samples.

    Returns ``(r, p_value)`` where r in [-1, +1] measures linear
    correlation and p_value is the 2-tailed significance; when
    *printout* is exactly True, returns a formatted report string
    instead.
    """
    r, p_value = scipy.stats.pearsonr(values_1, values_2)
    if printout is True:
        return (
            "Pearson linear correlation coefficient: {r}\n"
            "2-tailed p-value: {p_value}"
        ).format(
            r = r,
            p_value = p_value
        )
    return r, p_value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def propose_number_of_bins(values, binning_logic_system=None):
    """Propose a histogram bin count for *values*.

    Binning logic systems:

    - "Freedman-Diaconis": bin width proportional to the interquartile
      range divided by the cube root of the sample size.
    - "Scott" (default): bin width proportional to the standard
      deviation divided by the cube root of the sample size.

    Falls back to the number of unique values when the computed bin
    count is infinite or NaN (e.g. zero spread).

    :raises ValueError: for an unknown binning logic system
    """
    # Set the default binning logic system.
    if binning_logic_system is None:
        binning_logic_system = "Scott"
    # Fix: the exponent must be -1.0/3 (cube root).  Under Python 2
    # integer division the original ``(-1/3)`` evaluated to -1, making
    # the bin width proportional to 1/n instead of n**(-1/3).
    cube_root_exponent = -1.0 / 3
    # Engage the requested logic system.
    if binning_logic_system == "Freedman-Diaconis":
        bin_size = 2 * interquartile_range(values) * \
            len(values) ** cube_root_exponent
    elif binning_logic_system == "Scott":
        bin_size = 3.5 * standard_deviation(values) * \
            len(values) ** cube_root_exponent
    else:
        log.error("undefined binning logic system requested")
        raise(ValueError)
    number_of_bins = (max(values) - min(values)) / bin_size
    if numpy.isinf(number_of_bins) or numpy.isnan(number_of_bins):
        number_of_bins = len(set(values))  # number of unique values
    return int(round(number_of_bins))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extent(self):
    """Return the bounding box of the 2D data: [x_min, x_max, y_min, y_max]."""
    xs, ys = self.x, self.y
    return [min(xs), max(xs), min(ys), max(ys)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_symbol_by_slope( self, slope, default_symbol ):
""" return line oriented approximatively along the slope value """ |
if slope > math.tan(3 * math.pi / 8):
draw_symbol = "|"
elif math.tan(math.pi / 8) < slope < math.tan(3 * math.pi / 8):
draw_symbol = u"\u27cb" # "/"
elif abs(slope) < math.tan(math.pi / 8):
draw_symbol = "-"
elif slope < math.tan(-math.pi / 8) and\
slope > math.tan(-3 * math.pi / 8):
draw_symbol = u"\u27CD" # "\\"
elif slope < math.tan(-3 * math.pi / 8):
draw_symbol = "|"
else:
draw_symbol = default_symbol
return draw_symbol |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def limit_x(self, limit_lower=None, limit_upper=None):
    """Get or set the x limits of the current axes.

    x_min, x_max = limit_x()     # return the current limits
    limit_x(x_min, x_max)        # set the limits
    limit_x((x_min, x_max))      # a single iterable carries both bounds

    Setting pads the range by ``self.mod_x`` on each side and bumps a
    degenerate (equal) pair so the range is never empty.
    """
    if limit_lower is None and limit_upper is None:
        return self._limit_x
    elif hasattr(limit_lower, "__iter__"):
        # A single iterable supplies both bounds; extra items are ignored.
        self._limit_x = limit_lower[:2]
    else:
        self._limit_x = [limit_lower, limit_upper]
    # Avoid a zero-width range.
    if self._limit_x[0] == self._limit_x[1]:
        self._limit_x[1] += 1
    # Pad both sides by the axis modifier.
    self._limit_x[0] -= self.mod_x
    self._limit_x[1] += self.mod_x
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def limit_y(self, limit_lower=None, limit_upper=None):
    """Get or set the y limits of the current axes.

    y_min, y_max = limit_y()     # return the current limits
    limit_y(y_min, y_max)        # set the limits
    limit_y((y_min, y_max))      # a single iterable carries both bounds

    Setting pads the range by ``self.mod_y`` on each side and bumps a
    degenerate (equal) pair so the range is never empty.
    """
    if limit_lower is None and limit_upper is None:
        return self._limit_y
    elif hasattr(limit_lower, "__iter__"):
        # A single iterable supplies both bounds; extra items are ignored.
        self._limit_y = limit_lower[:2]
    else:
        self._limit_y = [limit_lower, limit_upper]
    # Avoid a zero-height range.
    if self._limit_y[0] == self._limit_y[1]:
        self._limit_y[1] += 1
    # Pad both sides by the axis modifier.
    self._limit_y[0] -= self.mod_y
    self._limit_y[1] += self.mod_y
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clip_line(self, line_pt_1, line_pt_2):
    """Clip the segment (line_pt_1, line_pt_2) to the canvas.

    Returns the clipped endpoint pair, or None when the segment lies
    entirely outside the drawing area.  Vertical and horizontal
    segments are clamped directly; other segments use a parametric
    (Liang-Barsky style) clip.

    NOTE(review): extent() returns [x_min, x_max, y_min, y_max], but
    the comparisons below index it as if the layout were
    [x_min, y_min, x_max, y_max] (e.g. ``extent[1]`` as a y bound) --
    confirm which layout is intended.
    """
    x_min = min(line_pt_1[0], line_pt_2[0])
    x_max = max(line_pt_1[0], line_pt_2[0])
    y_min = min(line_pt_1[1], line_pt_2[1])
    y_max = max(line_pt_1[1], line_pt_2[1])
    extent = self.extent()
    # Vertical segment: clamp the y span only.
    if line_pt_1[0] == line_pt_2[0]:
        return (
            (line_pt_1[0], max(y_min, extent[1])),
            (line_pt_1[0], min(y_max, extent[3]))
        )
    # Horizontal segment: clamp the x span only.
    if line_pt_1[1] == line_pt_2[1]:
        return (
            (max(x_min, extent[0]), line_pt_1[1]),
            (min(x_max, extent[2]), line_pt_1[1])
        )
    # Both endpoints inside: nothing to clip.
    if ((extent[0] <= line_pt_1[0] < extent[2]) and
            (extent[1] <= line_pt_1[1] < extent[3]) and
            (extent[0] <= line_pt_2[0] < extent[2]) and
            (extent[1] <= line_pt_2[1] < extent[3])):
        return line_pt_1, line_pt_2
    # Parameter values where the infinite line crosses each boundary.
    ts = [0.0,
          1.0,
          float(extent[0] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[2] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[1] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]),
          float(extent[3] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1])
          ]
    ts.sort()
    # NOTE(review): ``(ts[2] >= 1)`` appears twice in this rejection
    # test; the final term was probably meant to be ``ts[3] > 1``.
    if (ts[2] < 0) or (ts[2] >= 1) or (ts[3] < 0) or (ts[2] >= 1):
        return None
    # Evaluate the two clip parameters on both coordinates.
    result =\
        [(pt_1 + t * (pt_2 - pt_1))\
         for t in (ts[2], ts[3])\
         for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)]
    return (result[:2], result[2:])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dyndns_records(login, password):
    """Get the set of dynamic DNS records associated with this account.

    :returns: frozenset of DnsRecord instances
    :raises ApiError: when the response cannot be parsed into records

    NOTE(review): ``response.content`` is ``bytes`` on Python 3; the
    string splitting here assumes a str body (Python 2 requests usage).
    """
    params = dict(action='getdyndns', sha=get_auth_key(login, password))
    response = requests.get('http://freedns.afraid.org/api/', params=params, timeout=timeout)
    # One whitespace-separated token per record, fields pipe-delimited.
    raw_records = (line.split('|') for line in response.content.split())
    try:
        records = frozenset(DnsRecord(*record) for record in raw_records)
    except TypeError:
        # Wrong field count for DnsRecord(*record) => unexpected body.
        raise ApiError("Couldn't parse the server's response",
                       response.content)
    return records
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_continuously(records, update_interval=600):
    """Update `records` every `update_interval` seconds.

    Runs forever; per-record API/network failures are deliberately
    swallowed so one failing record cannot stop the loop -- it is
    retried on the next cycle.
    """
    while True:
        for record in records:
            try:
                record.update()
            except (ApiError, RequestException):
                pass  # best-effort: retry on the next cycle
        time.sleep(update_interval)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """Update the remote DNS record by requesting its endpoint URL.

    On success the IP address echoed back by the server is stored in
    ``self.ip``.

    :raises ApiError: when no IP address can be found in the response
    """
    response = requests.get(self.update_url, timeout=timeout)
    match = ip_pattern.search(response.content)
    # response must contain an ip address, or else we can't parse it
    if not match:
        raise ApiError("Couldn't parse the server's response",
                       response.content)
    self.ip = match.group(0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_app(app, api):
    """Register the knowledge REST resources and their URLs on *api*.

    Routes:
    - /api/knowledge                      -> KnwKBAllResource
    - /api/knowledge/<slug>               -> KnwKBResource
    - /api/knowledge/<slug>/mappings      -> KnwKBMappingsResource
    - /api/knowledge/<slug>/mappings/to   -> KnwKBMappingsToResource
    - /api/knowledge/<slug>/mappings/from -> KnwKBMappingsFromResource
    - any other /api/knowledge/<slug>/... -> "Method Not Allowed"

    NOTE(review): the ``app`` parameter is unused here -- presumably
    kept for a common setup_app(app, api) signature; confirm.
    """
    api.add_resource(
        KnwKBAllResource,
        '/api/knowledge'
    )
    api.add_resource(
        KnwKBResource,
        '/api/knowledge/<string:slug>'
    )
    api.add_resource(
        KnwKBMappingsResource,
        '/api/knowledge/<string:slug>/mappings'
    )
    api.add_resource(
        KnwKBMappingsToResource,
        '/api/knowledge/<string:slug>/mappings/to'
    )
    api.add_resource(
        KnwKBMappingsFromResource,
        '/api/knowledge/<string:slug>/mappings/from'
    )
    # for other urls, return "Method Not Allowed"
    api.add_resource(
        NotImplementedKnowledegeResource,
        '/api/knowledge/<string:slug>/<path:foo>'
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, slug):
    """Return the knowledge base identified by *slug* with its mappings.

    Url parameters:
    - from: filter "mappings from"
    - to: filter "mappings to"
    - page
    - per_page
    - match_type: s=substring, e=exact, sw=startswith
    - sortby: 'from' or 'to'
    """
    kb = api.get_kb_by_slug(slug)
    # check if is accessible from api
    check_knowledge_access(kb)
    parser = reqparse.RequestParser()
    parser.add_argument(
        'from', type=str,
        help="Return only entries where key matches this.")
    parser.add_argument(
        'to', type=str,
        help="Return only entries where value matches this.")
    parser.add_argument('page', type=int,
                        help="Require a specific page")
    parser.add_argument('per_page', type=int,
                        help="Set how much result per page")
    parser.add_argument('match_type', type=str,
                        help="s=substring, e=exact, sw=startswith")
    parser.add_argument('sortby', type=str,
                        help="the sorting criteria ('from' or 'to')")
    args = parser.parse_args()
    kb_dict = kb.to_dict()
    # Embed the (filtered, paginated) mappings in the kb payload.
    kb_dict['mappings'] = KnwKBMappingsResource \
        .search_mappings(kb=kb, key=args['from'], value=args['to'],
                         match_type=args['match_type'],
                         sortby=args['sortby'], page=args['page'],
                         per_page=args['per_page'])
    return kb_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_mappings(kb, key=None, value=None, match_type=None, sortby=None, page=None, per_page=None):
    """Search the key/value mappings of a knowledge base.

    Only 'written_as' knowledge bases are searchable; any other kbtype
    returns an empty list.  Defaults: substring match ('s'), sort by
    'to', page 1, 10 results per page.
    """
    if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
        return pagination.RestfulSQLAlchemyPagination(
            api.query_kb_mappings(
                kbid=kb.id,
                key=key or '',
                value=value or '',
                match_type=match_type or 's',
                sortby=sortby or 'to',
            ), page=page or 1, per_page=per_page or 10
        ).items
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, slug):
    """Get the list of mappings for the knowledge base *slug*.

    Url parameters:
    - from: filter "mappings from"
    - to: filter "mappings to"
    - page
    - per_page
    - match_type: s=substring, e=exact, sw=startswith
    - sortby: 'from' or 'to'
    """
    kb = api.get_kb_by_slug(slug)
    # check if is accessible from api
    check_knowledge_access(kb)
    parser = reqparse.RequestParser()
    parser.add_argument(
        'from', type=str,
        help="Return only entries where 'from' matches this.")
    parser.add_argument(
        'to', type=str,
        help="Return only entries where 'to' matches this.")
    parser.add_argument('page', type=int,
                        help="Require a specific page")
    parser.add_argument('per_page', type=int,
                        help="Set how much result per page")
    parser.add_argument('match_type', type=str,
                        help="s=substring, e=exact, sw=startswith")
    parser.add_argument('sortby', type=str,
                        help="the sorting criteria ('from' or 'to')")
    args = parser.parse_args()
    return KnwKBMappingsResource \
        .search_mappings(kb, args['from'], args['to'],
                         args['match_type'], args['sortby'],
                         args['page'], args['per_page'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_list(kb, value=None, match_type=None, page=None, per_page=None, unique=False):
    """Search "mappings to" values for a knowledge base.

    For 'written_as' KBs the search runs in SQL with pagination; for
    'dynamic' KBs values come from ``api.get_kbd_values`` and are
    paginated in memory.  Any other kbtype returns an empty list.
    """
    # init
    page = page or 1
    per_page = per_page or 10
    if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
        # get the base query
        query = api.query_kb_mappings(
            kbid=kb.id,
            value=value or '',
            match_type=match_type or 's'
        ).with_entities(models.KnwKBRVAL.m_value)
        # if you want a 'unique' list
        if unique:
            query = query.distinct()
        # run query and paginate
        return [item.m_value for item in
                pagination.RestfulSQLAlchemyPagination(
                    query, page=page or 1,
                    per_page=per_page or 10
                ).items]
    elif kb.kbtype == models.KnwKB.KNWKB_TYPES['dynamic']:
        items = api.get_kbd_values(kb.name, value)
        return pagination.RestfulPagination(
            page=page, per_page=per_page,
            total_count=len(items)
        ).slice(items)
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_list(kb, from_=None, match_type=None, page=None, per_page=None, unique=False):
    """Search "mappings from" keys for a knowledge base.

    Only 'written_as' KBs are searchable; any other kbtype returns an
    empty list.  Defaults: substring match, page 1, 10 per page.
    """
    # init
    page = page or 1
    per_page = per_page or 10
    if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
        # get the base query
        query = api.query_kb_mappings(
            kbid=kb.id,
            key=from_ or '',
            match_type=match_type or 's'
        ).with_entities(models.KnwKBRVAL.m_key)
        # if you want a 'unique' list
        if unique:
            query = query.distinct()
        # run query and paginate
        return [item.m_key for item in
                pagination.RestfulSQLAlchemyPagination(
                    query, page=page or 1,
                    per_page=per_page or 10
                ).items]
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, slug):
    """Get the list of "mappings from" keys for knowledge base *slug*.

    Url parameters:
    - unique: if set, return a unique list
    - filter: filter "mappings from"
    - page
    - per_page
    - match_type: s=substring, e=exact, sw=startswith
    """
    kb = api.get_kb_by_slug(slug)
    # check if is accessible from api
    check_knowledge_access(kb)
    parser = reqparse.RequestParser()
    parser.add_argument(
        'unique', type=bool,
        help="The list contains unique names of 'mapping to'")
    parser.add_argument(
        'filter', type=str,
        help="Return only entries where 'from' matches this.")
    parser.add_argument('page', type=int,
                        help="Require a specific page")
    parser.add_argument('per_page', type=int,
                        help="Set how much result per page")
    parser.add_argument('match_type', type=str,
                        help="s=substring, e=exact, sw=startswith")
    args = parser.parse_args()
    return KnwKBMappingsFromResource \
        .search_list(kb, args['filter'],
                     args['match_type'],
                     args['page'], args['per_page'], args['unique'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def manage_mep(self, mep_json):
    '''
    Import a MEP from a parltrack JSON dict as a Representative.

    Sends the ``representative_pre_import`` signal first; any receiver
    returning False vetoes the import (used e.g. to skip inactive
    MEPs), in which case None is returned.  Otherwise the
    Representative is looked up (or created) by a slug derived from the
    name and birth date, its details/mandates/contacts are imported,
    and it is returned.
    '''
    # Some versions of memopol will connect to this and skip inactive meps.
    responses = representative_pre_import.send(sender=self,
                                               representative_data=mep_json)
    for receiver, response in responses:
        if response is False:
            logger.debug(
                'Skipping MEP %s', mep_json['Name']['full'])
            return
    changed = False
    # Slug is "<full name>-<birth date>"; falls back to "sur family"
    # when no 'full' name is present.
    slug = slugify('%s-%s' % (
        mep_json["Name"]["full"] if 'full' in mep_json["Name"]
        else mep_json["Name"]["sur"] + " " + mep_json["Name"]["family"],
        _parse_date(mep_json["Birth"]["date"])
    ))
    try:
        representative = Representative.objects.get(slug=slug)
    except Representative.DoesNotExist:
        representative = Representative(slug=slug)
        changed = True
    # Save representative attributes
    self.import_representative_details(representative, mep_json, changed)
    self.add_mandates(representative, mep_json)
    self.add_contacts(representative, mep_json)
    # NOTE(review): ``unicode`` is Python 2 only.
    logger.debug('Imported MEP %s', unicode(representative))
    return representative
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calc_dir_size(path):
""" Calculate size of all files in `path`. Args: path (str):
Path to the directory. Returns: int: Size of the directory in bytes. """ |
dir_size = 0
for (root, dirs, files) in os.walk(path):
for fn in files:
full_fn = os.path.join(root, fn)
dir_size += os.path.getsize(full_fn)
return dir_size |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_localized_fn(path, root_dir):
""" Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str):
Absolute path beginning in `root_dir`. root_dir (str):
Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS. """ |
local_fn = path
if path.startswith(root_dir):
local_fn = path.replace(root_dir, "", 1)
if not local_fn.startswith("/"):
return "/" + local_fn
return local_fn |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose_info(root_dir, files, hash_fn, aleph_record, urn_nbn=None):
    """Compose the ``info`` XML file for a SIP package.

    :param root_dir: absolute path to the package root directory
    :param files: absolute paths of all ebook and metadata files
    :param hash_fn: absolute path to the MD5 checksum file
    :param aleph_record: string with the Aleph MARC record (metadata)
    :param urn_nbn: optional URN:NBN identifier
    :returns: UTF-8 encoded XML string
    """
    # compute hash for hashfile
    with open(hash_fn) as f:
        hash_file_md5 = hashlib.md5(f.read()).hexdigest()
    schema_location = "http://www.ndk.cz/standardy-digitalizace/info11.xsd"
    document = odict[
        "info": odict[
            "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "@xsi:noNamespaceSchemaLocation": schema_location,
            "created": time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()),
            "metadataversion": "1.0",
            "packageid": _path_to_id(root_dir),
            # not used in SIP
            # "mainmets": _get_localized_fn(metadata_fn, root_dir),
            "titleid": None,
            "collection": "edeposit",
            "institution": None,
            "creator": None,
            "size": _calc_dir_size(root_dir) / 1024,  # size in kiB
            "itemlist": odict[
                "@itemtotal": "2",
                "item": map(
                    lambda x: _get_localized_fn(x, root_dir),
                    files
                )
            ],
            "checksum": odict[
                "@type": "MD5",
                "@checksum": hash_file_md5,
                "#text": _get_localized_fn(hash_fn, root_dir)
            ],
        ]
    ]
    # get informations from MARC record
    record = MARCXMLRecord(aleph_record)
    # get publisher info -- decode only when the record actually has one
    # (the original decoded before the check, crashing with
    # ``unicode(None, "utf-8")`` on records without a publisher)
    if record.get_publisher(None):
        publisher = unicode(record.get_publisher(), "utf-8")
        document["info"]["institution"] = remove_hairs(publisher)
    # get <creator> info
    creator = record.getDataRecords("910", "a", False)
    alt_creator = record.getDataRecords("040", "d", False)
    document["info"]["creator"] = creator[0] if creator else alt_creator[-1]
    # collect informations for <titleid> tags
    isbns = record.get_ISBNs()
    ccnb = record.getDataRecords("015", "a", False)
    ccnb = ccnb[0] if ccnb else None
    if any([isbns, ccnb, urn_nbn]):  # TODO: issn
        document["info"]["titleid"] = []
        for isbn in isbns:
            document["info"]["titleid"].append({
                "@type": "isbn",
                "#text": isbn
            })
        if ccnb:
            document["info"]["titleid"].append({
                "@type": "ccnb",
                "#text": ccnb
            })
        if urn_nbn:
            document["info"]["titleid"].append({
                "@type": "urnnbn",
                "#text": urn_nbn
            })
    # TODO: later
    # if issn:
    #     document["info"]["titleid"].append({
    #         "@type": "issn",
    #         "#text": issn
    #     })
    # remove unset options: drop entries whose VALUE is still None.
    # (The original tested ``key is None`` -- never true for keys -- and
    # deleted from the outer dict, so unset entries like <titleid/>
    # leaked into the output XML.)
    unset_keys = [
        key
        for key in document["info"]
        if document["info"][key] is None
    ]
    for key in unset_keys:
        del document["info"][key]
    xml_document = xmltodict.unparse(document, pretty=True)
    return xml_document.encode("utf-8")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(argv=None):
    """Main CLI entry point.

    :param argv: argument list; defaults to ``sys.argv[1:]``
    :returns: whatever the CLI's ``run`` returns (its exit status)
    """
    cli = InfrascopeCLI()
    return cli.run(sys.argv[1:] if argv is None else argv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_app(configobj=ProdConfig):
    """Create and configure a Flask application.

    :param configobj: configuration object/class loaded via
        ``app.config.from_object`` (defaults to ``ProdConfig``)
    :returns: the configured ``Flask`` instance
    """
    app = Flask(__name__)
    app.config.from_object(configobj)
    configure_blueprints(app)
    configure_extensions(app)
    configure_callbacks(app)
    configure_filters(app)
    configure_error_handlers(app)
    return app
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure_extensions(app):
    """Configure application extensions (DB, assets, login, cache, ...)."""
    db.init_app(app)
    # Honour X-Forwarded-* headers when running behind a reverse proxy.
    app.wsgi_app = ProxyFix(app.wsgi_app)
    assets.init_app(app)
    # NOTE(review): dict.iteritems is Python 2 only.
    for asset in bundles:
        for (name, bundle) in asset.iteritems():
            assets.register(name, bundle)
    login_manager.login_view = 'frontend.login'
    login_manager.login_message_category = 'info'
    @login_manager.user_loader
    def load_user(id):
        # Flask-Login stores the user id as a string; convert back to int.
        return User.query.get(int(id))
    login_manager.init_app(app)
    cache.init_app(app)
    migrate.init_app(app, db)
    toolbar.init_app(app)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure_callbacks(app):
    """Configure application callbacks (before-request hooks)."""
    @app.before_request
    def before_request():
        """Load the menu configuration into the session before every
        request (cached version when possible, else reloaded from the
        database)."""
        from flask import session
        #g.menusystem = helper.generate_menusystem()
        session['menusystem'] = helper.generate_menusystem()
        # NOTE(review): Python 2 print statement; looks like a debug
        # leftover -- consider removing or switching to logging.
        print session['menusystem']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def configure_error_handlers(app):
    """Register an HTML-rendering handler for every known error code."""
    def render_error(error):
        # The handler reads error.code at call time, so one closure
        # serves every registered code.
        return (render_template('errors/%s.html' % error.code,
                title=error_messages[error.code], code=error.code), error.code)
    # NOTE(review): dict.iteritems is Python 2 only.
    for (errcode, title) in error_messages.iteritems():
        app.errorhandler(errcode)(render_error)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _strip_colors(self, message: str) -> str: """ Remove all of the color tags from this message. """ |
for c in self.COLORS:
message = message.replace(c, "")
return message |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_str(window, line_num, str):
    """ attempt to draw str on screen and ignore errors if they occur """
    # NOTE(review): parameter `str` shadows the builtin; renaming it would
    # change the keyword-argument interface, so it is kept as-is.
    try:
        window.addstr(line_num, 0, str)
    except curses.error:
        # curses raises when drawing outside the window bounds; this is a
        # best-effort draw, so such errors are deliberately ignored.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_file_to_descriptor(input_queue, descriptor):
    """Pop one item from *input_queue* and write it to *descriptor*.

    Returns True when the item was written successfully.  When the queue
    stays empty for 2 seconds the descriptor is closed and False is
    returned; a failed write also returns False.
    """
    try:
        file_name = input_queue.get(timeout=2)
    except Empty:
        # No more files to process: release the descriptor.
        descriptor.close()
        return False

    try:
        descriptor.write("{}\n".format(file_name))
        descriptor.flush()
    except IOError:
        return False

    input_queue.task_done()
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smooth(l):
    """Yields a generator which smooths all elements as if the given list
    was of depth 1.

    Nested lists and tuples are flattened recursively; any other value
    (including strings) is yielded as a single leaf.

    **Examples**: ::

        list(auxly.listy.smooth([1,[2,[3,[4]]]]))
        # [1, 2, 3, 4]
    """
    # isinstance replaces the old `type(l) in [list, tuple]` check; it is
    # idiomatic and additionally accepts list/tuple subclasses.
    if isinstance(l, (list, tuple)):
        for item in l:
            for leaf in smooth(item):
                yield leaf
    else:
        yield l
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def s3path(self, rel_path):
    """Return the path as an S3 schema"""
    # Python 2 module (urllib.parse in Python 3).
    import urlparse
    # Resolve the public URL first, then rewrite scheme and host.
    path = self.path(rel_path, public_url=True)
    parts = list(urlparse.urlparse(path))
    parts[0] = 's3'              # scheme
    parts[1] = self.bucket_name  # netloc becomes the bucket name
    return urlparse.urlunparse(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_stream(self, rel_path, cb=None, return_meta=False):
    """Return the object as a stream

    :param rel_path: key path relative to this cache.
    :param cb: optional boto progress callback (invoked ~100 times).
    :param return_meta: when True, attach key metadata (plus size/etag)
                        to the returned file-like object.
    :returns: a MetadataFlo wrapping an in-memory buffer, or None when
              the key does not exist (including S3 404 responses).
    """
    from boto.s3.key import Key  # NOTE(review): imported but unused here
    from boto.exception import S3ResponseError
    import StringIO  # Python 2 module
    from . import MetadataFlo

    b = StringIO.StringIO()
    try:
        k = self._get_boto_key(rel_path)
        if not k:
            return None
        # Download the whole object into the in-memory buffer.
        k.get_contents_to_file(b, cb=cb, num_cb=100)
        b.seek(0)  # rewind so callers can read from the start
        if return_meta:
            d = k.metadata
            d['size'] = k.size
            d['etag'] = k.etag
        else:
            d = {}
        return MetadataFlo(b, d)
    except S3ResponseError as e:
        # Missing key is not an error for callers; anything else is.
        if e.status == 404:
            return None
        else:
            raise e
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, path=None, with_metadata=False, include_partitions=False):
    '''Get a list of all of bundle files in the cache. Does not return partition files'''
    # NOTE: method name shadows the builtin `list`; kept for API compatibility.
    import json
    sub_path = self.prefix + '/' + path.strip('/') if path else self.prefix
    l = {}
    for e in self.bucket.list(sub_path):
        # Strip the cache prefix to get the cache-relative path.
        path = e.name.replace(self.prefix, '', 1).strip('/')
        # Skip private/meta entries.
        if path.startswith('_') or path.startswith('meta'):
            continue
        # TODO 'include_partitions' doesn't make any sense outside of ambry
        if not include_partitions and path.count('/') > 1:
            continue  # partition files
        if with_metadata:
            d = self.metadata(path)
            # identity is stored as a JSON string; decode it for callers.
            if d and 'identity' in d:
                d['identity'] = json.loads(d['identity'])
        else:
            d = {}
        d['caches'] = [self.repo_id]
        if path:
            l[path] = d
    return l
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _send_streamify(self, frame):
""" Helper method to streamify a frame. """ |
# Get the state and framer
state = self._send_framer_state
framer = self._send_framer
# Reset the state as needed
state._reset(framer)
# Now pass the frame through streamify() and return the result
return framer.streamify(state, frame) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _recv_frameify(self, data):
    """ Helper method to frameify a stream. """
    # Get the state and framer
    state = self._recv_framer_state
    framer = None
    # Grab off as many frames as we can
    frameify = None
    while True:
        # Check if we need to change framers (the application may swap
        # self._recv_framer while we are delivering frames).
        if framer != self._recv_framer:
            # Notify the currently-running framer generator so it can
            # unwind cleanly before we abandon it.
            if frameify:
                try:
                    frameify.throw(framers.FrameSwitch)
                except StopIteration:
                    pass
            # Set up the new framer
            framer = self._recv_framer
            state._reset(framer)
            frameify = framer.frameify(state, data)
            data = ''  # Now part of the state's buffer
        # Get the next frame (Python 2 generator protocol: .next()).
        try:
            frame = frameify.next()
        except StopIteration:
            # OK, we've extracted as many frames as we can
            break
        # OK, send the frame to the application
        if self._application:
            self._application.recv_frame(frame)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def closed(self, error=None):
    """ Notify the application that the connection has been closed.

    :param error: The exception which has caused the connection to be
                  closed.  If the connection has been closed due to an
                  EOF, pass ``None``.
    """
    if not self._application:
        return
    try:
        self._application.closed(error)
    except Exception:
        # Application callbacks must never break connection teardown.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_framer(self, value):
    """ Set the framer in use for the sending side of the connection.

    The framer state will be reset next time the framer is used.
    """
    if isinstance(value, framers.Framer):
        self._send_framer = value
    else:
        raise ValueError("framer must be an instance of tendril.Framer")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recv_framer(self, value):
    """ Set the framer in use for the receiving side of the connection.

    The framer state will be reset next time the framer is used.
    """
    if isinstance(value, framers.Framer):
        self._recv_framer = value
    else:
        raise ValueError("framer must be an instance of tendril.Framer")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def framers(self, value):
    """ Set the framers in use for the connection.

    Accepts either a single Framer (used for both directions) or a
    2-sequence of (send_framer, recv_framer).  The framer states will be
    reset next time their respective framer is used.
    """
    # Handle sequence values
    # NOTE(review): collections.Sequence is removed in Python 3.10+
    # (moved to collections.abc); fine on the Python 2 this code targets.
    if isinstance(value, collections.Sequence):
        if len(value) != 2:
            raise ValueError('need exactly 2 values to unpack')
        elif (not isinstance(value[0], framers.Framer) or
              not isinstance(value[1], framers.Framer)):
            raise ValueError("framer must be an instance of "
                             "tendril.Framer")
        self._send_framer, self._recv_framer = value
    # If we have a single value, assume it's a framer
    else:
        if not isinstance(value, framers.Framer):
            raise ValueError("framer must be an instance of "
                             "tendril.Framer")
        self._send_framer = value
        self._recv_framer = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def framers(self):
    """ Reset both framers for the connection to this connection's
    default framer.

    The framer states will be reset next time their respective framer
    is used.
    """
    default = self.default_framer()
    self._recv_framer = default
    self._send_framer = default
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def application(self, value):
    """Update the application."""
    if value is None:
        # None is always accepted: it detaches the application.
        self._application = None
    elif isinstance(value, application.Application):
        self._application = value
    else:
        raise ValueError("application must be an instance of "
                         "tendril.Application")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build():
    """ Builds pages given template.jinja, style.css, and content.rst
    produces index.html. """
    # Sanity-check that the required input files exist.
    test_files()
    # Parse the reStructuredText content into HTML fragments.
    with open('content.rst') as f:
        content = publish_parts(f.read(), writer_name='html')
    title = content['title']
    body = content['html_body'].replace('\n',' ')
    # Render the Jinja template from the current working directory.
    with open('template.jinja', 'r') as f:
        loader = FileSystemLoader(getcwd())
        env= Environment(loader=loader)
        template = env.get_template('template.jinja')
        page = template.render(title=title,
                               content=body)
    # Write the final page.
    with open('index.html', 'w') as f:
        f.write(page)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(directory=None):
    """ Initializes a new site in the `directory`.

    Current working dir if directory is None.  The directory is created
    when missing, and any of the sample files (style.css,
    template.jinja, content.rst) not already present are added.
    """
    if directory is None:
        # No target given: populate the current working directory.
        directory = ''
    elif not path.exists(directory):
        makedirs(directory)
    else:
        # BUG FIX: previously an existing directory was announced but then
        # `directory` was reset to '' so files landed in the cwd instead of
        # the requested directory (and None produced a bogus message).
        print('%s already exists, populating with template files' % (directory))
    if not path.isfile(path.join(directory, 'style.css')):
        grab('style.css', directory)
        print('Added sample style')
    if not path.isfile(path.join(directory, 'template.jinja')):
        grab('template.jinja', directory)
        print('Added sample template.jinja')
    if not path.isfile(path.join(directory, 'content.rst')):
        grab('content.rst', directory)
        print('Added sample content.rst')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def callback(self, callback, *args, **kwds):
    """ Registers an arbitrary callback and arguments.

    Cannot suppress exceptions.
    """
    wrapper = _CloseDummy(callback, args, kwds)
    return self << wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pop_all(self):
    """ Preserve the context stack by transferring it to a new instance.

    :returns: the new ExitStack that now owns the popped contexts; this
              instance continues with a fresh, empty context list.
    """
    ret = ExitStack()
    ret._context_stack.append(self._context_stack.pop())
    self._context_stack.append([])
    # BUG FIX: the new stack was created and populated but never returned,
    # so callers could not close the transferred contexts.
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, **kwargs):
    """Update fields on the model.

    :param kwargs: The model attribute values to update the model with.
    :returns: the model instance, for chaining.
    """
    self.validate(**kwargs)
    for name in kwargs:
        setattr(self, name, kwargs[name])
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(cls, partial=True, **kwargs):
    """ Validate kwargs before setting attributes on the model.

    :param partial: when False, every model column missing from kwargs is
                    validated against None as well (full validation).
    :raises ValidationErrors: mapping column name -> list of messages.
    """
    data = kwargs
    if not partial:
        # Pad missing columns with None so their validators also run.
        data = dict(**kwargs, **{col.name: None for col in cls.__table__.c
                                 if col.name not in kwargs})
    errors = defaultdict(list)
    for name, value in data.items():
        for validator in cls._get_validators(name):
            try:
                validator(value)
            except ValidationError as e:
                # Annotate the error with its origin before collecting it.
                e.model = cls
                e.column = name
                errors[name].append(str(e))
    if errors:
        raise ValidationErrors(errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
    '''Obtain all Fixture objects of the provided classes.

    **Parameters**

    :``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
    :``context``:         a ``torment.TestContext`` to initialize Fixtures with

    **Return Value(s)**

    Instantiated ``torment.fixtures.Fixture`` objects for each individual
    fixture class that inherits from one of the provided classes.
    '''
    pending = list(copy.copy(fixture_classes))
    found = []  # type: Iterable[torment.fixtures.Fixture]

    while pending:
        candidate = pending.pop()
        children = candidate.__subclasses__()

        if children:
            # Interior node of the class tree: keep walking downwards.
            pending.extend(children)
        elif candidate not in fixture_classes:
            # Leaf class that is not one of the originals: instantiate it.
            found.append(candidate(context))

    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(namespace, base_classes: Tuple[type], properties: Dict[str, Any]) -> None:
    '''Register a Fixture class in namespace with the given properties.

    Creates a Fixture class (not object) and inserts it into the provided
    namespace.  The properties is a dict but allows functions to reference
    other properties and acts like a small DSL (domain specific language).
    This is really just a declarative way to compose data about a test
    fixture and make it repeatable.

    Files calling this function are expected to house one or more Fixtures
    and have a name that ends with a UUID without its hyphens.  For example:
    foo_38de9ceec5694c96ace90c9ca37e5bcb.py.  This UUID is used to uniquely
    track the Fixture through the test suite and allow Fixtures to scale
    without concern.

    **Parameters**

    :``namespace``:    dictionary to insert the generated class into
    :``base_classes``: list of classes the new class should inherit
    :``properties``:   dictionary of properties with their values

    Properties can have the following forms:

    :functions: invoked with the Fixture as it's argument
    :classes:   instantiated without any arguments (unless it subclasses
                ``torment.fixtures.Fixture`` in which case it's passed
                context)
    :literals:  any standard python type (i.e. int, str, dict)

    .. note::
        function execution may error (this will be emitted as a logging
        event).  functions will continually be tried until they resolve or
        the same set of functions is continually erroring.  These functions
        that failed to resolve are left in tact for later processing.

    Properties by the following names also have defined behavior:

    :description: added to the Fixture's description as an addendum
    :error:       must be a dictionary with three keys:
                  :class:  class to instantiate (usually an exception)
                  :args:   arguments to pass to class initialization
                  :kwargs: keyword arguments to pass to class initialization
    :mocks:       dictionary mapping mock symbols to corresponding values

    Properties by the following names are reserved and should not be used:

    * name
    '''
    # ensure we have a clean copy of the data
    # and won't stomp on re-uses elsewhere in
    # someone's code
    props = copy.deepcopy(properties)
    desc = props.pop('description', None)  # type: Union[str, None]
    # Identify the calling fixture file; its trailing UUID names the class.
    caller_frame = inspect.stack()[1]
    caller_file = caller_frame[1]
    caller_module = inspect.getmodule(caller_frame[0])
    my_uuid = uuid.UUID(os.path.basename(caller_file).replace('.py', '').rsplit('_', 1)[-1])
    class_name = _unique_class_name(namespace, my_uuid)

    @property
    def description(self) -> str:
        # Extend the inherited description with this fixture's addendum.
        _ = super(self.__class__, self).description
        if desc is not None:
            _ += '—' + desc
        return _

    def __init__(self, context: 'torment.TestContext') -> None:
        super(self.__class__, self).__init__(context)
        functions = {}
        for name, value in props.items():
            if name == 'error':
                # 'error' is special-cased: instantiate the declared class.
                self.error = value['class'](*value.get('args', ()), **value.get('kwargs', {}))
                continue
            if inspect.isclass(value):
                # Fixture subclasses receive the context; others do not.
                if issubclass(value, Fixture):
                    value = value(self.context)
                else:
                    value = value()
            if inspect.isfunction(value):
                # Deferred: resolved against the fixture after literals.
                functions[name] = value
                continue
            setattr(self, name, value)
        _resolve_functions(functions, self)
        self.initialize()

    def setup(self) -> None:
        if hasattr(self, 'mocks'):
            logger.debug('self.mocks: %s', self.mocks)
            for mock_symbol, mock_result in self.mocks.items():
                # Only prepare the mock when the context actually mocks it.
                if _find_mocker(mock_symbol, self.context)():
                    _prepare_mock(self.context, mock_symbol, **mock_result)
        super(self.__class__, self).setup()

    # Assemble the new Fixture class and publish it into the namespace.
    namespace[class_name] = type(class_name, base_classes, {
        'description': description,
        '__init__': __init__,
        '__module__': caller_module,
        'setup': setup,
        'uuid': my_uuid,
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Sets return value or side effect of symbol's mock in context.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``
    '''
    methods = symbol.split('.')
    index = len(methods)
    mock = None
    # Search longest-prefix-first for a `mocked_*` attribute on the context.
    while index > 0:
        name = 'mocked_' + '_'.join(methods[:index]).lower()
        logger.debug('name: %s', name)
        if hasattr(context, name):
            mock = getattr(context, name)
            break
        index -= 1
    logger.debug('mock: %s', mock)
    if mock is not None:
        # Walk the remaining attribute path down from the found mock.
        mock = functools.reduce(getattr, methods[index:], mock)
        logger.debug('mock: %s', mock)
        if return_value is not None:
            mock.return_value = return_value
        if side_effect is not None:
            mock.side_effect = side_effect
        # Clear any calls recorded before this preparation.
        mock.reset_mock()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _find_mocker(symbol: str, context: 'torment.contexts.TestContext') -> Callable[[], bool]:
'''Find method within the context that mocks symbol.
Given a symbol (i.e. ``tornado.httpclient.AsyncHTTPClient.fetch``), find
the shortest ``mock_`` method that resembles the symbol. Resembles means
the lowercased and periods replaced with underscores.
If no match is found, a dummy function (only returns False) is returned.
**Parameters**
:``symbol``: the symbol to be located
:``context``: the search context
**Return Value(s)**
The method used to mock the symbol.
**Examples**
Assuming the symbol is ``tornado.httpclient.AsyncHTTPClient.fetch``, the
first of the following methods would be returned:
* ``mock_tornado``
* ``mock_tornado_httpclient``
* ``mock_tornado_httpclient_asynchttpclient``
* ``mock_tornado_httpclient_asynchttpclient_fetch``
'''
components = []
method = None
for component in symbol.split('.'):
components.append(component.lower())
name = '_'.join([ 'mock' ] + components)
if hasattr(context, name):
method = getattr(context, name)
break
if method is None:
logger.warn('no mocker for %s', symbol)
def noop(*args, **kwargs):
return False
method = noop
return method |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_functions(functions: Dict[str, Callable[[Any], Any]], fixture: Fixture) -> None:
    '''Apply functions and collect values as properties on fixture.

    Call functions and apply their values as properteis on fixture.
    Functions will continue to get applied until no more functions resolve.
    All unresolved functions are logged and the last exception to have
    occurred is also logged.  This function does not return but adds the
    results to fixture directly.

    **Parameters**

    :``functions``: dict mapping function names (property names) to
                    callable functions
    :``fixture``:   Fixture to add values to
    '''
    exc_info = last_function = None
    # Loop until a full pass resolves nothing new (fixed point).
    function_count = float('inf')
    while function_count > len(functions):
        function_count = len(functions)
        # Iterate over a copy: resolved entries are deleted mid-loop.
        for name, function in copy.copy(functions).items():
            try:
                # deepcopy isolates the stored value from later mutation.
                setattr(fixture, name, copy.deepcopy(function(fixture)))
                del functions[name]
            except:
                # Deliberate bare except: any failure just defers this
                # function to a later pass; keep the traceback for logging.
                exc_info = sys.exc_info()
                logger.debug('name: %s', name)
                logger.debug('exc_info: %s', exc_info)
                last_function = name
    if len(functions):
        logger.warning('unprocessed Fixture properties: %s', ','.join(functions.keys()))
        logger.warning('last exception from %s.%s:', fixture.name, last_function, exc_info = exc_info)
        setattr(fixture, '_last_resolver_exception', ( last_function, exc_info, ))
        # Leave unresolved functions intact on the fixture for later
        # processing, as documented.
        for name, function in copy.copy(functions).items():
            setattr(fixture, name, function)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
'''Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid.
'''
count = 0
name = original_name = 'f_' + uuid.hex
while name in namespace:
count += 1
name = original_name + '_' + str(count)
return name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self) -> None:
    '''Calls sibling with exception expectation.'''
    # The fixture's configured error type must be raised by the sibling
    # run(); assertRaises fails the test otherwise.
    with self.context.assertRaises(self.error.__class__) as error:
        super().run()
    # Preserve the raised exception for later assertions by the fixture.
    self.exception = error.exception
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cache_keys(keys):
    """Pre-load key entries into the key cache (allows debugging via
    PyCharm, where real terminal input is unavailable).
    """
    mapping = known_keys()
    # Invert the known-keys mapping: name -> code tuple.
    name_to_code = dict(zip(mapping.values(), mapping.keys()))
    for key in keys:
        if len(key) == 1:
            entry = (ord(key),)
        else:
            entry = name_to_code[key]
        _key_cache.insert(0, entry)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_keycodes():
    """Read keypress giving a tuple of key codes

    A 'key code' is the ordinal value of characters read
    For example, pressing 'A' will give (65,)
    """
    # Serve a pre-cached entry first (see cache_keys).
    try:
        return _key_cache.pop()
    except IndexError:
        pass
    result = []
    # Final bytes that terminate a CSI escape sequence.
    terminators = 'ABCDFHPQRS~'
    with TerminalContext():
        code = get_ord()
        result.append(code)
        if code == 27:  # ESC: possibly the start of an escape sequence
            # A lone ESC has no follow-up byte; use a short timeout to
            # distinguish it from a real escape sequence.
            with TimerContext(0.1) as timer:
                code = get_ord()
            if not timer.timed_out:
                result.append(code)
                result.append(get_ord())
                if 64 < result[-1] < 69:
                    # Single-byte sequence (e.g. SS3 cursor keys): done.
                    pass
                elif result[1] == 91:  # '[' -> CSI: read until terminator
                    while True:
                        code = get_ord()
                        result.append(code)
                        if chr(code) in terminators:
                            break
    return tuple(result)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_key():
    """Get a key from the keyboard as a string.

    A 'key' will be a single char, or the name of an extended key.
    """
    codes = _get_keycodes()
    if len(codes) != 1:
        # Multi-byte sequences are extended keys (arrows, F-keys, ...).
        return get_extended_key_name(codes)
    code = codes[0]
    return chr(code) if code >= 32 else control_key_name(code)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_ports_on_br(self, bridge='br-ex', ports=None):
    """Check ports exist on bridge: ``ovs-vsctl list-ports bridge``.

    :param bridge: name of the OVS bridge to inspect.
    :param ports: iterable of port names to check; defaults to ['eth3'].
    :returns: agent response whose data maps each port to True/False,
              or an error response when the command fails.
    """
    if ports is None:
        # FIX: avoid a mutable default argument (was ports=['eth3']).
        ports = ['eth3']
    LOG.info("RPC: check_ports_on_br bridge: %s, ports: %s" %
             (bridge, ports))
    cmd = ['ovs-vsctl', 'list-ports', bridge]
    stdcode, stdout = agent_utils.execute(cmd, root=True)
    data = dict()
    if stdcode == 0:
        for port in ports:
            if port in stdout:
                data[port] = True
                stdout.remove(port)
            else:
                data[port] = False
        return agent_utils.make_response(code=stdcode, data=data)
    # execute failed: the first output line carries the error message.
    message = stdout.pop(0)
    return agent_utils.make_response(code=stdcode,
                                     message=message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ping(self, ips, boardcast=False, count=2, timeout=2, interface=None):
    """Ping hosts (or broadcast): ``ping host -c count -W timeout``.

    :param ips: iterable of addresses to ping sequentially.
    :param boardcast: add ``-b`` (broadcast).  NOTE: the parameter name's
        typo is kept for caller compatibility.
    :param interface: optional ``-I`` source interface.
    :returns: response whose data maps ip -> packet-loss percentage
              (100 when the ping command itself failed).
    """
    cmd = ['ping', '-c', str(count), '-W', str(timeout)]
    # FIX: the options were previously appended via conditional
    # expressions evaluated for their side effects; plain statements
    # behave identically and are readable.
    if interface:
        cmd.extend(['-I', interface])
    if boardcast:
        cmd.append('-b')
    data = dict()
    try:
        for ip in ips:
            stdcode, stdout = agent_utils.execute(cmd + [ip])
            if stdcode:
                data[ip] = 100
            else:
                # Extract "<n>% packet loss" from the summary line.
                pattern = r',\s([0-9]+)%\spacket\sloss'
                data[ip] = re.search(pattern, stdout[-2]).groups()[0]
        return agent_utils.make_response(code=0, data=data)
    except Exception as e:
        # FIX: str(e) replaces the Python-2-only e.message attribute.
        return agent_utils.make_response(code=1, message=str(e))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_vlan_to_interface(self, interface, vlan_id):
    """Add vlan interface:
    ``ip link add link eth0 name eth0.10 type vlan id 10``.
    """
    sub_interface = '%s.%s' % (interface, vlan_id)
    cmd = ['ip', 'link', 'add', 'link', interface, 'name',
           sub_interface, 'type', 'vlan', 'id', '%s' % vlan_id]
    stdcode, stdout = agent_utils.execute(cmd, root=True)
    if stdcode != 0:
        # execute failed: the first output line carries the error message.
        return agent_utils.make_response(code=stdcode,
                                         message=stdout.pop(0))
    return agent_utils.make_response(code=stdcode)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_interface(self, interface='eth0'):
    """Interface info: ``ifconfig interface``.

    :param interface: interface name to query.
    :returns: agent response built from (code, message, data).
    """
    LOG.info("RPC: get_interface interfae: %s" % interface)
    # Delegate the actual lookup to the shared agent utility.
    code, message, data = agent_utils.get_interface(interface)
    return agent_utils.make_response(code, message, data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_link(self, interface, cidr):
    """Setup a link:
    ``ip addr add dev interface``
    ``ip link set dev interface up``

    :param interface: interface to configure.
    :param cidr: address in CIDR notation to assign.
    """
    # clear old ipaddr in interface
    cmd = ['ip', 'addr', 'flush', 'dev', interface]
    agent_utils.execute(cmd, root=True)
    ip = IPNetwork(cidr)
    cmd = ['ip', 'addr', 'add', cidr, 'broadcast',
           str(ip.broadcast), 'dev', interface]
    stdcode, stdout = agent_utils.execute(cmd, root=True)
    if stdcode == 0:
        # Address assigned; now bring the link up.
        cmd = ['ip', 'link', 'set', 'dev', interface, 'up']
        stdcode, stdout = agent_utils.execute(cmd, root=True)
        if stdcode == 0:
            return agent_utils.make_response(code=stdcode)
    # execute failed: the first output line carries the error message.
    message = stdout.pop(0)
    return agent_utils.make_response(code=stdcode, message=message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_iperf_server(self, protocol='TCP', port=5001, window=None):
    """Start an iperf server (``iperf -s``).

    :param protocol: transport protocol for the server.
    :param port: listen port.
    :param window: window size, or None for the driver default.
    """
    iperf = iperf_driver.IPerfDriver()
    try:
        # BUG FIX: the caller's arguments were previously ignored and the
        # defaults ('TCP', 5001, None) were hard-coded into the call.
        data = iperf.start_server(protocol=protocol, port=port,
                                  window=window)
        return agent_utils.make_response(code=0, data=data)
    except Exception:
        # Narrowed from a bare except (no longer traps SystemExit /
        # KeyboardInterrupt); still best-effort.
        message = 'Start iperf server failed!'
        return agent_utils.make_response(code=1, message=message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isreference(a):
    """ Tell whether a variable is an object reference.

    Due to garbage collection, some objects happen to get the id of a
    distinct variable.  As a consequence, linking is not ready yet and
    `isreference` returns ``False``.
    """
    # Deliberately disabled (see docstring): always report False.
    return False
    # --- unreachable legacy implementation kept for reference ---
    return id(a) != id(copy.copy(a))
    check = ('__dict__', '__slots__')
    for attr in check:
        try:
            getattr(a, attr)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            pass
        else:
            return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_type(storable_type):
    """ Look for the Python type that corresponds to a storable type name.

    Dotted names are resolved by importing the module part and fetching the
    attribute; a bare name (no module) is assumed to be a builtin and is
    evaluated directly.  Returns None when resolution fails.
    """
    if storable_type.startswith('Python'):
        # Strip the leading 'Python.' namespace marker.
        _, module_name = storable_type.split('.', 1)
    else:
        module_name = storable_type
    #type_name, module_name = \
    # Split off the last dotted component by reversing the string.
    names = [ _name[::-1] for _name in module_name[::-1].split('.', 1) ]
    if names[1:]:
        type_name, module_name = names
    else:
        type_name = names[0]
        # BUG FIX: this return was at function level, which made the
        # import-based lookup below unreachable; a bare name (builtin)
        # is the only case resolved via eval.
        # NOTE(review): eval on the type name is risky if storable_type
        # can come from untrusted data — consider getattr(builtins, ...).
        return eval(type_name)
    try:
        module = importlib.import_module(module_name)
        python_type = getattr(module, type_name)
    except (ImportError, AttributeError):
        python_type = None
    return python_type
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poke(exposes):
    """ Default serializer factory.

    Arguments:

        exposes (iterable): attributes to be serialized.

    Returns:

        callable: serializer (`poke` routine).
    """
    def _poke(store, objname, obj, container, visited=None, _stack=None):
        try:
            sub_container = store.newContainer(objname, obj, container)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # Deliberate bare except: any store failure is reported as
            # lack of generic-poke support.
            raise ValueError('generic poke not supported by store')
        #_stack = _add_to_stack(_stack, objname)
        for iobjname in exposes:
            try:
                iobj = getattr(obj, iobjname)
            except AttributeError:
                # Missing attributes are simply skipped.
                pass
            else:
                store.poke(iobjname, iobj, sub_container, visited=visited, \
                    _stack=_stack)
    return _poke
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poke_assoc(store, objname, assoc, container, visited=None, _stack=None):
    """ Serialize association lists.

    When any key is not a string, keys are "escaped": items are stored
    under sequential numeric record names and the container is tagged
    with the 'escaped' record attribute.
    """
    try:
        sub_container = store.newContainer(objname, assoc, container)
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        # Deliberate bare except, mirroring the generic poke.
        raise ValueError('generic poke not supported by store')
    # Escape when at least one key is not a plain string.
    escape_keys = assoc and not all(isinstance(iobjname, strtypes) for iobjname,_ in assoc)
    reported_item_counter = 0
    escaped_key_counter = 0
    try:
        if escape_keys:
            store.setRecordAttr('key', 'escaped', sub_container)
            verbose = store.verbose # save state
            for obj in assoc:
                # Store each (key, value) pair under a numeric name.
                store.poke(str(escaped_key_counter), obj, sub_container, \
                    visited=visited, _stack=_stack)
                escaped_key_counter += 1
                # After 9 reported items, silence verbose output to avoid
                # flooding the console with long lists.
                if store.verbose:
                    reported_item_counter += 1
                    if reported_item_counter == 9:
                        store.verbose = False
                        print('...')
            store.verbose = verbose # restore state
        else:
            for iobjname, iobj in assoc:
                store.poke(iobjname, iobj, sub_container, visited=visited, \
                    _stack=_stack)
    except TypeError as e:
        msg = 'wrong type for keys in associative list'
        if e.args[0].startswith(msg):
            # Already our message (from a nested call); re-raise as-is.
            raise
        else:
            raise TypeError("{}:\n\t{}".format(msg, e.args[0]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_peek(python_type, exposes):
    """ Autoserializer factory.  Works best in Python 3.

    Arguments:

        python_type (type): type constructor.

        exposes (iterable): sequence of attributes.

    Returns:

        callable: deserializer (`peek` routine).
    """
    # Probe how instances can be constructed: no-arg constructor first,
    # then bare __new__, then __new__ with positional state.
    with_args = False
    make = python_type
    try:
        make()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        make = lambda: python_type.__new__(python_type)
        try:
            make()
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            make = lambda args: python_type.__new__(python_type, *args)
            with_args = True
    def missing(attr):
        return AttributeError("can't set attribute '{}' ({})".format(attr, python_type))
    if with_args:
        # State must be passed positionally to __new__, in `exposes` order.
        def peek(store, container, _stack=None):
            state = []
            for attr in exposes: # force order instead of iterating over `container`
                #print((attr, attr in container)) # debugging
                if attr in container:
                    state.append(store.peek(attr, container, _stack=_stack))
                else:
                    state.append(None)
            return make(state)
    elif '__dict__' in exposes:
        # Arbitrary attributes allowed: restore everything the container has.
        def peek(store, container, _stack=None):
            obj = make()
            for attr in container:
                val = store.peek(attr, container, _stack=_stack)
                try:
                    setattr(obj, attr, val)
                except AttributeError:
                    raise missing(attr)
            return obj
    else:
        # Fixed attribute set: restore exactly `exposes`, defaulting to None.
        def peek(store, container, _stack=None):
            obj = make()
            for attr in exposes: # force order instead of iterating over `container`
                #print((attr, attr in container)) # debugging
                if attr in container:
                    val = store.peek(attr, container, _stack=_stack)
                else:
                    val = None
                try:
                    setattr(obj, attr, val)
                except AttributeError:
                    raise missing(attr)
            return obj
    return peek
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unsafe_peek(init):
    """ Deserialize all the attributes available in the container and pass
    them in the same order as they come in the container.

    This is a factory function; returns the actual `peek` routine.

    Arguments:

        init: type constructor.

    Returns:

        callable: deserializer (`peek` routine).
    """
    def peek(store, container, _stack=None):
        values = []
        for attr in container:
            values.append(store.peek(attr, container, _stack=_stack))
        return init(*values)
    return peek
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peek_with_kwargs(init, args=()):
    """ Make datatypes passing keyworded arguments to the constructor.

    This is a factory function; returns the actual `peek` routine.

    Arguments:
        init (callable): type constructor.
        args (iterable): arguments NOT to be keyworded; order does matter.

    Returns:
        callable: deserializer (`peek` routine).

    All the peeked attributes that are not referenced in `args` are passed
    to `init` as keyworded arguments.
    """
    # Fix: the default used to be the mutable `[]` (shared across calls);
    # an empty tuple behaves identically for iteration/membership and
    # cannot be mutated accidentally.
    def peek(store, container, _stack=None):
        positional = [store.peek(attr, container, _stack=_stack) for attr in args]
        keyworded = dict(
            (attr, store.peek(attr, container, _stack=_stack))
            for attr in container if attr not in args)
        return init(*positional, **keyworded)
    return peek
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peek(init, exposes, debug=False):
    """ Default deserializer factory.

    Arguments:
        init (callable): type constructor.
        exposes (iterable): attributes to be peeked and passed to `init`.
        debug (bool): if True, print the peeked arguments before calling
            `init`.

    Returns:
        callable: deserializer (`peek` routine).
    """
    def _peek(store, container, _stack=None):
        args = []
        for objname in exposes:
            args.append(store.peek(objname, container, _stack=_stack))
        if debug:
            print(args)
        return init(*args)
    return _peek
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peek_assoc(store, container, _stack=None):
    """ Deserialize association lists.

    Arguments:
        store: active store, providing `getRecordAttr`, `strRecord` and
            `peek`.
        container: record to deserialize; must be iterable.

    Returns:
        list: plain values when the record keys are 'escaped', otherwise
            (key, value) pairs.

    Raises:
        TypeError: with an explanatory message when `container` is not
            iterable; a TypeError raised while peeking an iterable
            container is re-raised unchanged.
    """
    assoc = []
    try:
        if store.getRecordAttr('key', container) == 'escaped':
            for i in container:
                assoc.append(store.peek(i, container, _stack=_stack))
        else:
            for i in container:
                assoc.append((store.strRecord(i, container), store.peek(i, container, _stack=_stack)))
        #print(assoc) # debugging
    except TypeError as e:
        # Fix: the previous implementation re-raised `e` inside the same
        # `try` block that probed iterability, so its own
        # `except TypeError` caught the re-raise and EVERY TypeError was
        # mis-reported as "container is not iterable". Probe iterability
        # first, then re-raise the original error outside the probe.
        try:
            iter(container)
        except TypeError:
            raise TypeError("container is not iterable; peek is not compatible\n\t{}".format(e.args[0]))
        raise
    return assoc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def most_exposes(python_type):
    """ Core engine for the automatic generation of storable instances.

    Finds the attributes exposed by the objects of a given type.

    Mostly Python3-only. Does not handle types which `__new__` method
    requires extra arguments either.

    Arguments:
        python_type (type): object type.

    Returns:
        list: attributes exposed.
    """
    _exposes = set()
    try:
        # list all standard class attributes and methods:
        do_not_expose = set(python_type.__dir__(object) + \
                ['__slots__', '__module__', '__weakref__']) # may raise `AttributeError`
        empty = python_type.__new__(python_type) # may raise `TypeError`
    except AttributeError: # Py2 does not have `__dir__`
        # Python 2 fallback: trust `__slots__` when declared, else expose nothing.
        try:
            _exposes = python_type.__slots__
        except AttributeError:
            pass
    except TypeError: # `__new__` requires input arguments
        # delegate to the registered workarounds; the first one that
        # succeeds (broad `except` intentionally swallows its failures) wins
        for _workaround in storable_workarounds:
            try:
                _exposes = _workaround(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
            else:
                break
    else:
        # note that slots from parent classes are not in `__dict__` (like all slots)
        # and - in principle - not in `__slots__` either.
        all_members = empty.__dir__() # all slots are supposed to appear in this list
        for attr in all_members:
            if attr in do_not_expose:
                # note that '__dict__' is in `do_not_expose` (comes from `object`)
                continue
            try: # identify the methods and properties
                getattr(empty, attr)
            except AttributeError as e: # then `attr` might be a slot
                # properties can still throw an `AttributeError`;
                # try to filter some more out
                if e.args:
                    msg = e.args[0]
                    # only keep it when the message matches the standard
                    # "unset slot" wording, i.e. the attribute truly has no value yet
                    if msg == attr or msg.endswith("' object has no attribute '{}'".format(attr)):
                        _exposes.add(attr)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
        # `__dict__` was filtered out above but must be exposed when present,
        # so that dynamically-assigned attributes survive serialization
        for attr in ('__dict__',):
            if attr in all_members:
                _exposes.add(attr)
    return list(_exposes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_storable(python_type, exposes=None, version=None, storable_type=None, peek=default_peek):
    """ Default mechanics for building the storable instance for a type.

    Arguments:
        python_type (type): type.
        exposes (iterable): attributes exposed by the type.
        version (tuple): version number.
        storable_type (str): universal string identifier for the type.
        peek (callable): peeking routine.

    Returns:
        Storable: storable instance.

    Raises:
        AttributeError: when `exposes` is not given and no registered
            extension can determine the exposed attributes.
    """
    if not exposes:
        # probe the registered extensions; keep the first non-empty answer
        for extension in expose_extensions:
            try:
                exposes = extension(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                continue
            if exposes:
                break
        if not exposes:
            raise AttributeError('`exposes` required for type: {!r}'.format(python_type))
    default_handler = StorableHandler(version=version, exposes=exposes,
            poke=poke(exposes), peek=peek(python_type, exposes))
    return Storable(python_type, key=storable_type, handlers=default_handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def not_storable(_type):
    """ Helper for tagging unserializable types.

    Arguments:
        _type (type): type to be ignored.

    Returns:
        Storable: storable instance that does not poke.
    """
    noop_handler = StorableHandler(poke=fake_poke, peek=fail_peek(_type))
    return Storable(_type, handlers=noop_handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def force_auto(service, _type):
    """ Helper for forcing autoserialization of a datatype with already
    registered explicit storable instance.

    Arguments:
        service (StorableService): active storable service.
        _type (type): type to be autoserialized.

    **Not tested**
    """
    explicit = service.byPythonType(_type, istype=True)
    # bump the major version past every existing handler
    next_version = 1 + max(h.version[0] for h in explicit.handlers)
    auto = default_storable(_type, version=(next_version, ))
    explicit.handlers.append(auto.handlers[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def poke_native(getstate):
    """ Serializer factory for types which state can be natively serialized.

    Arguments:
        getstate (callable): takes an object and returns the object's state
            to be passed to `pokeNative`.

    Returns:
        callable: serializer (`poke` routine).
    """
    def poke(service, objname, obj, container, visited=None, _stack=None):
        state = getstate(obj)
        service.pokeNative(objname, state, container)
    return poke
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peek_native(make):
    """ Deserializer factory for types which state can be natively
    serialized.

    Arguments:
        make (callable): type constructor.

    Returns:
        callable: deserializer (`peek` routine).
    """
    def peek(service, container, _stack=None):
        state = service.peekNative(container)
        return make(state)
    return peek
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handler(init, exposes, version=None):
    """ Simple handler with default `peek` and `poke` procedures.

    Arguments:
        init (callable): type constructor.
        exposes (iterable): attributes to be (de-)serialized.
        version (tuple): version number.

    Returns:
        StorableHandler: storable handler.
    """
    return StorableHandler(
            version=version,
            peek=peek(init, exposes),
            poke=poke(exposes))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def namedtuple_storable(namedtuple, *args, **kwargs):
    """ Storable factory for named tuples; the tuple's fields are the
    exposed attributes. """
    fields = namedtuple._fields
    return default_storable(namedtuple, fields, *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pokeVisited(self, objname, obj, record, existing, visited=None, _stack=None, **kwargs):
    """ Serialize an already serialized object.

    If the underlying store supports linking, this is the place where to
    make links. The default implementation delegates to
    :meth:`pokeStorable` or :meth:`pokeNative`.

    Arguments:
        objname (any): record reference.
        obj (any): object to be serialized.
        existing (any): absolute reference of the record which the object
            was already serialized into.
        visited (dict): already serialized objects.
        _stack (CallStack): stack of parent object names.
    """
    if not self.hasPythonType(obj):
        # no registered storable: fall back to native serialization,
        # dumping the call stack before propagating any failure
        try:
            self.pokeNative(objname, obj, record)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            self.dump_stack(_stack)
            raise
        return
    storable = self.byPythonType(obj).asVersion()
    self.pokeStorable(storable, objname, obj, record,
            visited=visited, _stack=_stack, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs):
    """ Generate a default storable instance.

    Arguments:
        python_type (type): Python type of the object.
        storable_type (str): storable type name.
        version (tuple): version number of the storable handler.

    Returns:
        StorableHandler: storable instance.

    Extra keyword arguments are passed to :meth:`registerStorable`.
    """
    if python_type is None:
        python_type = lookup_type(storable_type)
    if self.verbose:
        print('generating storable instance for type: {}'.format(python_type))
    generated = default_storable(python_type,
            version=version, storable_type=storable_type)
    self.storables.registerStorable(generated, **kwargs)
    return self.byPythonType(python_type, True).asVersion(version)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, name: str, default: Any = None) -> Any:
    """Return the first value, either the default or actual"""
    # values stored under a key are lists; a one-element default list
    # makes the missing-key and present-key paths uniform
    values = super().get(name, [default])
    return values[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getlist(self, name: str, default: Any = None) -> List[Any]:
    """Return the entire list"""
    # note: the raw `default` (not wrapped in a list) is returned when
    # the key is absent
    return super().get(name, default)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, path):
    """Run path against filter sets and return True if all pass"""
    # Hidden files and folders ('.'-prefixed basename) never pass.
    if os.path.basename(path).startswith('.'):
        return False
    # Path depth must fall within the configured min/max levels.
    if not self.check_level(path):
        return False
    # Per-level filters, when configured, must all pass.
    if self.filters and not self._level_filters(path):
        return False
    lowered = path.lower()
    # Exclusion/inclusion terms are stringified so integer filters
    # compare correctly; matching is case-insensitive substring.
    if self.to_exclude and any(str(term).lower() in lowered for term in self.to_exclude):
        return False
    if self.to_include and not any(str(term).lower() in lowered for term in self.to_include):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comment_thread(cls, backend, *args, **kwargs):
    """Create a comment thread for the desired backend.

    :arg backend: String name of backend (e.g., 'file', 'github',
                  'redis', etc.).

    :arg *args, **kwargs: Arguments to be passed to contructor for that
                          backend.

    :returns: A CommentThread sub-class for the given backend, or None
              when the backend name is unknown.

    PURPOSE: Some simple syntatic sugar for creating the desired backend.
    """
    backend_cls = cls._known_backends.get(backend)
    if not backend_cls:
        return None
    return backend_cls(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_dossier(data):
    '''
    Find dossier with reference matching either 'ref_an' or 'ref_sen',
    create it if not found. Ensure its reference is 'ref_an' if both fields
    are present.

    Returns a (dossier, changed) tuple; `changed` is True when the dossier
    was created or had its reference rewritten.
    '''
    changed = False
    dossier = None
    reffield = None
    for field in [k for k in ('ref_an', 'ref_sen') if k in data]:
        try:
            dossier = Dossier.objects.get(reference=data[field])
            reffield = field
            break
        except Dossier.DoesNotExist:
            pass
    if dossier is None:
        reffield = 'ref_an' if 'ref_an' in data else 'ref_sen'
        dossier = Dossier(reference=data[reffield])
        # Fix: pass lazy %-style args to logging instead of eagerly
        # formatting with `%` — the string is only built if DEBUG is on.
        logger.debug('Created dossier %s', data[reffield])
        changed = True
    if 'ref_an' in data and reffield != 'ref_an':
        logger.debug('Changed dossier reference to %s', data['ref_an'])
        dossier.reference = data['ref_an']
        changed = True
    return dossier, changed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.