text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_queryset_to_csv(qs, filename):
"""Write a QuerySet or ValuesListQuerySet to a CSV file based on djangosnippets by zbyte64 and http://palewi.re Arguments: qs (QuerySet or ValuesListQuerySet):
The records you want to write to a text file (UTF-8) filename (str):
The records your want to write to a text file (UTF-8) filename (str):
full path and file name to write to """ |
# Body of write_queryset_to_csv: dump a Django QuerySet (or values list) to a UTF-8 CSV file.
model = qs.model
with open(filename, 'w') as fp:
writer = csv.writer(fp)
try:
# a ValuesListQuerySet exposes its selected column names via `_fields`
headers = list(qs._fields)
except:
# plain QuerySet: fall back to all field names on the model
headers = [field.name for field in model._meta.fields]
writer.writerow(headers)
for obj in qs:
row = []
for colnum, field in enumerate(headers):
try:
# model instance -> attribute lookup; row tuple -> positional index fallback
value = getattr(obj, field, obj[colnum])
except:
value = ''
if callable(value):
value = value()
# the Python 2 csv module requires byte strings, so encode everything as UTF-8
if isinstance(value, basestring):
value = value.encode("utf-8")
else:
value = str(value).encode("utf-8")
row += [value]
writer.writerow(row) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_date(dt, date_parser=parse_date):
"""Coerce a datetime or string into datetime.date object Arguments: dt (str or datetime.datetime or datetime.time or numpy.Timestamp):
time or date to be coerced into a `datetime.time` object Returns: datetime.time: Time of day portion of a `datetime` string or object datetime.date(1970, 1, 1) datetime.date(1970, 1, 1) True datetime.date(1999, 12, 31) """ |
# Body of make_date: coerce None/str/datetime-like input into a datetime.date.
if not dt:
# falsy input defaults to the Unix epoch date
return datetime.date(1970, 1, 1)
if isinstance(dt, basestring):
dt = date_parser(dt)
try:
# datetime-like objects: take (year, month, day) from the timetuple
dt = dt.timetuple()[:3]
except:
# otherwise assume an iterable whose first 3 items are (year, month, day)
dt = tuple(dt)[:3]
return datetime.date(*dt) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_time(dt, date_parser=parse_date):
"""Ignore date information in a datetime string or object Arguments: dt (str or datetime.datetime or datetime.time or numpy.Timestamp):
time or date to be coerced into a `datetime.time` object Returns: datetime.time: Time of day portion of a `datetime` string or object datetime.time(0, 0) datetime.time(23, 59) datetime.time(23, 59, 59) """ |
# Body of make_time: extract the time-of-day portion of a datetime string or object.
if not dt:
# falsy input defaults to midnight
return datetime.time(0, 0)
if isinstance(dt, basestring):
try:
dt = date_parser(dt)
except:
# unparseable string: report the failure and fall back to midnight
print 'Unable to parse {0}'.format(repr(dt))
print_exc()
return datetime.time(0, 0)
try:
# datetime-like objects: take (hour, minute, second) from the timetuple
dt = dt.timetuple()[3:6]
except:
dt = tuple(dt)[3:6]
return datetime.time(*dt) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):
"""Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict Arguments: path (str):
file or folder to retrieve CSV files and `pandas.DataFrame`s from ext (str):
file name extension (to filter files by) date_parser (function):
if the MultiIndex can be interpreted as a datetime, this parser will be used Returns: dict of DataFrame: { file_path: flattened_data_frame } """ |
# Body of flatten_excel: flatten each Excel file found at `path` and write flat CSVs.
date_parser = date_parser or (lambda x: x)
dotted_ext, dotted_output_ext = None, None
if ext != None and output_ext != None:
# normalize both extensions so they always carry a leading dot
dotted_ext = ('' if ext.startswith('.') else '.') + ext
dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
table = {}
for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):
file_path = file_properties['path']
# skip files that already look like flattened output from a previous run
if output_ext and (dotted_output_ext + '.') in file_path:
continue
df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)
df = flatten_dataframe(df, verbosity=verbosity)
if dotted_ext != None and dotted_output_ext != None:
df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
# NOTE(review): `table` is returned without ever being populated, though the
# docstring promises {file_path: flattened_data_frame} -- confirm whether
# `table[file_path] = df` was dropped from the loop.
return table |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash_model_values(model, clear=True, hash_field='values_hash', hash_fun=hash, ignore_pk=True, ignore_fields=[]):
"""Hash values of DB table records to facilitate tracking changes to the DB table Intended for comparing records in one table to those in another (with potentially differing id/pk values) For example, changes to a table in a read-only MS SQL database can be quickly identified and mirrored to a writeable PostGRE DB where these hash values are stored along side the data. """ |
# Body of hash_model_values: store a hash of each record's field values in ChangeLog.
# NOTE(review): `ignore_fields += [...]` below mutates the default `ignore_fields=[]`
# argument, so the pk name accumulates across calls -- confirm and copy the list first.
qs = getattr(model, 'objects', model)
model = qs.model
if ignore_pk:
# exclude the primary key so hashes match across databases with differing pks
ignore_fields += [model._meta.pk.name]
if not hasattr(model, hash_field):
warnings.warn("%r doesn't have a field named %s in which to store a hash value. Skipping." % (model, hash_field))
return
for obj in qs:
# ignore primary key (id field) when hashing values
h = hash_fun(tuple([getattr(obj, k) for k in obj._meta.get_all_field_names() if k not in ignore_fields]))
tracking_obj, created = ChangeLog.get_or_create(app=model._meta.app_label, model=model._meta.object_name, primary_key=obj.pk)
tracking_obj.update(hash_value=h) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def bulk_update(object_list, ignore_errors=False, delete_first=False, verbosity=0):
'''Bulk_create objects in provided list of model instances, delete database rows for the original pks in the object list.
Returns any delta in the number of rows in the database table that resulted from the update.
If nonzero, an error has likely occurred and database integrity is suspect.
# delete_first = True is required if your model has unique constraints that would be violated by creating duplicate records
# FIXME: check for unique constraints and raise exception if any exist (won't work because new objects may violate!)
'''
# Nothing to do for an empty list; report zero row-count change.
if not object_list:
return 0
# All objects are assumed to belong to the same model class.
model = object_list[0].__class__
N_before = model.objects.count()
pks_to_delete = set()
for i, obj in enumerate(object_list):
pks_to_delete.add(obj.pk)
if delete_first:
# deep-copy and clear the pk so bulk_create inserts fresh rows later
object_list[i] = deepcopy(obj)
object_list[i].pk = None
if verbosity > 1:
print 'Creating %d %r objects.' % (len(object_list), model)
print 'BEFORE: %d' % model.objects.count()
# When not deleting first, create the duplicates before removing the originals.
if not delete_first:
model.objects.bulk_create(object_list)
if verbosity > 0:
print 'Deleting %d objects with pks: %r ........' % (len(pks_to_delete), pks_to_delete)
objs_to_delete = model.objects.filter(pk__in=pks_to_delete)
num_to_delete = objs_to_delete.count()
# Sanity check: every pk we collected should match exactly one DB row.
if num_to_delete != len(pks_to_delete):
msg = 'Attempt to delete redundant pks (len %d)! Queryset has count %d. Query was `filter(pk__in=%r). Queryset = %r' % (
len(pks_to_delete), num_to_delete, pks_to_delete, objs_to_delete)
if ignore_errors:
if verbosity > 0:
print msg
else:
raise RuntimeError(msg)
if verbosity > 1:
print 'Queryset to delete has %d objects' % objs_to_delete.count()
objs_to_delete.delete()
# When delete_first was requested, insert the (pk-cleared) copies now.
if delete_first:
model.objects.bulk_create(object_list)
if verbosity > 1:
print 'AFTER: %d' % model.objects.count()
N_after = model.objects.count()
if ignore_errors:
if verbosity > 1:
print 'AFTER: %d' % N_after
else:
if N_after != N_before:
print 'Number of records in %r changed by %d during bulk_create of %r.\n ' % (model, N_after - N_before, object_list)
msg = 'Records before and after bulk_create are not equal!!! Before=%d, After=%d' % (N_before, N_after)
raise RuntimeError(msg)
# Positive value means rows were lost during the update.
return N_before - N_after |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_queryset_batches(queryset, batch_len=1000, verbosity=1):
"""Filter a queryset by the pk in such a way that no batch is larger than the requested batch_len SEE ALSO: pug.nlp.util.generate_slices True """ |
# Body of generate_queryset_batches: yield pk-range-filtered querysets of at most batch_len rows.
# NOTE(review): the `batch_len == 1` and `N == 1` early-yield paths do not `return`,
# so execution falls through and the batching loop below may yield rows a second
# time -- confirm whether a `return` is missing after each of those loops.
if batch_len == 1:
for obj in queryset:
yield obj
N = queryset.count()
if not N:
# NOTE(review): raising StopIteration inside a generator becomes a
# RuntimeError under PEP 479 (Python 3.7+); fine only on legacy Python 2.
raise StopIteration("Queryset is empty!")
if N == 1:
for obj in queryset:
yield obj
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
# Sorted, non-null pks are the fenceposts used to slice the queryset into batches.
pk_queryset = queryset.filter(pk__isnull=False).values_list('pk', flat=True).order_by('pk')
N_nonnull = pk_queryset.count()
N_batches = int(N_nonnull/float(batch_len)) + 1
if verbosity > 1:
print 'Splitting %d primary_key values (%d nonnull) from %r into %d querysets of size %d or smaller. First loading pks into RAM...' % (N, N_nonnull, queryset.model, N_batches, batch_len)
nonnull_pk_list = tuple(pk_queryset)
pk_list = []
if verbosity > 1:
print 'Extracting the %d dividing (fencepost) primary keys for use in splitting the querysets with filter queries...' % (N_batches + 1)
# Each entry is an inclusive (first_pk, last_pk) pair for one batch.
for j in range(N_batches - 1):
pk_list += [(nonnull_pk_list[j*batch_len], nonnull_pk_list[(j+1)*batch_len - 1])]
last_batch_len = N_nonnull - (N_batches-1) * batch_len
pk_list += [(nonnull_pk_list[(N_batches-1) * batch_len], nonnull_pk_list[N-1])]
if verbosity > 1:
del(nonnull_pk_list)
print 'Yielding the %d batches according to the %d dividing (fencepost) primary keys...' % (N_batches, len(pk_list))
for j in range(N_batches):
if verbosity > 0:
pbar.update(i)
if j < N_batches - 1:
i += batch_len
else:
i += last_batch_len
# inclusive inequality ensures that even if PKs are repeated they will all be included in the queryset returned
yield queryset.filter(pk__gte=pk_list[j][0], pk__lte=pk_list[j][1])
if verbosity > 0:
pbar.finish() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optimize_filter_dict(filter_dict, trgm=True):
"""Improve query speed for a Django queryset `filter` or `exclude` kwargs dict WARNING: Without `trgm`, this only improves the speed of exclude filters by 0.4% Arguments: filter_dict (dict):
kwargs for Django ORM queryset `filter` and `exclude` queries trgm (bool):
whether to assume the Django ORM trigram (djorm-trgm) extension is available Examples: True True True """ |
# Body of optimize_filter_dict: rewrite ORM filter kwargs into faster equivalents.
optimized = {}
for k, v in filter_dict.iteritems():
if k.endswith('__in'):
# deduplicate, and collapse a single-element `__in` into a direct equality
v = set(v)
if len(v) == 1:
optimized[k[:-4]] = tuple(v)[0]
else:
optimized[k] = v
else:
optimized[k] = v
# This is the only optimization that actually does some good
if trgm:
# iterate a copy since keys are rewritten while looping
optimized_copy = dict(optimized)
for k, v in optimized_copy.iteritems():
if k.endswith('__contains'):
optimized[k[:-10] + '__similar'] = v
elif k.endswith('__icontains'):
optimized[k[:-11] + '__similar'] = v
return optimized |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_json(model, batch_len=200000, use_natural_keys=True, verbosity=1):
"""Dump database records to .json Django fixture file, one file for each batch of `batch_len` records Files are suitable for loading with "python manage.py loaddata folder_name_containing_files/*". """ |
# Body of dump_json: serialize a model's records into batched .json fixture files.
model = get_model(model)
N = model.objects.count()
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % (N,), pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
JSONSerializer = serializers.get_serializer("json")
jser = JSONSerializer()
if verbosity > 0:
pbar.update(0)
for i, partial_qs in enumerate(util.generate_slices(model.objects.all(), batch_len=batch_len)):
# one fixture file per batch: applabel--modelname--NNNN.json
with open(model._meta.app_label.lower() + '--' + model._meta.object_name.lower() + '--%04d.json' % i, 'w') as fpout:
if verbosity > 0:
pbar.update(i*batch_len)
jser.serialize(partial_qs, indent=1, stream=fpout, use_natural_keys=use_natural_keys)
if verbosity > 0:
pbar.finish() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_exclude_dicts(filter_dict=None, exclude_dict=None, name='acctno', values=[], swap=False):
"""Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values The last, critical step in generating Django ORM kwargs dicts from a natural language query. Properly parses "NOT" unary operators on each field value in the list. Assumes the lists have been pre-processed to consolidate NOTs and normalize values and syntax. Examples: True """ |
# Body of filter_exclude_dicts: split values into filter/exclude `__in` kwargs dicts.
filter_dict = filter_dict or {}
exclude_dict = exclude_dict or {}
if not name.endswith('__in'):
name += '__in'
filter_dict[name], exclude_dict[name] = [], []
for v in values:
# "NOT " means switch from include (filter) to exclude for that one account number
if v.startswith('NOT '):
exclude_dict[name] += [v[4:]]
else:
filter_dict[name] += [v]
# swap=True inverts the roles of the two returned dicts
if swap:
return exclude_dict, filter_dict
return filter_dict, exclude_dict |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_kwargs(self, kwargs, prefix='default_', delete=True):
""" set self attributes based on kwargs, optionally deleting kwargs that are processed """ |
# Body of process_kwargs: copy recognized kwargs onto prefixed self attributes.
processed = []
for k in kwargs:
# only consume kwargs that have a matching `prefix + name` attribute on self
if hasattr(self, prefix + k):
processed += [k]
setattr(self, prefix + k, kwargs[k])
# remove the consumed keys so the caller sees only the leftovers
for k in processed:
del(kwargs[k])
return kwargs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_column_wise_lists(self, transpose=False):
"""Generator over the columns of lists""" |
# make this a generator of generators?
# transpose=True round-trips through from_row_wise_lists to flip rows/columns
if transpose:
ans = self.from_row_wise_lists(self.as_column_wise_lists(transpose=False))
return ans
# default: the stored values are already column-wise
return self.values() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]: """ Generate n numbers as a list or a single one if no n is given. n is used to minimize the number of requests made and return type changes to be compatible with :py:mod:`random`'s interface """ |
# Body of randint: delegate to the shared generator with the configured batch cap.
max_n = self.config.MAX_NUMBER_OF_INTEGERS
return self._generate_randoms(self._request_randints, max_n=max_n, a=a, b=b, n=n) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_quota(self):
""" If IP can't make requests, raise BitQuotaExceeded. Called before generating numbers. """ |
# Body of _check_quota: fetch the remaining quota if needed, then enforce it.
self._request_remaining_quota_if_unset()
# NOTE(review): raises when the estimate drops BELOW quota_limit -- presumably
# quota_limit is the minimum remaining quota required to proceed; confirm.
if self.quota_estimate < self.quota_limit:
raise BitQuotaExceeded(self.quota_estimate) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _construct_timeseries(self, timeseries, constraints={}):
""" wraps response_from for timeseries calls, returns the resulting dict """ |
# Body of _construct_timeseries: fetch a timeseries response and normalize it to a dict.
self.response_from(timeseries, constraints)
if self.response == None:
return None
# convert the epoch start/end timestamps into datetime objects
return {'data':self.response['data'],
'period':self.response['period'],
'start time':datetime.datetime.fromtimestamp(self.response['start_time']),
'end time':datetime.datetime.fromtimestamp(self.response['end_time'])} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def edit_channel_info(self, new_ch_name, ch_dct):
"""Parent widget calls this whenever the user edits channel info. """ |
# Body of edit_channel_info: store the edited channel info and refresh the label.
self.ch_name = new_ch_name
self.dct = ch_dct
# color-code the label: green for analog channels, blue otherwise
if ch_dct['type'] == 'analog':
fmter = fmt.green
else:
fmter = fmt.blue
self.ch_name_label.setText(fmt.b(fmter(self.ch_name)))
self.generateToolTip() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connections(request, edges):
""" Plot a force-directed graph based on the edges provided """ |
# Body of connections: parse the edge spec and render the force-directed graph page.
edge_list, node_list = parse.graph_definition(edges)
# the template expects nodes/edges as JSON strings
data = {'nodes': json.dumps(node_list), 'edges': json.dumps(edge_list)}
return render_to_response('miner/connections.html', data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def csv_response_from_context(context=None, filename=None, field_names=None, null_string='', eval_python=True):
"""Generate the response for a Download CSV button from data within the context dict The CSV data must be in one of these places/formats: * context as a list of lists of python values (strings for headers in first list) * context['data']['d3data'] as a string in json format (python) for a list of lists of repr(python_value)s * context['data']['cases'] as a list of lists of python values (strings for headers in first list) * context['data']['cases'] as a django queryset or iterable of model instances (list, tuple, generator) If the input data is a list of lists (table) that has more columns that rows it will be trasposed before being processed """ |
# Body of csv_response_from_context: locate tabular data in `context` and stream it as a CSV download.
filename = filename or context.get('filename') or 'table_download.csv'
field_names = field_names or context.get('field_names', [])
# FIXME: too slow!
# only allow python evaluation when every field name is a plain identifier-ish string
# (note: `string.letters` is Python 2 only)
if field_names and all(field_names) and all(all(c in (string.letters + string.digits + '_.') for c in s) for s in field_names):
eval_python=False
data = context
# find the data table within the context dict. should be named 'data.cases' or 'data.d3data'
if not (isinstance(data, (tuple, list)) and isinstance(data[0], (tuple, list))):
data = json.loads(data.get('data', {}).get('d3data', '[[]]'))
if not data or not any(data):
data = context.get('data', {}).get('cases', [[]])
if not isinstance(data, (list, tuple)) or not isinstance(data[0], (list, tuple)):
# model instances (queryset/list/generator): convert to a table of values
data = table_generator_from_list_of_instances(data, field_names=field_names, eval_python=eval_python)
try:
# wide tables (more columns than rows) are assumed to be transposed
if len(data) < len(data[0]):
data = util.transposed_lists(data) # list(list(row) for row in data)
except TypeError:
# no need to transpose if a generator was provided instead of a list or tuple (anything with a len attribute)
pass
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
writer = csv.writer(response)
for row in data:
newrow = []
for s in row:
try:
newrow.append(s.encode('utf-8')) #handles strings, unicodes, utf-8s
except AttributeError: #will happen when we try to encode a class object or number
newrow.append(s)
except: #not sure it ever will be touched.
newrow.append(unicode(s))
writer.writerow(newrow)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def render_to_response(self, context, indent=None):
"Returns a JSON response containing 'context' as payload"
# serialize the context to JSON (optionally pretty-printed) and wrap it in an HTTP response
return self.get_json_response(self.convert_context_to_json(context, indent=indent)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report(self, reporter, ignore_nfd=False, ignore_ws=False):
""" Adds the problems that have been found so far to the given Reporter instance. The two keyword args can be used to restrict the error types to be reported. """ |
# Body of report: forward collected problems to the reporter, honoring the ignore flags.
if self.strip_errors and not ignore_ws:
reporter.add(self.strip_errors, 'leading or trailing whitespace')
if self.norm_errors and not ignore_nfd:
reporter.add(self.norm_errors, 'not in Unicode NFD') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" The main loop for the logger process. Will receive remote processes orders one by one and wait for the next one. Then return from this method when the main application calls for exit, which is a regular command. """ |
# Initialize the file logger
self.log = getLogger()
# Deserialize configuration
self.set_config_command = dill.loads(self.set_config_command)
self.set_configuration(self.set_config_command)
# Attach file handlers, rejecting stdout/stderr stream handlers which this
# process reserves for its own console output.
for handler in self.file_handlers:
if isinstance(handler, StreamHandler)\
and (handler.stream == sys.stdout or handler.stream == sys.stderr):
self.critical(LogMessageCommand(text='Cannot use logging.StreamHandler with \'sys.stdout\' nor '
'\'sys.stderr\' because those are reserved by the logger process',
level=logging.CRITICAL))
continue
self.log.addHandler(hdlr=handler)
self.log.setLevel(self.console_level)
# Main dispatch loop: block on the queue and route each command by type.
# Only an ExitCommand terminates the loop (and the process).
while True:
o = dill.loads(self.queue.get())
if isinstance(o, LogMessageCommand):
if o.level == logging.DEBUG:
self.debug(command=o)
elif o.level == logging.INFO:
self.info(command=o)
elif o.level == logging.WARNING:
self.warning(command=o)
elif o.level == logging.ERROR:
self.error(command=o)
elif o.level == logging.CRITICAL:
self.critical(command=o)
elif isinstance(o, UpdateProgressCommand):
self.update(command=o)
elif isinstance(o, NewTaskCommand):
self.set_task(command=o)
elif isinstance(o, FlushCommand):
self.flush()
elif isinstance(o, StacktraceCommand):
self.throw(command=o)
elif isinstance(o, SetConfigurationCommand):
self.set_configuration(command=o)
elif isinstance(o, ExitCommand):
return
elif isinstance(o, SetLevelCommand):
self.set_level(command=o) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def redraw(self):
""" Clears the console and performs a complete redraw of all progress bars and then awaiting logger messages if the minimum time elapsed since the last redraw is enough. """ |
# Body of redraw: rate-limited full repaint of progress bars, messages and exceptions.
# Check if the refresh time lapse has elapsed and if a change requires to redraw
lapse_since_last_refresh = millis() - self.refresh_timer
if not lapse_since_last_refresh > self.redraw_frequency_millis or not self.changes_made:
return
# If yes, then reset change indicator and chrono
self.changes_made = False
self.refresh_timer = millis()
# Clear the system console
os.system(self.os_flush_command)
# For each task, check if it has complete. If so, start its chrono
# Once the chrono has reached the maximum timeout time, delete the task
# For the other tasks that have not completed yet, redraw them
# Delete tasks that have been marked for deletion
if len(self.to_delete) > 0:
for task_id in self.to_delete:
del self.tasks[task_id]
self.to_delete = []
# If a task has been deleted, recalculate the maximum prefix length to keep progress bars aligned
self.longest_bar_prefix_size = self.longest_bar_prefix_value()
for task_id, task in self.tasks.items():
# If a task has completed, force its value to its maximum to prevent progress bar overflow
# Then start its timeout chrono
if task.progress >= task.total and not task.keep_alive:
# Prevent bar overflow
task.progress = task.total
# Start task's timeout chrono
if not task.timeout_chrono:
task.timeout_chrono = millis()
# If task's chrono has reached the maximum timeout time, mark it for deletion
elif millis() - task.timeout_chrono >= self.task_millis_to_removal:
self.to_delete.append(task_id)
# Redraw the task's progress bar through standard output
self.print_progress_bar(task=task)
# Keep space for future tasks if needed
slots = self.permanent_progressbar_slots - len(self.tasks)
if slots > 0:
for i in range(slots):
sys.stdout.write('\n\t\t---\n')
# Draw some space between bars and messages
if len(self.messages) > 0:
if self.permanent_progressbar_slots > 0 or len(self.tasks) > 0:
sys.stdout.write('\n\n')
# Print all the last log messages through standard output
for m in self.messages:
sys.stdout.write(m)
# Draw some space between messages and exceptions
if len(self.exceptions) > 0:
if len(self.messages) > 0:
sys.stdout.write('\n\n')
# Print all the exceptions through error output
for ex in self.exceptions:
sys.stderr.write(ex) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetServices(self,filename):
"""Returns a list of service objects handling this file type""" |
# Body of GetServices: collect all registered services that handle this file type.
objlist=[]
for sobj in self.services:
if sobj.KnowsFile(filename) :
objlist.append(sobj)
# no matching services: return None rather than an empty list
if len(objlist)==0:
return None
return objlist |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetServiceObj(self,servicename):
"""Given a service name string, returns the object that corresponds to the service""" |
# Body of GetServiceObj: case-insensitive lookup of a service by name; None if absent.
for sobj in self.services:
if sobj.GetName().lower()==servicename.lower():
return sobj
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def object_path(collection, id):
"""Returns path to the backing file of the object with the given ``id`` in the given ``collection``. Note that the ``id`` is made filesystem-safe by "normalizing" its string representation.""" |
# Body of object_path: build the filesystem path backing an object's record.
_logger.debug(type(id))
_logger.debug(id)
# accept a whole object dict in place of a bare id
if isinstance(id, dict) and 'id' in id:
id = id['id']
# make the id filesystem-safe while preserving case
normalized_id = normalize_text(str(id), lcase=False)
return os.path.join(_basepath, collection,
'%s.%s' % (normalized_id, _ext)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_object_at_path(path):
"""Load an object from disk at explicit path""" |
# Body of load_object_at_path: read, deserialize, and wrap the stored object.
with open(path, 'r') as f:
data = _deserialize(f.read())
return aadict(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_collection(collection, cache_size=1000, cache_cls=LRUCache, **cache_args):
"""Add a collection named ``collection``.""" |
# Body of add_collection: register a collection with an object cache and empty index map.
assert collection not in _db
# cache misses fall through to loading the object from disk
cache = cache_cls(maxsize=cache_size,
missing=lambda id: load_object(collection, id),
**cache_args)
_db[collection] = aadict(cache=cache, indexes={}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(base_path='data', serialize=json.dumps, deserialize=json.loads, file_ext='json'):
"""After you have added your collections, prepare the database for use.""" |
# Body of prepare: configure module-level (de)serialization and create collection dirs.
global _basepath, _deserialize, _serialize, _ext
_basepath = base_path
assert callable(serialize)
assert callable(deserialize)
_serialize = serialize
_deserialize = deserialize
_ext = file_ext
_logger.debug('preparing with base path %s and file ext %s',
_basepath, _ext)
# collections must be added (add_collection) before calling prepare
assert len(_db)
for collection in _db.keys():
c_path = collection_path(collection)
os.makedirs(c_path, exist_ok=True)
_logger.info('collection "%s": %d objects',
collection, object_count(collection)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def each_object(collection):
"""Yields each object in the given ``collection``. The objects are loaded from cache and failing that, from disk.""" |
# Body of each_object: yield every stored object in the collection.
# NOTE(review): the docstring mentions the cache, but this loads each file
# straight from disk via load_object_at_path -- confirm intended behavior.
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
yield load_object_at_path(path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def each_object_id(collection):
"""Yields each object ID in the given ``collection``. The objects are not loaded.""" |
# Body of each_object_id: yield object ids from backing filenames without loading objects.
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
# the id is the filename stem (everything after the last '/' minus the extension)
match = regex.match(r'.+/(.+)\.%s$' % _ext, path)
yield match.groups()[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_object(collection, obj):
"""Save an object ``obj`` to the given ``collection``. ``obj.id`` must be unique across all other existing objects in the given collection. If ``id`` is not present in the object, a *UUID* is assigned as the object's ``id``. Indexes already defined on the ``collection`` are updated after the object is saved. Returns the object. """ |
# Body of save_object: persist the object atomically, refresh cache and indexes.
if 'id' not in obj:
# assign a UUID when the object has no id yet
obj.id = uuid()
id = obj.id
path = object_path(collection, id)
# write to a temp file and move into place so readers never see a partial file
temp_path = '%s.temp' % path
with open(temp_path, 'w') as f:
data = _serialize(obj)
f.write(data)
shutil.move(temp_path, path)
# only refresh the cache entry if the object is already cached
if id in _db[collection].cache:
_db[collection].cache[id] = obj
_update_indexes_for_mutated_object(collection, obj)
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_index(collection, name, fields, transformer=None, unique=False, case_insensitive=False):
""" Add a secondary index for a collection ``collection`` on one or more ``fields``. The values at each of the ``fields`` are loaded from existing objects and their object ids added to the index. You can later iterate the objects of an index via ``each_indexed_object``. If you update an object and call ``save_object``, the index will be updated with the latest values from the updated object. If you delete an object via ``delete_object``, the object will be removed from any indexes on the object's collection. If a function is provided for ``transformer``, the values extracted from each object in the collection will be passed to the ``transformer``. The ``transformer`` should return a list of values that will go into the index. If ``unique`` is true, then there may only be at most one object in the collection with a unique set of values for each the ``fields`` provided. If ``case_insensitive`` is true, then the value stored in the index will be lower-cased and comparisons thereto will be lower-cased as well. """ |
# Body of add_index: register a secondary index and populate it from existing objects.
assert len(name) > 0
assert len(fields) > 0
indexes = _db[collection].indexes
index = indexes.setdefault(name, aadict())
index.transformer = transformer
index.value_map = {} # json([value]) => set(object_id)
index.unique = unique
index.case_insensitive = case_insensitive
index.fields = fields
# backfill: index every object already stored in the collection
for obj in each_object(collection):
_add_to_index(index, obj)
_logger.info('added %s, %s index to collection %s on fields: %s',
'unique' if unique else 'non-unique',
'case-insensitive' if case_insensitive else 'case-sensitive',
collection, ', '.join(fields)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_to_index(index, obj):
"""Adds the given object ``obj`` to the given ``index``""" |
# Body of _add_to_index: map the object's indexed value to its id, enforcing uniqueness.
id_set = index.value_map.setdefault(indexed_value(index, obj), set())
if index.unique:
# a unique index may hold at most one id per indexed value
if len(id_set) > 0:
raise UniqueConstraintError()
id_set.add(obj.id) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _remove_from_index(index, obj):
"""Removes object ``obj`` from the ``index``.""" |
# Body of _remove_from_index: drop the object's id from the index, ignoring absence.
try:
index.value_map[indexed_value(index, obj)].remove(obj.id)
except KeyError:
# value or id not present in the index: nothing to remove
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def each_indexed_object(collection, index_name, **where):
"""Yields each object indexed by the index with name ``name`` with ``values`` matching on indexed field values.""" |
# Body of each_indexed_object: look up matching ids in the index and yield the objects.
index = _db[collection].indexes[index_name]
for id in index.value_map.get(indexed_value(index, where), []):
yield get_object(collection, id) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_indexes_for_mutated_object(collection, obj):
    """Re-index a mutated object by removing and re-adding it in every
    index defined on ``collection``.
    """
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
        _add_to_index(idx, obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_indexes_for_deleted_object(collection, obj):
    """Drop a deleted object from every index on ``collection``."""
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_delta(__string: str) -> datetime.timedelta:
    """Parse an ISO-8601 duration string.

    Args:
        __string: Duration string to parse (e.g. ``'P3DT12H'``); the
            empty string parses as a zero duration.

    Returns:
        Parsed :class:`datetime.timedelta` object.

    Raises:
        ValueError: If the string is not a recognised duration.
    """
    if not __string:
        return datetime.timedelta(0)
    match = re.fullmatch(r"""
        P
        ((?P<days>\d+)D)?
        T?
        ((?P<hours>\d{1,2})H)?
        ((?P<minutes>\d{1,2})M)?
        ((?P<seconds>\d{1,2})?((?:\.(?P<microseconds>\d+))?S)?)
    """, __string, re.VERBOSE)
    if not match:
        raise ValueError('Unable to parse delta {!r}'.format(__string))
    parts = match.groupdict()
    # The fractional-seconds component is a decimal fraction, not a raw
    # microsecond count: '.5' means 500000 microseconds.  Pad/truncate
    # the digit string to six places before converting.
    frac = parts.pop('microseconds') or ''
    microseconds = int(frac.ljust(6, '0')[:6]) if frac else 0
    kwargs = {k: int(v) if v else 0 for k, v in parts.items()}
    return datetime.timedelta(microseconds=microseconds, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_delta(__timedelta: datetime.timedelta) -> str:
    """Format a duration as an ISO-8601 string.

    Args:
        __timedelta: Duration to process; zero formats as ``''``.

    Returns:
        ISO-8601 representation of the duration.
    """
    if not __timedelta:
        return ''
    hours, remainder = divmod(__timedelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    pieces = ['P']
    if __timedelta.days:
        pieces.append('{}D'.format(__timedelta.days))
    if hours or minutes or seconds:
        # The 'T' separator is only emitted when a time component follows.
        pieces.append('T')
    if hours:
        pieces.append('{:02d}H'.format(hours))
    if minutes:
        pieces.append('{:02d}M'.format(minutes))
    if seconds:
        pieces.append('{:02d}S'.format(seconds))
    return ''.join(pieces)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_datetime(__string: str) -> datetime.datetime:
    """Parse an ISO-8601 datetime string.

    An empty string yields the current UTC time; a parsed naive
    datetime is assumed to be UTC.

    Args:
        __string: Datetime string to parse.

    Returns:
        Parsed timezone-aware datetime object.
    """
    if __string:
        # pylint: disable=no-member
        result = ciso8601.parse_datetime(__string)
    else:
        result = datetime.datetime.now(datetime.timezone.utc)
    if result.tzinfo is None:
        result = result.replace(tzinfo=datetime.timezone.utc)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_device_info(dev_name):
    """Print identifying information about the given NI-DAQ device.

    Usage: print_device_info("Dev1")

    :param dev_name: NI-DAQmx device name, e.g. ``"Dev1"``.
    """
    string_buffer = ctypes.create_string_buffer(1024)
    # Attribute ids paired with their printable names so they cannot
    # drift out of sync (the original kept two parallel lists).
    attributes = [
        ('DAQmx_Dev_ProductType', pydaq.DAQmx_Dev_ProductType),
        ('DAQmx_Dev_SerialNum', pydaq.DAQmx_Dev_SerialNum),
        ('DAQmx_Dev_AO_PhysicalChans', pydaq.DAQmx_Dev_AO_PhysicalChans),
        ('DAQmx_Dev_CI_PhysicalChans', pydaq.DAQmx_Dev_CI_PhysicalChans),
        ('DAQmx_Dev_CO_PhysicalChans', pydaq.DAQmx_Dev_CO_PhysicalChans),
        ('DAQmx_Dev_DO_Lines', pydaq.DAQmx_Dev_DO_Lines),
    ]
    print('Device Name:\t' + dev_name)
    for attr_name, attr_id in attributes:
        pydaq.DAQmxGetDeviceAttribute(dev_name, attr_id, string_buffer)
        # The original used a Python 2 ``print`` statement here, which is
        # a SyntaxError under Python 3; use the function form throughout.
        print('\t' + attr_name + ':\t' + str(string_buffer.value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_device_name_list():
    """Return a list of installed DAQ device names."""
    name_buffer = ctypes.create_string_buffer(1024)
    pydaq.DAQmxGetSysDevNames(name_buffer, len(name_buffer))
    # The driver returns a single comma-separated string.
    return name_buffer.value.split(', ')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset_analog_sample_clock(state=False):
    """Drive every device's sample-clock output line to ``state``.

    Use this just before starting a run to avoid timing issues.
    """
    clock_lines = (expt_settings.dev1_clock_out_name,
                   expt_settings.dev2_clock_out_name,
                   expt_settings.dev3_clock_out_name,
                   expt_settings.dev4_clock_out_name)
    for line in clock_lines:
        set_digital_line_state(line, state)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_digital_line_state(line_name, state):
    """Set the state of a single digital line.

    line_name (str) - Physical name of ONE line, e.g.
        ``"Dev1/port0/line3"``.  Ranges such as ``"Dev1/port0/line0:5"``
        are not supported and would behave unexpectedly.  See
        http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/
        for line-naming details.
    state (bool) - True drives the line high, False drives it low.
    """
    # The trailing digits of the line name select which bit to set.
    bit_index = int(line_name.split('line')[-1])
    level = (1 << bit_index) if state else 0
    # Two samples are the minimum for a buffered write (configuring a
    # timing makes the write buffered).  See
    # http://zone.ni.com/reference/en-XX/help/370471Y-01/daqmxcfunc/daqmxwritedigitalu32/
    dig_data = np.full(2, level, dtype="uint32")
    DigitalOutputTask(line_name, dig_data).StartAndWait()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def StartAndWait(self):
    """Start the task, block until it completes, then release it."""
    self.StartTask()
    # Block the caller with no timeout until the hardware reports done.
    self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely)
    # Release the task's resources once finished.
    self.ClearTask()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isDone(self):
    """Return a truthy value if the task has completed."""
    status = pydaq.bool32()
    # DAQmx reports completion through an out-parameter.
    self.IsTaskDone(ctypes.byref(status))
    return status.value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def padDigitalData(self, dig_data, n):
    """Return ``dig_data`` extended with copies of its last element so
    that its length is a multiple of ``n``.

    The input array is returned unchanged when already aligned.
    """
    n = int(n)
    remainder = len(dig_data) % n
    if remainder == 0:
        return dig_data  # already a multiple of n
    tail = np.full(n - remainder, dig_data[-1], dtype="uint32")
    return np.concatenate((dig_data, tail))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def EveryNCallback(self):
    """Called by PyDAQmx whenever a callback event occurs.

    Counts callback events and, once the current threshold
    (``self.callback_step``) is reached, fires every function scheduled
    for that point in ``self.callback_funcs`` and advances to the next
    entry of ``self.callback_function_list``.
    """
    # print('ncall ', self.n_callbacks)
    if self.do_callbacks:
        if self.n_callbacks >= self.callback_step:
            # print('n_callbacks', self.n_callbacks)
            # Fire every function scheduled for this threshold.
            for func, func_dict in self.callback_funcs:
                func(func_dict)
                print('func:::', func)
            self.latest_callback_index +=1
            if self.latest_callback_index >= len(self.callback_function_list):
                # print('done with callbacks')
                # No more scheduled callbacks; stop checking thresholds.
                self.do_callbacks = False
            else:
                # Load the next (time, functions) pair and convert its
                # time into a callback-count threshold.
                out = self.callback_function_list[self.latest_callback_index]
                callback_time = out[0]
                self.callback_step = int(callback_time/expt_settings.callback_resolution)
                # print('updating callback step', self.callback_step)
                self.callback_funcs = out[1]
        # Count every event while callbacks remain pending.
        self.n_callbacks += 1
    #print('n_callbacks', self.n_callbacks)
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_vads_trans_id(vads_site_id, vads_trans_date):
    """Return a default value for the mandatory ``vads_trans_id`` field.

    The id is six random numeric characters.  Uniqueness is required per
    (``vads_site_id``, first 8 chars of ``vads_trans_date``); the chance
    of two identical random ids in one day is treated as negligible.
    """
    return ''.join(str(random.randint(0, 9)) for _ in range(6))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_signature(payment_request):
    """Return the PayZen signature for the transaction.

    All non-empty model fields whose names start with ``vads_`` are
    collected, ordered alphabetically, joined by ``'+'`` together with
    the PayZen certificate, and the result is SHA-1 hashed.
    """
    vads_values = {}
    for field in payment_request._meta.fields:
        if field.name.startswith('vads_'):
            value = field.value_from_object(payment_request)
            if value:
                vads_values[field.name] = value
    parts = [str(vads_values[name]) for name in sorted(vads_values)]
    parts.append(app_settings.VADS_CERTIFICATE)
    signed_payload = '+'.join(parts)
    return hashlib.sha1(signed_payload.encode("utf-8")).hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_response(data):
    """Process a payment response.

    Validates the response signature, then creates or updates the
    matching PaymentResponse via its model form.  Returns the saved
    response instance, or None when the signature or form is invalid.
    """
    # We check if the signature is valid. If not return
    if not is_signature_valid(data):
        logger.warning(
            "Django-Payzen : Response signature detected as invalid",
            extra={"stack": True}
        )
        return None
    # Imported lazily to avoid circular imports at module load time.
    from . import forms
    from . import models
    # The signature is valid.  A response is uniquely identified by the
    # (trans_id, trans_date, site_id) triple.
    vads_trans_id = data.get("vads_trans_id")
    vads_trans_date = data.get("vads_trans_date")
    vads_site_id = data.get("vads_site_id")
    try:
        # Update the existing response if one was already recorded.
        instance = models.PaymentResponse.objects.get(
            vads_trans_id=vads_trans_id,
            vads_trans_date=vads_trans_date,
            vads_site_id=vads_site_id)
        form = forms.PaymentResponseForm(data, instance=instance)
    except models.PaymentResponse.DoesNotExist:
        form = forms.PaymentResponseForm(data)
    if form.is_valid():
        response = form.save()
        logger.info("Django-Payzen : Transaction {} response received !"
                    .format(response.vads_trans_id))
    else:
        logger.error("Django-Payzen : Response could not be saved - {} {}"
                     .format(form.errors, data),
                     extra={"stack": True})
        response = None
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self):
    """Create substitution nodes for hyperlinks.

    Phase 1: find reference nodes whose URI or name contains
    substitutions ("|foo|") and attach actual substitution_reference
    children so the standard substitution processor can resolve them.
    """
    # In this phase, we look for hyperlinks (references nodes)
    # that contain substitutions (of the form "|foo|").
    # We then add actual "substitution"s nodes to those references,
    # so that they can be replaced by the substitution processor.
    subst_re = re.compile(self.subst_pattern)
    for link in self.document.traverse(self._maybe_hyperlink):
        if 'refuri' not in link:
            continue
        # Note: "target" nodes do not have a "name" attribute.
        if '|' not in link['refuri'] and '|' not in link.get('name', ''):
            continue
        # This list acts as a cache so that only one substitution node
        # is added as a child for each substitution name.
        substitutions = []
        matches = subst_re.findall(link['refuri']) + \
            subst_re.findall(link.get('name', ''))
        for subref_text in matches:
            if subref_text in substitutions:
                continue
            substitutions.append(subref_text)
            subref_node = nodes.substitution_reference(subref_text)
            link.append(subref_node)
            self.document.note_substitution_ref(subref_node, subref_text)
        # Build a map of substitutions names to child indices
        # (minus one since the actual link label is in link[0]).
        link['varlinks'] = \
            dict(zip(substitutions, range(len(substitutions))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self):
    """Replace substitutions in hyperlinks with their contents.

    Phase 2: the temporary substitution children added in phase 1 have
    been resolved by now; splice their text into the link URI/label and
    remove the helper nodes.
    """
    # In this phase, we replace the substitutions in hyperlinks
    # with the contents of the sub-nodes introduced during phase 1.
    # We also remove those temporary nodes from the tree.
    subst_re = re.compile(self.subst_pattern)
    # Apply the substitutions to hyperlink references.
    for link in self.document.traverse(nodes.reference):
        substitutions = link.get('varlinks')
        if not substitutions:
            continue
        # Child 0 is the link label, so resolved children start at 1.
        replacer = self._replace(substitutions, link.children, 1)
        link['refuri'] = subst_re.sub(replacer, link['refuri'])
        content = subst_re.sub(replacer, link[0])
        # Cleanup the temporary nodes and recreate the node's content.
        link.clear()
        del link['varlinks']
        link.append(nodes.Text(content))
    # Do the same with hyperlink targets.
    for link in self.document.traverse(nodes.target):
        substitutions = link.get('varlinks')
        if not substitutions:
            continue
        # Targets have no label child, so resolved children start at 0.
        replacer = self._replace(substitutions, link.children, 0)
        link['refuri'] = subst_re.sub(replacer, link['refuri'])
        # Cleanup the temporary nodes.
        link.clear()
        del link['varlinks']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_global_config(path_dict_or_stream):
    '''Set the global configuration.

    Call this from `main()` with a file system path, stream
    object, or a dict.  Calling it repeatedly with the same path is
    safe.  Calling it with a different path or repeatedly with a
    stream or dict requires an explicit call to
    :func:`clear_global_config`.

    :param path_dict_or_stream: source of configuration
    :return: the new global configuration mapping
    :raises Exception: on a conflicting path or unsupported argument
    '''
    # ``collections.Mapping`` was removed in Python 3.10; the ABC lives
    # in ``collections.abc`` on Python 3 (Python 2 kept it top-level).
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover - Python 2 fallback
        from collections import Mapping
    mapping = None
    stream = None
    opened_here = False  # only close streams we opened ourselves
    global _config_file_path
    global _config_cache
    if isinstance(path_dict_or_stream, string_types):
        path = path_dict_or_stream
        if _config_file_path and _config_file_path != path:
            raise Exception('set_global_config(%r) differs from %r, '
                            'consider calling clear_global_config first' %
                            (path, _config_file_path))
        _config_file_path = path
        stream = open(path)
        opened_here = True
    elif isinstance(path_dict_or_stream, Mapping):
        mapping = path_dict_or_stream
    elif hasattr(path_dict_or_stream, 'read'):
        stream = path_dict_or_stream
    else:
        raise Exception('set_global_config(%r) instead of a path, '
                        'mapping object, or stream open for reading' %
                        path_dict_or_stream)
    if stream is not None:
        try:
            mapping = yaml.load(stream, Loader)
        finally:
            # Close only files opened by this call; caller-provided
            # streams remain the caller's responsibility.
            if opened_here:
                stream.close()
    _config_cache = mapping
    # TODO: convert to frozen dict?
    return _config_cache
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _temporary_config():
    '''Temporarily replace the global configuration.

    Use this in a 'with' statement.  The inner block may freely
    manipulate the global configuration; the original global
    configuration is restored at exit, even if the block raises.

    >>> with yakonfig.yakonfig._temporary_config():
    ...     yakonfig.yakonfig.set_global_config({'a': 'b'})
    ...     print(yakonfig.yakonfig.get_global_config('a'))
    b
    '''
    global _config_cache, _config_file_path
    old_cc = _config_cache
    old_cfp = _config_file_path
    clear_global_config()
    try:
        yield
    finally:
        # Restore the saved state even when the body raised, so an
        # exception inside the 'with' block cannot leak temporary
        # configuration into the rest of the process.
        _config_cache = old_cc
        _config_file_path = old_cfp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def include_yaml(self, node):
    '''
    Load another YAML document from the path given by the node's value.
    '''
    filename = self.construct_scalar(node)
    if not filename.startswith('/'):
        # Relative paths are resolved against the including document's
        # directory, which is only known for path-backed streams.
        if self._root is None:
            raise Exception('!include_yaml %s is a relative path, '
                            'but stream lacks path' % filename)
        filename = os.path.join(self._root, filename)
    with self.open(filename, 'r') as fin:
        return yaml.load(fin, Loader)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def paystealth(stealthaddr,ephempriv=None,_doctest_nonce=-1):
    '''
    Input a stealth address, and optionally an ephemeral private key,
    and generate a payment pubkey and stealth OP_RETURN data.
    (The OP_RETURN data is just a nonce and the ephemeral public key.)
    Works with standard single spend key stealth addresses, which
    begin with the '2a00' version bytes, and have 00-08 prefix bits
    and any 1-byte prefix.
    Prefix ff with 08 prefix bits and nonce starts at 0:
    >>> paystealth("vJmvinTgWP1phdFnACjc64U5iMExyv7JcQJVZjMA15MRf2KzmqjSpgDjmj8NxaFfiMBUEjaydmNfLBCcXstVDfkjwRoFQw7rLHWdFk", \
                   '824dc0ed612deca8664b3d421eaed28827eeb364ae76abc9a5924242ddca290a', 0)
    ('03e05931191100fa6cd072b1eda63079736464b950d2875e67f2ab2c8af9b07b8d', \
'0600000124025c6fb169b0ff1c95426fa073fadc62f50a6e98482ec8b3f26fb73006009d1c00')
    '''
    if ephempriv is None:
        ephempriv = genkeyhex()
    addrhex = b58d(stealthaddr)
    # 142 hex chars = version(2) + scan pubkey + count + spend pubkey +
    # prefix-bits byte + prefix byte.
    assert len(addrhex) == 142
    assert int(addrhex[-4:-2],16) < 9
    # Assume one spend key, and 1-byte prefix and prefix-bits
    assert addrhex[:4] == '2a00'
    assert addrhex[70:72] == '01'
    scanpub = addrhex[4:70]
    spendpub = addrhex[72:-4]
    ephempub = privtopub(ephempriv,True)
    # ECDH shared secret between ephemeral key and the scan pubkey.
    secret = sha256(multiplypub(scanpub,ephempriv,True))
    # Payment key = spend pubkey + secret*G.
    paykey = addpubs(spendpub,privtopub(secret,False),True)
    if _doctest_nonce == -1:
        nonce = int(genkeyhex(),16) % (2**32)
    else:
        nonce = _doctest_nonce
    assert nonce < 4294967296 and nonce >= 0
    startingnonce = nonce
    # Grind nonces until hash256 of the OP_RETURN payload matches the
    # address's prefix-bit constraint (immediately when prebits == 0).
    while True:
        if nonce > 4294967295:
            nonce = 0
        noncehex = dechex(nonce,4)
        hashprefix = unhexlify(hash256('6a2606' + noncehex + ephempub))[::-1][:4]
        prebits = int(addrhex[-4:-2],16)
        if prebits == 0:
            break
        prefix = unhexlify(addrhex[-2:])
        # Location of prefix should be explicit if it's ever more than 1 byte
        bytepos = 0
        cont = False
        while prebits > 8: # Not necessary with asserted 1-byte prefix
            if hexstrlify(prefix)[2*bytepos:(2*bytepos)+2] != \
               hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2]:
                cont = True
                break
            prebits = prebits - 8
            bytepos = bytepos + 1
        if cont:
            continue
        prefixhex = hexstrlify(prefix)[2*bytepos:(2*bytepos)+2]
        if prefixhex == "": prefixhex = hexstrlify(b"00")
        hashprefixhex = hexstrlify(hashprefix)[2*bytepos:(2*bytepos)+2]
        if hashprefixhex == "": hashprefixhex = hexstrlify(b"00")
        # Compare only the top ``prebits`` bits of the prefix byte.
        prefixbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(prefixhex, 16)
        hashbits = (((1 << (8 - prebits)) - 1) ^ 0xff) & int(hashprefixhex, 16)
        if prefixbits == hashbits:
            cont = False
        else:
            cont = True
        if not cont:
            break
        nonce += 1
        if nonce == startingnonce:
            raise Exception("No valid nonce was found. A different ephemeral key must be used.")
    return paykey, '06' + noncehex + ephempub
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def receivestealth(scanpriv,spendpriv,ephempub):
    '''
    Derive the private key for a stealth payment from the scan and
    spend private keys plus the ephemeral public key.

    Input private keys should be 64-char hex strings, and ephemeral
    public key should be a 66-char hex compressed public key.

    >>> receivestealth('af4afaeb40810e5f8abdbb177c31a2d310913f91cf556f5350bca10cbfe8b9ec', \
                       'd39758028e201e8edf6d6eec6910ae4038f9b1db3f2d4e2d109ed833be94a026', \
                       '03b8a715c9432b2b52af9d58aaaf0ccbdefe36d45e158589ecc21ba2f064ebb315')
    '6134396c3bc9a56ccaf80cd38728e6d3a7751524246e7924b21b08b0bfcc3cc4'
    '''
    # ECDH: hash of scanpriv * ephempub recovers the shared secret, then
    # add it to the spend key to get the one-off payment key.
    shared_secret = sha256(multiplypub(ephempub, scanpriv, True))
    return addprivkeys(shared_secret, spendpriv)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxy(request):
    """Pass an HTTP request on to another server.

    Rebuilds the incoming request's path, query string and HTTP_*
    headers, issues a GET against HOST, and copies the upstream
    response (minus hop-by-hop headers) into an HttpResponse.
    """
    # TODO: don't hardcode http
    uri = "http://" + HOST + request.META['PATH_INFO']
    if request.META['QUERY_STRING']:
        uri += '?' + request.META['QUERY_STRING']
    # Forward only the HTTP_* entries of the WSGI environ, converted
    # back to their original header names.
    headers = {}
    for name, val in six.iteritems(request.environ):
        if name.startswith('HTTP_'):
            name = header_name(name)
            headers[name] = val
    # TODO: try/except
    http = Http()
    # Let the client see redirects instead of following them here.
    http.follow_redirects = False
    logger.debug("GET for: %s" % uri)
    info, content = http.request(uri, 'GET', headers=headers)
    response = HttpResponse(content, status=info.pop('status'))
    # Hop-by-hop headers are connection-specific and must not be relayed.
    for name, val in info.items():
        if not is_hop_by_hop(name):
            response[name] = val
    logger.info("PROXY to: %s" % uri)
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tabulate(data, header=True, headers=None, accessors=None,
             **table_options):
    """Produce tabular output of ``data`` without manually creating and
    configuring a Table instance.

    The configured Table instance is returned for any further use.
    """
    if header and not headers:
        # Consume the first row as the header row.
        data = iter(data)
        try:
            headers = next(data)
        except StopIteration:
            pass
    if headers and hasattr(headers, 'items') and accessors is None:
        # Dict mode: the first "row" is a mapping, so its keys become
        # accessors and prettified keys become the display headers; the
        # mapping itself is kept as the first data row.
        data = itertools.chain([headers], data)
        accessors = list(headers)
        headers = [' '.join(word.capitalize()
                            for word in key.replace('_', ' ').split())
                   for key in accessors]
    table = Table(headers=headers, accessors=accessors, **table_options)
    try:
        table.print(data)
    except RowsNotFound:
        pass
    return table
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_filter(self, next_filter):
    """Produce formatted cell output from the raw data stream."""
    next(next_filter)  # prime the downstream coroutine
    while True:
        record = (yield)
        cells = [self.cell_format(accessor(record))
                 for accessor in self.accessors]
        next_filter.send(cells)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def viewable_width(self):
    """Combined character width available once padding is removed."""
    padding_total = sum(spec['padding'] for spec in self.colspec)
    return sum(self.widths) + padding_total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_row(self, row, rstrip=True):
    """Join the pre-rendered cells and write the line to the table's
    output device, optionally stripping trailing whitespace.
    """
    line = ''.join(str(cell) for cell in row)
    if rstrip:
        line = line.rstrip()
    print(line, file=self.table.file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_fullwidth(self, value):
    """Render ``value`` as a single full-width column.

    The padding is inherited from the first cell's colspec, which in
    turn inherits from column_padding.
    """
    assert isinstance(value, VTMLBuffer)
    padding = self.colspec[0]['padding']
    formatter = self.make_formatter(self.width - padding, padding,
                                    self.table.title_align)
    return VTMLBuffer('\n').join(formatter(value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_formatter(self, width, padding, alignment, overflow=None):
    """Build a formatter function closing over the width, padding and
    alignment settings for one column.
    """
    if overflow is None:
        overflow = self.overflow_default
    # Select how content wider than the column is handled.
    if overflow == 'clip':
        def overflower(buf):
            return [buf.clip(width, self.table.cliptext)]
    elif overflow == 'wrap':
        def overflower(buf):
            return buf.wrap(width)
    elif overflow == 'preformatted':
        def overflower(buf):
            return buf.split('\n')
    else:
        raise RuntimeError("Unexpected overflow mode: %r" % overflow)
    align = self.get_aligner(alignment, width)
    pad = self.get_aligner('center', width + padding)
    def formatter(value):
        return [pad(align(chunk)) for chunk in overflower(value)]
    return formatter
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_formatters(self):
    """Create one formatter function per column so the render spec can
    justify rows without recomputing settings.
    """
    formatters = []
    for spec, inner_width in zip(self.colspec, self.widths):
        formatters.append(self.make_formatter(inner_width,
                                              spec['padding'],
                                              spec['align'],
                                              spec['overflow']))
    return formatters
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _uniform_dist(self, spread, total):
""" Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform. """ |
fraction, fixed_increment = math.modf(total / spread)
fixed_increment = int(fixed_increment)
balance = 0
dist = []
for _ in range(spread):
balance += fraction
withdrawl = 1 if balance > 0.5 else 0
if withdrawl:
balance -= withdrawl
dist.append(fixed_increment + withdrawl)
return dist |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filters(self):
    """Return the coroutine filters forming the render pipeline,
    ordered from first to last stage.
    """
    pipeline = [
        self.compute_style_filter,
        self.render_filter,
        self.calc_widths_filter,
        self.format_row_filter,
        self.align_rows_filter,
    ]
    return pipeline
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_row_filter(self, next_filter):
    """Apply overflow, justification, padding and expansion to each
    incoming row, emitting one output line per wrapped row line.
    """
    next(next_filter)  # prime the downstream coroutine
    while True:
        cells = (yield)
        assert all(isinstance(cell, VTMLBuffer) for cell in cells)
        rendered = (formatter(cell)
                    for cell, formatter in zip(cells, self.formatters))
        # Columns may wrap to different heights; pad with None rows.
        for line in itertools.zip_longest(*rendered):
            next_filter.send(line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def width_normalize(self, width):
    """Resolve a width style to a fixed column width.

    A fraction in (0, 1) is a percentage of the usable width; any other
    number is a fixed width; None passes through unchanged.
    """
    if width is None:
        return None
    if 0 < width < 1:
        return int(width * self.usable_width)
    return int(width)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_widths_filter(self, next_filter):
    """Coroutine that analyzes the incoming data stream to choose
    optimal column widths.

    Incoming rows may be buffered until there is enough information to
    make good width choices.  Widths are recomputed when the desired
    (terminal) width changes, e.g. after a resize event.
    """
    window_sent = not not self.data_window
    next_primed = False
    genexit = None
    if not self.data_window:
        # Buffer rows until we have min_render_prefill, or until we hit
        # max_render_prefill / max_render_delay, whichever comes first.
        start = time.monotonic()
        while len(self.data_window) < self.min_render_prefill or \
                (len(self.data_window) < self.max_render_prefill and
                 (time.monotonic() - start) < self.max_render_delay):
            try:
                self.data_window.append((yield))
            except GeneratorExit as e:
                # Remember the close request; re-raise after flushing
                # the buffered window downstream.
                genexit = e
                break
    while True:
        if self.width != self.desired_width:
            # Terminal width changed: recompute all column widths.
            self.headers_drawn = False  # TODO: make optional
            self.width = self.desired_width
            remaining = self.usable_width
            widths = [x['width'] for x in self.colspec]
            preformatted = [i for i, x in enumerate(self.colspec)
                            if x['overflow'] == 'preformatted']
            # Fixed/fractional widths are resolved now; columns with no
            # width spec are collected for flex or uniform sizing.
            unspec = []
            for i, width in enumerate(widths):
                fixed_width = self.width_normalize(width)
                if fixed_width is None:
                    unspec.append(i)
                else:
                    widths[i] = fixed_width
                    remaining -= fixed_width
            if unspec:
                if self.table.flex and self.data_window:
                    for i, w in self.calc_flex(self.data_window, remaining,
                                               unspec, preformatted):
                        widths[i] = w
                else:
                    dist = self._uniform_dist(len(unspec), remaining)
                    for i, width in zip(unspec, dist):
                        widths[i] = width
            self.widths = widths
            self.formatters = self.make_formatters()
        if not next_primed:
            next(next_filter)
            next_primed = True
        if not window_sent:
            # Flush rows buffered during the prefill phase.
            for x in self.data_window:
                next_filter.send(x)
            window_sent = True
        if genexit:
            raise genexit
        data = (yield)
        self.data_window.append(data)
        next_filter.send(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_flex(self, data, max_width, cols, preformatted=None):
    """Scan ``data`` and return (column, width) pairs that best fit the
    ``max_width`` constraint.

    If some columns must overflow, concession widths are computed by
    :meth:`adjust_widths`.  ``preformatted`` columns are never shrunk.
    """
    if preformatted is None:
        preformatted = []
    colstats = []
    for i in cols:
        # Collect the rendered line lengths seen in this column, plus
        # the header length and the configured minimum width.
        lengths = [len(xx) for x in data
                   for xx in x[i].text().splitlines()]
        if self.headers:
            lengths.append(len(self.headers[i]))
        lengths.append(self.width_normalize(self.colspec[i]['minwidth']))
        counts = collections.Counter(lengths)
        colstats.append({
            "column": i,
            "preformatted": i in preformatted,
            "counts": counts,
            # 'offt' starts at the widest observed value and is chipped
            # away by adjust_widths when space runs out.
            "offt": max(lengths),
            "chop_mass": 0,
            "chop_count": 0,
            "total_mass": sum(a * b for a, b in counts.items())
        })
    self.adjust_widths(max_width, colstats)
    required = sum(x['offt'] for x in colstats)
    justify = self.table.justify if self.table.justify is not None else \
        self.justify_default
    if required < max_width and justify:
        # Fill remaining space proportionately.
        remaining = max_width
        for x in colstats:
            x['offt'] = int((x['offt'] / required) * max_width)
            remaining -= x['offt']
        if remaining:
            # Integer truncation above can leave a few spare columns;
            # spread them uniformly.
            dist = self._uniform_dist(len(cols), remaining)
            for adj, col in zip(dist, colstats):
                col['offt'] += adj
    return [(x['column'], x['offt']) for x in colstats]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adjust_widths(self, max_width, colstats):
    """Shrink column widths with the least negative effect on the
    viewing experience.

    Each candidate narrowing is scored by the fraction of the column's
    total character mass that would be clipped; the cheapest column is
    narrowed one character at a time until everything fits or every
    column is at its minimum width.
    """
    # Preformatted columns are never narrowed; their width is simply
    # subtracted from the budget.
    adj_colstats = []
    for x in colstats:
        if not x['preformatted']:
            adj_colstats.append(x)
        else:
            max_width -= x['offt']
    # Cost of narrowing column x by one more character.
    next_score = lambda x: (x['counts'][x['offt']] + x['chop_mass'] +
                            x['chop_count']) / x['total_mass']
    cur_width = lambda: sum(x['offt'] for x in adj_colstats)
    min_width = lambda x: self.width_normalize(
        self.colspec[x['column']]['minwidth'])
    while cur_width() > max_width:
        nextaffects = [(next_score(x), i)
                       for i, x in enumerate(adj_colstats)
                       if x['offt'] > min_width(x)]
        if not nextaffects:
            break  # All columns are as small as they can get.
        nextaffects.sort()
        # Narrow the cheapest column and account for the newly clipped
        # characters in its running chop totals.
        chop = adj_colstats[nextaffects[0][1]]
        chop['chop_count'] += chop['counts'][chop['offt']]
        chop['chop_mass'] += chop['chop_count']
        chop['offt'] -= 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_key(self, value):
    """Return a camelCase key derived from ``value``, made unique among
    the keys already produced by this instance.
    """
    if value:
        words = [self.key_filter.sub('', word)
                 for word in self.key_split.split(value.lower())]
        key = words[0] + ''.join(word.capitalize() for word in words[1:])
    else:
        key = ''
    if key in self.seen_keys:
        # Disambiguate duplicates with an increasing numeric suffix.
        suffix = 1
        while '%s%d' % (key, suffix) in self.seen_keys:
            suffix += 1
        key = '%s%d' % (key, suffix)
    self.seen_keys.add(key)
    return key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_type_validator(value_type):
"""Build a validator that only checks the type of a value.""" |
def type_validator(data):
"""Validate instances of a particular type."""
if isinstance(data, value_type):
return data
raise NotValid('%r is not of type %r' % (data, value_type))
return type_validator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_static_validator(exact_value):
"""Build a validator that checks if the data is equal to an exact value.""" |
def static_validator(data):
"""Validate by equality."""
if data == exact_value:
return data
raise NotValid('%r is not equal to %r' % (data, exact_value))
return static_validator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_iterable_validator(iterable):
    """Build a validator from a list/tuple/set of alternative schemas.

    Each element of the data must be validated by at least one of the
    sub-schemas, and the data must be of the same container type.
    """
    alternatives = [parse_schema(schema) for schema in iterable]
    container = type(iterable)

    def item_validator(value):
        """Return the first successful validation of *value*."""
        for alternative in alternatives:
            try:
                return alternative(value)
            except NotValid:
                continue
        raise NotValid('%r invalidated by anything in %s.' % (value, iterable))

    def iterable_validator(data):
        """Validate container type and every contained item."""
        if type(data) is not container:
            raise NotValid('%r is not of type %s' % (data, type(iterable)))
        return container(item_validator(value) for value in data)

    return iterable_validator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _determine_keys(dictionary):
    """Split a schema dict into mandatory, optional, type and default keys.

    Returns (mandatory, optional, types, defaults) where each maps the
    original key (unwrapped for Optional) to a parsed sub-schema, and
    defaults maps optional keys to (default, null_values) pairs.
    """
    optional = {}
    defaults = {}
    mandatory = {}
    types = {}
    for key, value in dictionary.items():
        if isinstance(key, Optional):
            optional[key.value] = parse_schema(value)
            has_default = (isinstance(value, BaseSchema)
                           and value.default is not UNSPECIFIED)
            if has_default:
                defaults[key.value] = (value.default, value.null_values)
            continue
        if type(key) is type:
            # A bare type as key matches any key of that type.
            types[key] = parse_schema(value)
            continue
        mandatory[key] = parse_schema(value)
    return mandatory, optional, types, defaults
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_mandatory_keys(mandatory, validated, data, to_validate):
"""Validate the manditory keys.""" |
errors = []
for key, sub_schema in mandatory.items():
if key not in data:
errors.append('missing key: %r' % (key,))
continue
try:
validated[key] = sub_schema(data[key])
except NotValid as ex:
errors.extend(['%r: %s' % (key, arg) for arg in ex.args])
to_validate.remove(key)
return errors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_optional_key(key, missing, value, validated, optional):
"""Validate an optional key.""" |
try:
validated[key] = optional[key](value)
except NotValid as ex:
return ['%r: %s' % (key, arg) for arg in ex.args]
if key in missing:
missing.remove(key)
return [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_type_key(key, value, types, validated):
"""Validate a key's value by type.""" |
for key_schema, value_schema in types.items():
if not isinstance(key, key_schema):
continue
try:
validated[key] = value_schema(value)
except NotValid:
continue
else:
return []
return ['%r: %r not matched' % (key, value)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_other_keys(optional, types, missing, validated, data, to_validate):
    """Validate the keys left over after the mandatory pass.

    Optional keys are checked with their own schema; anything else falls
    through to the type-keyed schemas. Returns a list of error strings.
    """
    errors = []
    for key in to_validate:
        value = data[key]
        if key in optional:
            errors += _validate_optional_key(
                key, missing, value, validated, optional)
        else:
            errors += _validate_type_key(key, value, types, validated)
    return errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_dict_validator(dictionary):
    """Build a validator for dict data from a dict schema."""
    mandatory, optional, types, defaults = _determine_keys(dictionary)

    def dict_validator(data):
        """Validate a dict; fill defaults for optional keys never seen."""
        if not isinstance(data, dict):
            raise NotValid('%r is not of type dict' % (data,))
        result = {}
        unseen_defaults = list(defaults.keys())
        remaining = list(data.keys())
        problems = _validate_mandatory_keys(
            mandatory, result, data, remaining)
        problems.extend(
            _validate_other_keys(
                optional, types, unseen_defaults, result, data, remaining))
        if problems:
            raise NotValid(*problems)
        for key in unseen_defaults:
            result[key] = defaults[key][0]
        return result

    return dict_validator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_schema(schema):
    """Turn a schema definition into a validator callable.

    Dispatch order matters: schema objects first, then bare types, dicts,
    plain iterables, arbitrary callables, and finally literal values.
    """
    if isinstance(schema, BaseSchema):
        return schema.validate
    dispatch = (
        (lambda s: type(s) is type, _build_type_validator),
        (lambda s: isinstance(s, dict), _build_dict_validator),
        (lambda s: type(s) in (list, tuple, set), _build_iterable_validator),
        (callable, _build_callable_validator),
    )
    for matches, build in dispatch:
        if matches(schema):
            return build(schema)
    return _build_static_validator(schema)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, data):
    """Validate *data*; raise NotValid for invalid input.

    Runs the base schema, then every additional validator; when a default
    is configured, substitutes it for null values (or None).
    """
    result = self._validated(data)
    problems = [
        "%s invalidated by '%s'" % (result, _get_repr(check))
        for check in self.additional_validators
        if not check(result)
    ]
    if problems:
        raise NotValid(*problems)
    if self.default is UNSPECIFIED:
        return result
    if self.null_values is not UNSPECIFIED and result in self.null_values:
        return self.default
    return self.default if result is None else result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validated(self, data):
"""Validate data if any subschema validates it.""" |
errors = []
for sub in self.schemas:
try:
return sub(data)
except NotValid as ex:
errors.extend(ex.args)
raise NotValid(' and '.join(errors)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validated(self, data):
"""Validate data if all subschemas validate it.""" |
for sub in self.schemas:
data = sub(data)
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validated(self, data):
"""Convert data or die trying.""" |
try:
return self.convert(data)
except (TypeError, ValueError) as ex:
raise NotValid(*ex.args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validated(self, values):
"""Validate if the values are validated one by one in order.""" |
if self.length != len(values):
raise NotValid(
"%r does not have exactly %d values. (Got %d.)" % (
values, self.length, len(values)))
return type(self.schemas)(
self.schemas[i].validate(v) for i, v in enumerate(values)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _connectToFB(self):
    """Establish the GraphAPI connection to Facebook.

    Returns True when already connected or after a successful login,
    False when the GraphAPI client could not be created. Sets
    ``self.fb`` and ``self.connected_to_fb`` on success.
    """
    if self.connected_to_fb:
        logger.debug("Already connected to fb")
        return True
    logger.debug("Connecting to fb")
    token = facebook_login.get_fb_token()
    try:
        self.fb = facebook.GraphAPI(token)
    except Exception:
        # Fixed: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt.
        print("Couldn't connect to fb")
        return False
    self.connected_to_fb = True
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def KnowsFile(self, filename):
    """Return True when this handler manages files with this extension."""
    return bool(self._isMediaFile(filename) or self._isConfigFile(filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Remove(self, directory, filename):
    """Delete *filename* from fb; config files need no remote action.

    Returns the result of the media removal, True for config files, and
    False for files this handler does not manage.
    """
    if self._isMediaFile(filename):
        return self._remove_media(directory, filename)
    if self._isConfigFile(filename):
        return True
    # Fixed: was a Python-2-only "print" statement, inconsistent with the
    # print() calls used everywhere else in this module.
    print("Not handled!")
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_config(self, directory, filename):
    """React to a change in one of the FB config files.

    SET_FILE triggers moving the directory's photos to another album,
    MEGAPIXEL_FILE triggers a resize pass, and a meta-extension file
    (e.g. ``.title``) changes the title of the matching photo.
    """
    basefilename, ext = os.path.splitext(filename)
    ext = ext.lower()
    # FIXME: LOCATION_FILE / TAG_FILE handling is not implemented yet.
    if filename == SET_FILE:
        print("%s - Moving photos to album" % (filename))
        return self._upload_media(directory, movealbum_request=True)
    if filename == MEGAPIXEL_FILE:
        print("%s - Resizing photos" % (filename))
        return self._upload_media(directory, resize_request=True)
    if ext in self.FB_META_EXTENSIONS:
        print("%s - Changing photo title" % (basefilename))
        return self._upload_media(
            directory, basefilename, changetitle_request=True)
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_title(self,directory,filename):
"""Loads image title if any""" |
# =========== LOAD TITLE ========
fullfile=os.path.join(directory,filename+'.title')
try:
logger.debug('trying to open [%s]'%(fullfile))
_title=(open(fullfile).readline().strip())
logger.debug("_updatemeta: %s - title is '%s'",filename,_title)
except:
_title=''
return _title |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_megapixels(self, directory):
    """Read the per-directory megapixel limit used to scale uploads.

    Returns the limit as a float (e.g. 3.5), or None when the file is
    missing or unparsable — photos are then uploaded unscaled. The
    original photo on disk is never touched.
    """
    # FIXME: should check if DB tracking file before using it
    fullfile = os.path.join(directory, MEGAPIXEL_FILE)
    try:
        # Fixed: use "with" so the handle is closed (the original leaked
        # the open file).
        with open(fullfile) as handle:
            mp = float(handle.readline())
    except (IOError, OSError, ValueError):
        # Fixed: was a bare "except:"; a missing file or a non-numeric
        # first line both mean "do not scale".
        logger.warning("Couldn't open image size file in %s, not scaling images"\
            %(directory))
        return None
    logger.debug("_load_megapixel: MP from file is %f", mp)
    return mp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_sets(self, directory):
    """Load album names from SET_FILE and return them as a list of strings.

    The file's first line is a comma-separated list of set names. Exits
    the program when the file cannot be read, because FB uploads require
    an album name.
    """
    fullfile = os.path.join(directory, SET_FILE)
    try:
        # Fixed: use "with" so the handle is closed (the original leaked
        # the open file).
        with open(fullfile) as handle:
            line = handle.readline()
    except (IOError, OSError):
        # Fixed: was a bare "except:" wrapped around the whole parse
        # loop; only a missing/unreadable file should abort.
        logger.error("No sets found in %s, FB needs an album name (%s)"\
            %(directory,SET_FILE))
        sys.exit(1)
    return [tag.strip() for tag in line.split(',')]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_album(self,directory):
""" Loads set name from SET_FILE, looks up album_id on fb, it it doesn't exists, creates album. Returns album id and album name """ |
if not self._connectToFB():
print("%s - Couldn't connect to fb"%(directory))
return None,None
# Load sets from SET_FILE
_sets=self._load_sets(directory)
# Only grab the first set, FB supports only one set per photo
myset=_sets[0]
logger.debug("Getting album id for %s"%(myset))
# Connect to fb and get dicionary of photosets
psets=self._getphotosets()
# create if it doesn't exist
if myset not in psets:
logger.info('set [%s] not in fb sets, will create set'%(myset))
self._createphotoset(myset)
# Now reaload photosets from fb
psets=self._getphotosets()
# Return the album id, album name
return psets[myset]['id'],myset |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getphoto_originalsize(self,pid):
"""Asks fb for photo original size returns tuple with width,height """ |
logger.debug('%s - Getting original size from fb'%(pid))
i=self.fb.get_object(pid)
width=i['images'][0]['width']
height=i['images'][0]['height']
return (width,height) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getphoto_location(self,pid):
"""Asks fb for photo location information returns tuple with lat,lon,accuracy """ |
logger.debug('%s - Getting location from fb'%(pid))
lat=None
lon=None
accuracy=None
resp=self.fb.photos_geo_getLocation(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - fb: photos_geo_getLocation failed with status: %s",\
resp.attrib['stat']);
return (None,None,None)
for location in resp.find('photo'):
lat=location.attrib['latitude']
lon=location.attrib['longitude']
accuracy=location.attrib['accuracy']
return (lat,lon,accuracy) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _remove_media(self, directory, files=None):
    """Delete tracked photos from fb, keeping the local copies.

    *files* may be None (remove everything tracked for *directory*), a
    single filename, or a list of filenames. Files that were never
    tracked are skipped. Returns False when the connection or a remote
    delete fails, True otherwise.
    """
    # Connect if we aren't already
    if not self._connectToFB():
        logger.error("%s - Couldn't connect to fb")
        return False
    db = self._loadDB(directory)
    # If no files given, use files from DB in dir
    if not files:
        files = db.keys()
    # If only one file given, make it a list
    if isinstance(files, basestring):
        files = [files]
    for fn in files:
        print("%s - Deleting from fb [local copy intact]" % (fn))
        try:
            pid = db[fn]['photoid']
        except KeyError:
            # Fixed: was a bare "except:"; only an untracked file should
            # be skipped silently.
            logger.debug("%s - Was never in fb DB" % (fn))
            continue
        try:
            self.fb.delete_object(pid)
        except facebook.GraphAPIError as e:
            print("%s - fb: delete failed with status: %s:%s"\
                %(fn,e.type,e.message))
            return False
        logger.debug('Removing %s from fb DB' % (fn))
        del db[fn]
        # Persist after each deletion so progress survives a later failure.
        self._saveDB(directory, db)
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.