text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tabs(self, model_alias, object):
    """Yield all active tabs registered for the given model.

    :param model_alias: Model alias (or model class) to look up
    :param object: Object passed to each tab's ``display_filter``
    :return: Generator of tab items visible for ``object``
    """
    alias = self.get_model_alias(model_alias)
    yield from (tab for tab in self.tabs[alias] if tab.display_filter(object))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tab(self, model_alias, object, tab_code):
    """Return the tab registered for the given object and tab code.

    :param model_alias: Model alias (or model class) to look up
    :param object: Object passed to the tab's ``display_filter``
    :param tab_code: Code of the tab to return
    :return: The matching tab item
    :raises Exception: If no tab with ``tab_code`` exists or it is
        filtered out for ``object``
    """
    model_alias = self.get_model_alias(model_alias)
    for item in self.tabs[model_alias]:
        if item.code == tab_code and item.display_filter(object):
            return item
    # Message typo fixed: "exits" -> "exist".
    raise Exception('Given tab does not exist or is filtered')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, model_alias, code='general', name=None, order=None, display_filter=None):
    """Register a new tab for a model (decorator factory).

    :param model_alias: Model alias (or model class) the tab belongs to
    :param code: Unique tab code within the model
    :param name: Optional display name (derived from code when omitted)
    :param order: Optional sort order (lower first; missing -> 999)
    :param display_filter: Optional callable deciding tab visibility
    :return: Decorator that registers the wrapped layout factory
    :raises Exception: If a tab with the same code is already registered
    """
    model_alias = self.get_model_alias(model_alias)

    def wrapper(create_layout):
        item = TabItem(
            code=code,
            create_layout=create_layout,
            name=name,
            order=order,
            display_filter=display_filter
        )
        if item in self.tabs[model_alias]:
            raise Exception("Tab {} already registered for model {}".format(code, model_alias))
        self.tabs[model_alias].append(item)
        # "is not None" keeps an explicit order of 0 valid -- the
        # original truthiness test pushed order=0 to the end.  The
        # lambda parameter is renamed so it no longer shadows `item`.
        self.tabs[model_alias].sort(
            key=lambda tab: tab.order if tab.order is not None else 999)
        return create_layout
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, model_alias, code='general', name=None, order=None, display_filter=None):
    """Update attributes of an already registered tab.

    :param model_alias: Model alias (or model class) the tab belongs to
    :param code: Code of the tab to update
    :param name: New display name, when given
    :param order: New sort order, when given
    :param display_filter: New visibility callable, when given
    :return: None
    """
    model_alias = self.get_model_alias(model_alias)
    for item in self.tabs[model_alias]:
        if item.code != code:
            continue
        if name:
            item.name = name
        if order:
            item.order = order
        if display_filter:
            item.display_filter = display_filter
        break
    # Bug fix: re-sort by *order* (as register() does), not by code --
    # the original key mixed str codes with the int fallback 999, which
    # raises TypeError on Python 3 and ordered tabs alphabetically.
    self.tabs[model_alias] = sorted(
        self.tabs[model_alias],
        key=lambda item: item.order if item.order is not None else 999)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_model_alias(self, model_alias):
    """Resolve a model alias, converting a model class to its alias string."""
    from trionyx.models import BaseModel

    is_model_class = inspect.isclass(model_alias) and issubclass(model_alias, BaseModel)
    if not is_model_class:
        return model_alias
    config = models_config.get_config(model_alias)
    return '{}.{}'.format(config.app_label, config.model_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auto_generate_missing_tabs(self):
    """Auto generate a default 'general' tab for every model without tabs."""
    for config in models_config.get_all_configs():
        model_alias = '{}.{}'.format(config.app_label, config.model_name)
        if model_alias not in self.tabs:
            # Register a fallback layout that simply lists every model
            # field in one panel.  The closure only uses `obj`, so the
            # loop variable capture is safe here.
            @self.register(model_alias)
            def general_layout(obj):
                return Layout(
                    Column12(
                        Panel(
                            'info',
                            DescriptionList(*[f.name for f in obj.get_fields()])
                        )
                    )
                )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(self):
    """Return the tab name, falling back to a name derived from the code."""
    explicit = self._name
    if explicit:
        return explicit
    return self.code.replace('_', ' ').capitalize()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_layout(self, object):
    """Build the complete, normalized layout for the given object.

    The layout factory may return a single Component or a list of
    components; both forms are wrapped in a Layout before the
    registered layout updates are applied.
    """
    layout = self.create_layout(object)
    if isinstance(layout, Component):
        layout = Layout(layout)
    if isinstance(layout, list):
        layout = Layout(*layout)
    # Apply registered post-processing hooks in registration order.
    for update_layout in self.layout_updates:
        update_layout(layout, object)
    layout.set_object(object)
    return layout
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Script entry point: build the CLI parser, parse argv, and run it."""
    app = cli.Cli()
    app.parse(sys.argv[1:])
    return app.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_backup(filename, root_dir, ignore=None, ignore_ext=None, ignore_pattern=None):
    """Back up a directory tree into a timestamped zip archive.

    :param filename: Archive name prefix; a timestamp and ``.zip`` are appended.
    :param root_dir: the directory you want to backup
    :param ignore: file or directory defined in this list will be ignored.
    :param ignore_ext: file with extensions defined in this list will be ignored.
    :param ignore_pattern: any file or directory that contains this pattern
        will be ignored.
    """
    # Avoid mutable default arguments; normalize None to fresh lists.
    ignore = [] if ignore is None else ignore
    ignore_ext = [] if ignore_ext is None else ignore_ext
    ignore_pattern = [] if ignore_pattern is None else ignore_pattern
    tab = " "
    # Step 1, calculate files to backup
    print("Perform backup '%s'..." % root_dir)
    print(tab + "1. Calculate files...")
    total_size_in_bytes = 0
    # Temporarily switch WinFile to regular init (so size_on_disk is
    # available), then restore the previous mode.
    init_mode = WinFile.init_mode
    WinFile.use_regular_init()
    fc = FileCollection.from_path_except(
        root_dir, ignore, ignore_ext, ignore_pattern)
    WinFile.set_initialize_mode(complexity=init_mode)
    for winfile in fc.iterfiles():
        total_size_in_bytes += winfile.size_on_disk
    # Step 2, write files to zip archive
    print(tab * 2 + "Done, got %s files, total size is %s." % (
        len(fc), string_SizeInBytes(total_size_in_bytes)))
    print(tab + "2. Backup files...")
    filename = "%s %s.zip" % (
        filename, datetime.now().strftime("%Y-%m-%d %Hh-%Mm-%Ss"))
    print(tab * 2 + "Write to '%s'..." % filename)
    current_dir = os.getcwd()
    try:
        with ZipFile(filename, "w") as f:
            os.chdir(root_dir)
            for winfile in fc.iterfiles():
                relpath = os.path.relpath(winfile.abspath, root_dir)
                f.write(relpath)
    finally:
        # Always restore the working directory, even if archiving fails
        # (the original left the process chdir'ed into root_dir on error).
        os.chdir(current_dir)
    print(tab + "Complete!")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prompt_file(prompt, default=None):
    """Prompt for a file name on stdin with readline tab-autocompletion.

    :param prompt: Text shown to the user.
    :param default: Value returned when the user enters nothing.
    :return: The entered path, or ``default`` on empty input.
    """
    def complete(text: str, state):
        # readline calls this repeatedly with state=0,1,2,... until it
        # returns None; appending [None] terminates the suggestions.
        text = text.replace('~', HOME)
        sugg = (glob.glob(text + '*') + [None])[state]
        if sugg is None:
            return
        # Present paths with '~' and forward slashes for readability.
        sugg = sugg.replace(HOME, '~')
        sugg = sugg.replace('\\', '/')
        if os.path.isdir(sugg) and not sugg.endswith('/'):
            sugg += '/'
        return sugg
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(complete)
    if default is not None:
        r = input('%s [%r]: ' % (prompt, default))
    else:
        r = input('%s: ' % prompt)
    r = r or default
    # remove the autocompletion before quitting for future input()
    readline.parse_and_bind('tab: self-insert')
    return r
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isprime(n):
    """Return True when abs(int(n)) is a prime number, False otherwise."""
    n = abs(int(n))
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # n is prime when no odd integer in [3, sqrt(n)] divides it evenly.
    return all(n % divisor for divisor in range(3, int(n ** 0.5) + 1, 2))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self):
    """Create an application context (user, dotfiles, folders, venv) on the server."""
    self.before_create()
    puts(green('Creating app context'))
    tdir = os.path.dirname(__file__)
    # Ensure the app context user exists
    user_ensure(self.user, home='/home/' + self.user)
    dir_ensure('/home/%s/.ssh' % self.user)
    t = '/home/%s/.ssh/authorized_keys'
    app_authorized_keys = t % env.user
    ctx_authorized_keys = t % self.user
    # Ensure the app user has the same authorized_keys as the admin user
    if file_exists(app_authorized_keys) and \
            not file_exists(ctx_authorized_keys):
        sudo('cp %s %s' % (app_authorized_keys, ctx_authorized_keys))
        # NOTE(review): mode=755 is unusually permissive for an
        # authorized_keys file -- sshd typically requires 600; confirm.
        file_attribs(ctx_authorized_keys, mode=755,
                     owner=self.user, group=self.user)
    # Actions to be performed with the app context user
    with settings(user=self.user):
        # Make sure the dot files exist
        # This is mostly necessary for virtualenvwrapper to work properly
        for f in ['bashrc', 'bash_profile', 'profile']:
            lfn = os.path.join(tdir, 'templates', '%s.tmpl' % f)
            contents = file_local_read(lfn) % self.__dict__
            rfn = '/home/%s/.%s' % (self.user, f)
            file_ensure(rfn, owner=self.user, group=self.user)
            file_update(rfn, lambda _: contents)
        # Make sure the sites folder exists
        dir_ensure('/home/%s/sites' % self.user)
        # Make sure the app's required folders exist
        for d in [self.root_dir, self.releases_dir, self.etc_dir,
                  self.log_dir, self.run_dir, self.shared_dir]:
            dir_ensure(d)
        # Create the virtualenv
        run('mkvirtualenv ' + self.name)
    self.after_create()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload_release(self):
    """Upload an application bundle to the server and activate it as a release."""
    self.before_upload_release()
    with settings(user=self.user):
        with app_bundle():
            local_bundle = env.local_bundle
            env.bundle = '/tmp/' + os.path.basename(local_bundle)
            # NOTE(review): arguments here are (remote, local) -- verify
            # that file_upload expects the remote path first.
            file_upload(env.bundle, local_bundle)
        # Extract the bundle into a release folder
        current_release_link = self.releases_dir + '/current'
        previous_release_link = self.releases_dir + '/previous'
        release_dir = self.releases_dir + '/' + env.release
        dir_ensure(release_dir)
        with cd(release_dir):
            run('tar -xvf ' + env.bundle)
        # Delete the remote bundle
        run('rm ' + env.bundle)
        # Remove previous release link
        if file_exists(previous_release_link):
            run('rm ' + previous_release_link)
        # Move current to previous
        if file_exists(current_release_link):
            run('mv %s %s' % (current_release_link, previous_release_link))
        # Link the current release
        file_link(release_dir, self.releases_dir + "/current")
        # Install app dependencies
        with cd(current_release_link):
            with prefix('workon ' + self.name):
                run('pip install -r requirements.txt')
    self.after_upload_release()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _print_breakdown(cls, savedir, fname, data):
"""Function to print model fixtures into generated file""" |
if not os.path.exists(savedir):
os.makedirs(savedir)
with open(os.path.join(savedir, fname), 'w') as fout:
fout.write(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_json_breakdown(cls, fname):
    """Read a JSON fixture file and return the fixed-up fixture data.

    :param fname: Path to the JSON file.
    :return: Result of ``cls.fixup_from_json`` on the file contents.
    :raises RuntimeError: If ``fname`` does not exist.
    """
    if not os.path.exists(fname):
        # Same exception type as before, now with a useful message
        # (the original raised a bare RuntimeError).
        raise RuntimeError('Fixture file not found: {}'.format(fname))
    with open(fname, 'r') as data_file:
        return cls.fixup_from_json(data_file.read())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, fetchcommand, sql, params=None):
    """Execute ``sql`` and fetch results according to ``fetchcommand``.

    :param fetchcommand: One of 'fetchone'/'one', 'fetchall'/'all',
        'last'/'lastid', 'none', or a falsy value (no fetch).
    :param sql: SQL statement to execute.
    :param params: Optional tuple of query parameters.
    :raises ValueError: If ``params`` is not a tuple, or
        ``fetchcommand`` is unrecognized.
    """
    cur = self.conn.cursor()
    if params:
        # isinstance replaces the fragile type-name string comparison;
        # the unreachable "return None" after the raise was removed.
        if not isinstance(params, tuple):
            raise ValueError('the params argument needs to be a tuple')
        cur.execute(sql, params)
    else:
        cur.execute(sql)
    self.conn.commit()
    if not fetchcommand or fetchcommand == 'none':
        return
    if fetchcommand == 'last' or fetchcommand == 'lastid':
        lastdata = cur.fetchall()
        self.conn.commit()
        return lastdata
    m = insertion_pattern.match(sql)
    # TODO: This is a BUG - need to also check tail of query for RETURNING
    if m:
        # e.g. lastid = cursor.fetchone()['lastval']
        lastdata = cur.fetchone()
        self.conn.commit()
        return lastdata
    if fetchcommand == 'fetchone' or fetchcommand == 'one':
        return cur.fetchone()
    elif fetchcommand == 'fetchall' or fetchcommand == 'all':
        return cur.fetchall()
    else:
        msg = "expecting <fetchcommand> argument to be either 'fetchone'|'one'|'fetchall|all'"
        raise ValueError(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cp_parents(files, target_dir: Union[str, Path]):
    """Copy files into ``target_dir``, recreating their leading directories.

    Acts like bash ``cp --parents``::

        cp_parents('/tmp/e/f', '/tmp/a/b/c/d/')  -> /tmp/a/b/c/d/tmp/e/f
        cp_parents('x/hi', '/tmp/e/f/g')         -> /tmp/e/f/g/x/hi

    :param files: A single path or an iterable of paths.
    :param target_dir: Directory under which the parent tree is recreated.
    """
    # make list if it's a single path
    if isinstance(files, (str, Path)):
        files = [files]
    # relative path or absolute path is fine
    files = (Path(f).expanduser() for f in files)
    target_dir = Path(target_dir).expanduser()
    for f in files:
        parent = f.parent
        # Bug fix: joining an *absolute* parent onto target_dir used to
        # discard target_dir entirely (Path('/a') / Path('/tmp/e') ==
        # Path('/tmp/e')), copying the file over itself.  Strip the
        # anchor so the tree nests under target_dir as documented.
        if parent.is_absolute():
            parent = parent.relative_to(parent.anchor)
        newpath = target_dir / parent
        newpath.mkdir(parents=True, exist_ok=True)
        shutil.copy2(f, newpath)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_plot_params(theme=None):
    """Set session-wide matplotlib rcParams as an alternative to an RC file.

    :param theme: 'dark', 'white', or anything else for the default
        light theme.
    """
    # set solarized color progression no matter what
    solarized = ('268bd2, dc322f, 859900, ' +
                 'b58900, d33682, 2aa198, ' +
                 'cb4b16, 002b36')
    try:
        # 'axes.color_cycle' was removed in matplotlib >= 2.0; prefer
        # the prop_cycle API and fall back for very old versions.
        mpl.rcParams['axes.prop_cycle'] = mpl.cycler(
            color=['#' + c.strip() for c in solarized.split(',')])
    except (KeyError, AttributeError):
        mpl.rcParams['axes.color_cycle'] = solarized
    # non-color options are independent as well
    mpl.rcParams['figure.figsize'] = 11, 8  # figure size in inches
    mpl.rcParams['lines.linewidth'] = 2.0  # line width in points
    mpl.rcParams['axes.grid'] = 'True'  # display grid or not
    mpl.rcParams['font.size'] = 18.0
    mpl.rcParams['axes.titlesize'] = 18  # fontsize of the axes title
    mpl.rcParams['axes.labelsize'] = 18  # fontsize of the x any y labels
    mpl.rcParams['legend.fontsize'] = 18
    mpl.rcParams['figure.edgecolor'] = 'None'  # figure edgecolor
    mpl.rcParams['savefig.edgecolor'] = 'None'  # figure edgecolor saving
    # color by theme
    if theme == 'dark':
        mpl.rcParams['text.color'] = "bbbbbb"
        mpl.rcParams['axes.facecolor'] = '333333'
        mpl.rcParams['axes.edgecolor'] = '999999'  # axes edge color
        mpl.rcParams['axes.labelcolor'] = 'bbbbbb'
        mpl.rcParams['xtick.color'] = 'bbbbbb'  # color of the tick labels
        mpl.rcParams['ytick.color'] = 'bbbbbb'  # color of the tick labels
        mpl.rcParams['grid.color'] = 'bbbbbb'  # grid color
        mpl.rcParams['figure.facecolor'] = '333333'  # figure facecolor
        mpl.rcParams['savefig.facecolor'] = '333333'  # figure facecolor saving
    elif theme == 'white':
        mpl.rcParams['text.color'] = "111111"
        mpl.rcParams['axes.facecolor'] = 'ffffff'
        mpl.rcParams['axes.edgecolor'] = '111111'  # axes edge color
        mpl.rcParams['axes.labelcolor'] = '111111'
        mpl.rcParams['xtick.color'] = '111111'  # color of the tick labels
        mpl.rcParams['ytick.color'] = '111111'  # color of the tick labels
        mpl.rcParams['grid.color'] = '111111'  # grid color
        mpl.rcParams['figure.facecolor'] = 'ffffff'  # figure facecolor
        mpl.rcParams['savefig.facecolor'] = 'ffffff'  # figure facecolor saving
    else:
        mpl.rcParams['text.color'] = "777777"
        mpl.rcParams['axes.facecolor'] = 'f7f7f5'
        mpl.rcParams['axes.edgecolor'] = '111111'  # axes edge color
        mpl.rcParams['axes.labelcolor'] = '777777'
        mpl.rcParams['xtick.color'] = '777777'  # color of the tick labels
        mpl.rcParams['ytick.color'] = '777777'  # color of the tick labels
        mpl.rcParams['grid.color'] = '777777'  # grid color
        mpl.rcParams['figure.facecolor'] = 'f7f7f5'  # figure facecolor
        mpl.rcParams['savefig.facecolor'] = 'f7f7f5'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self, *args, **options):
    """Create a default 'admin' superuser if it does not exist yet.

    NOTE(review): the hard-coded 'admin'/'admin' credentials are for
    development bootstrapping only and must never be loaded in
    production.
    """
    try:
        admin = User.objects.get(username='admin')
    except User.DoesNotExist:
        # Only create (and set the password for) the user when missing;
        # an existing admin user is left untouched.
        admin = User(
            username='admin',
            first_name='admin',
            last_name='admin',
            email='admin@localhost.localdomain',
            is_staff=True,
            is_active=True,
            is_superuser=True,
        )
        admin.set_password('admin')
        admin.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def left_zero_pad(s, blocksize):
    """Left-pad ``s`` with zero bytes up to a multiple of ``blocksize``.

    :param s: Byte string to pad.
    :param blocksize: Target block size; no-op when <= 0 or already aligned.
    :return: The padded byte string.
    """
    if blocksize <= 0:
        return s
    pad_len = (blocksize - len(s) % blocksize) % blocksize
    return b('\000') * pad_len + s
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_bot(user_agent):
"""Check if user_agent is a known bot.""" |
bot_list = [
'http://www.baidu.com/search/spider.html',
'python-requests',
'http://ltx71.com/',
'http://drupal.org/',
'www.sogou.com',
'http://search.msn.com/msnbot.htm',
'semantic-visions.com crawler',
]
for bot in bot_list:
if re.search(re.escape(bot), user_agent):
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_download(ending):
"""Check if file ending is considered as download.""" |
list = [
'PDF',
'DOC',
'TXT',
'PPT',
'XLSX',
'MP3',
'SVG',
'7Z',
'HTML',
'TEX',
'MPP',
'ODT',
'RAR',
'ZIP',
'TAR',
'EPUB',
]
list_regex = [
'PDF'
]
if ending in list:
return True
for file_type in list_regex:
if re.search(re.escape(file_type), ending):
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, year, week, overwrite=False):
    """Fetch PageViews and Downloads from Elasticsearch for one week."""
    self.config['overwrite_files'] = overwrite
    started = time.time()
    # Registered users first, then anonymous IP users.
    # CDS has no user_agent before this date 1433400000:
    for ip_users in (False, True):
        self._fetch_pageviews(self.storage, year, week, ip_users=ip_users)
        self._fetch_downloads(self.storage, year, week, ip_users=ip_users)
    logger.info('Fetch %s-%s in %s seconds.', year, week,
                time.time() - started)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_pageviews(self, storage, year, week, ip_users=False):
    """Fetch PageView events from Elasticsearch into weekly storage.

    :param storage: presumably the storage backend -- NOTE(review):
        ``self.storage`` is used below instead of this parameter; confirm.
    :param year: Year of the week to fetch.
    :param week: Week number within ``year``.
    :param ip_users: When True, fetch anonymous hits (id_user == 0) and
        record IP and user agent; otherwise fetch logged-in users only.
    """
    prefix = 'Pageviews'
    if ip_users:
        query_add = "AND !(bot:True) AND (id_user:0)"
        prefix += '_IP'
    else:
        query_add = "AND !(bot:True) AND !(id_user:0)"
    store = self.storage.get(prefix, year, week)
    # Skip weeks already on disk unless overwriting was requested.
    if not self.config['overwrite_files'] and store.does_file_exist():
        logger.debug("File already exist, skip: {}-{}".format(year, week))
        return
    store.open('overwrite')
    time_from, time_to = get_week_dates(year, week, as_timestamp=True)
    es_type = "events.pageviews"
    # Elasticsearch timestamps are in milliseconds.
    es_query = self.ES_QUERY % {'timestamp_start': time_from * 1000,
                                'timestamp_end': time_to * 1000,
                                'event_name': es_type,
                                'query_add': query_add}
    logger.info("{}: {} - {}".format(es_type, time_from, time_to))
    for hit in self._fetch_elasticsearch(es_query):
        item = {}
        try:
            item['user'] = hit['_source'].get('id_user')
            # Sanity-check that the query filter matched the user class.
            if ip_users:
                assert 0 == item['user']
            else:
                assert 0 != item['user']
            assert es_type == hit['_type']
            item['timestamp'] = float(hit['_source']['@timestamp']) / 1000
            if ip_users:
                item['ip'] = str(hit['_source'].get('client_host'))
                user_agent = str(hit['_source'].get('user_agent'))
                # str(None) == 'None', hence the double check.
                if user_agent is None or user_agent == 'None':
                    continue
                elif _is_bot(user_agent):
                    continue
                item['user_agent'] = user_agent
            item['recid'] = int(hit['_source'].get('id_bibrec'))
        except UnicodeEncodeError as e:
            # TODO: Error logging.
            # print(e)
            continue
        # Save entry
        store.add_hit(item)
    store.close()
    # Delete File if no hits were added.
    if store.number_of_hits == 0:
        store.delete()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_elasticsearch(self, es_query):
    """Scan/scroll all hits for ``es_query`` from Elasticsearch.

    :param es_query: Query body passed to the search endpoint.
    :returns: Generator yielding raw Elasticsearch hit dicts.

    NOTE(review): search_type="scan" only exists in old Elasticsearch
    versions (removed in 2.x); confirm the targeted client/server
    before upgrading.
    """
    # TODO: Show error if index is not found.
    scanResp = self._esd.search(index=self.config['es_index'],
                                body=es_query, size=2000,
                                search_type="scan", scroll="10000",
                                timeout=900, request_timeout=900)
    # Log the initial response without its (very long) scroll id.
    resp = dict(scanResp)
    resp.pop('_scroll_id')
    logger.debug(resp)
    scroll_hits = scanResp['hits']['total']
    scrollTime = scanResp['took']
    scrollId = scanResp['_scroll_id']
    # Check for shard errors
    if scanResp['_shards']['failed'] > 0:
        logger.warn("Failing shards, check ES")
    retry_count = 0
    number_of_retrys = 5
    hit_count = 0
    while True:
        try:
            response = self._esd.scroll(scroll_id=scrollId, scroll="10000",
                                        request_timeout=900)
            # The scroll id may rotate between pages; track the latest.
            if response['_scroll_id'] != scrollId:
                scrollId = response['_scroll_id']
            if scanResp['_shards']['failed'] > 0:
                print("Failing shards, check ES")
            # No more hits
            if len(response["hits"]["hits"]) == 0:
                break
        except esd_exceptions.ConnectionTimeout:
            # Retry a bounded number of times before giving up.
            logger.warning("ES exceptions: Connection Timeout")
            if retry_count >= number_of_retrys:
                raise esd_exceptions.ConnectionTimeout()
            retry_count += 1
            continue
        except StopIteration:
            break
        except Exception as e:
            # TODO: Logging
            logging.exception("ES exception", exc_info=True)
            print("EXCEPTION")
            print(e)
            break
        for hit in response["hits"]["hits"]:
            yield hit
            hit_count += 1
    if hit_count > scroll_hits:
        # More hits as expected, happens sometimes.
        logger.info('More hits as expected %s/%s', hit_count, scroll_hits)
    elif hit_count < scroll_hits:
        # Less hits as expected, something went wrong.
        logger.warn('Less hits as expected %s/%s', hit_count, scroll_hits)
    logger.info('%s Hits', hit_count)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_queue(self, *names):
    """Debug-log the contents of selected Redis queue structures.

    ``names`` may include any of "worker", "available", "priorities",
    "expiration", "workers", or "reservations_ITEM" filling in some
    specific item.
    """
    conn = redis.StrictRedis(connection_pool=self.pool)
    for name in names:
        if name == 'worker':
            logger.debug('last worker: ' + conn.get(self._key_worker()))
        elif name == 'available':
            logger.debug('available: ' +
                         str(conn.zrevrange(self._key_available(), 0, -1,
                                            withscores=True)))
        elif name == 'priorities':
            logger.debug('priorities: ' +
                         str(conn.hgetall(self._key_priorities())))
        elif name == 'expiration':
            logger.debug('expiration: ' +
                         str(conn.zrevrange(self._key_expiration(), 0, -1,
                                            withscores=True)))
        elif name == 'workers':
            logger.debug('workers: ' +
                         str(conn.hgetall(self._key_workers())))
        elif name.startswith('reservations_'):
            # Everything after the prefix names the item whose
            # reservation set should be dumped.
            item = name[len('reservations_'):]
            logger.debug('reservations for ' + item + ': ' +
                         str(conn.smembers(self._key_reservations(item))))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def worker_id(self):
    """A unique identifier for this queue instance and the items it owns."""
    cached = self._worker_id
    if cached is not None:
        return cached
    return self._get_worker_id(self._conn())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_worker_id(self, conn):
"""Get the worker ID, using a preestablished connection.""" |
if self._worker_id is None:
self._worker_id = conn.incr(self._key_worker())
return self._worker_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_item(self, item, priority):
    """Add ``item`` to this queue.

    It will have the specified ``priority`` (highest priority runs
    first). If it is already in the queue, fail if it is checked out or
    reserved, or change its priority to ``priority`` otherwise.
    """
    conn = self._conn()
    self._run_expiration(conn)
    # The Lua script runs atomically in Redis: an item present in the
    # priorities hash but absent from the available zset is checked out
    # or reserved -> reject; otherwise upsert priority in both places.
    script = conn.register_script("""
    if (redis.call("hexists", KEYS[2], ARGV[1]) ~= 0) and
       not(redis.call("zscore", KEYS[1], ARGV[1]))
    then
      return -1
    end
    redis.call("zadd", KEYS[1], ARGV[2], ARGV[1])
    redis.call("hset", KEYS[2], ARGV[1], ARGV[2])
    return 0
    """)
    result = script(keys=[self._key_available(), self._key_priorities()],
                    args=[item, priority])
    if result == -1:
        raise ItemInUseError(item)
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_out_item(self, expiration):
    """Get the highest-priority item out of this queue.

    Returns the item, or None if no items are available. The item must
    be either ``return_item()`` or ``renew_item()`` before
    ``expiration`` seconds pass, or it will become available to future
    callers. The item will be marked as being owned by ``worker_id``.
    """
    conn = redis.StrictRedis(connection_pool=self.pool)
    self._run_expiration(conn)
    # Convert the relative timeout into an absolute deadline.
    expiration += time.time()
    # Atomically: pop the top-priority item from the available zset,
    # schedule its expiration, and record bidirectional item<->worker
    # ownership ("i"/"w" prefixes keep the two key spaces apart).
    script = conn.register_script("""
    local item = redis.call("zrevrange", KEYS[1], 0, 0)
    if #item == 0 then return nil end
    item = item[1]
    redis.call("zrem", KEYS[1], item)
    redis.call("zadd", KEYS[2], ARGV[1], item)
    redis.call("hset", KEYS[3], "i" .. item, "w" .. ARGV[2])
    redis.call("hset", KEYS[3], "w" .. ARGV[2], "i" .. item)
    return item
    """)
    result = script(keys=[self._key_available(), self._key_expiration(),
                          self._key_workers()],
                    args=[expiration, self._get_worker_id(conn)])
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def renew_item(self, item, expiration):
    """Update the expiration time for ``item``.

    The item will remain checked out for ``expiration`` seconds beyond
    the current time. This queue instance must have already checked out
    ``item``, and this method can fail if ``item`` is already overdue.
    """
    conn = self._conn()
    self._run_expiration(conn)
    # Convert the relative timeout into an absolute deadline.
    expiration += time.time()
    script = conn.register_script("""
    -- already expired?
    if redis.call("hget", KEYS[2], "i" .. ARGV[1]) ~= "w" .. ARGV[3]
    then return -1 end
    -- otherwise just update the expiration
    redis.call("zadd", KEYS[1], ARGV[2], ARGV[1])
    return 0
    """)
    result = script(keys=[self._key_expiration(), self._key_workers()],
                    args=[item, expiration, self._get_worker_id(conn)])
    if result == -1:
        # We no longer own the item (it timed out or was reassigned).
        raise LostLease(item)
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reserve_items(self, parent_item, *items):
    """Reserve a set of items until a parent item is returned.

    Prevent ``check_out_item()`` from returning any of ``items`` until
    ``parent_item`` is completed or times out. For each item, if it is
    not already checked out or reserved by some other parent item, it
    is associated with ``parent_item``, and the reservation will be
    released when ``parent_item`` completes or times out.

    Returns a list that is a subset of ``items`` for which we could get
    the reservation. Raises ``LostLease`` if this queue instance no
    longer owns ``parent_item``. If any of the items do not exist, they
    are silently ignored.
    """
    conn = redis.StrictRedis(connection_pool=self.pool)
    self._run_expiration(conn)
    # Atomically verify we still own parent_item, then move each
    # still-available item from the available zset into the parent's
    # reservation set.
    script = conn.register_script("""
    -- expired?
    if redis.call("hget", KEYS[2], "i" .. ARGV[1]) ~= "w" .. ARGV[2]
    then return -1 end
    -- loop through each item
    local result = {}
    for i = 3, #ARGV do
      local item = ARGV[i]
      -- item must be available to reserve
      if redis.call("zscore", KEYS[1], item) then
        redis.call("zrem", KEYS[1], item)
        redis.call("sadd", KEYS[3], item)
        result[#result + 1] = item
      end
    end
    return result
    """)
    result = script(keys=[self._key_available(), self._key_workers(),
                          self._key_reservations(parent_item)],
                    args=([parent_item, self._get_worker_id(conn)] +
                          list(items)))
    if result == -1:
        raise LostLease(parent_item)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_expiration(self, conn):
    """Return any items that have expired to the available queue.

    :param conn: Redis connection to operate on.
    """
    # The logic here is sufficiently complicated, and we need
    # enough random keys (Redis documentation strongly encourages
    # not constructing key names in scripts) that we'll need to
    # do this in multiple steps.  This means that, when we do
    # go in and actually expire things, we need to first check
    # that they're still running.

    # Get, and clear out, the list of expiring items
    now = time.time()
    script = conn.register_script("""
    local result = redis.call("zrangebyscore", KEYS[1], 0, ARGV[1])
    redis.call("zremrangebyscore", KEYS[1], 0, ARGV[1])
    return result
    """)
    # Fix: use the `now` snapshot taken above -- it was previously
    # unused and time.time() was called again a moment later.
    expiring = script(keys=[self._key_expiration()], args=[now])
    # Manually expire each item one by one
    script = conn.register_script("""
    -- item may have fallen out of the worker list, if someone finished
    -- at just the very last possible moment (phew!)
    local wworker = redis.call("hget", KEYS[3], "i" .. ARGV[1])
    if not wworker then return end
    -- we want to return item, plus everything it's reserved
    local to_return = redis.call("smembers", KEYS[4])
    to_return[#to_return + 1] = ARGV[1]
    for i = 1, #to_return do
      local pri = redis.call("hget", KEYS[2], to_return[i])
      redis.call("zadd", KEYS[1], pri, to_return[i])
    end
    -- already removed from expiration list
    -- remove from worker list too
    redis.call("hdel", KEYS[3], "i" .. ARGV[1])
    redis.call("hdel", KEYS[3], wworker)
    """)
    for item in expiring:
        script(keys=[self._key_available(), self._key_priorities(),
                     self._key_workers(), self._key_reservations(item)],
               args=[item])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleanup_sweep_threads():
    '''
    Not used. Keeping this function in case we decide not to use
    daemonized threads and it becomes necessary to clean up the
    running threads upon exit.
    '''
    timed_dicts = ((name, value) for name, value in globals().items()
                   if isinstance(value, TimedDict))
    for dict_name, obj in timed_dicts:
        logging.info(
            'Stopping thread for TimedDict {dict_name}'.format(
                dict_name=dict_name))
        obj.stop_sweep()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_expiration(self, key, ignore_missing=False,
                   additional_seconds=None, seconds=None):
    '''
    Alters the expiration time for a key. If the key is not
    present, then raise an Exception unless `ignore_missing`
    is set to `True`.
    Args:
        key: The key whose expiration we are changing.
        ignore_missing (bool): If set, then return silently
            if the key does not exist. Default is `False`.
        additional_seconds (int): Add this many seconds to the
            current expiration time.
        seconds (int): Expire the key this many seconds from now.
    '''
    if key not in self.time_dict:
        if ignore_missing:
            return
        raise Exception('Key missing from `TimedDict` and '
                        '`ignore_missing` is False.')
    if additional_seconds is not None:
        self.time_dict[key] += additional_seconds
    elif seconds is not None:
        self.time_dict[key] = time.time() + seconds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expire_key(self, key):
    '''
    Expire the key, delete the value, and call the callback function
    if one is specified.
    Args:
        key: The ``TimedDict`` key
    '''
    value = self.base_dict[key]
    del self[key]
    cb = self.callback
    if cb is not None:
        cb(key, value, *self.callback_args, **self.callback_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _open(self, file_path=None):
    """
    Open the file specified by the given path (defaulting to
    ``self.file_path``) and return the open file object.

    Raises ValueError if the file is missing or cannot be opened.
    """
    path = self.file_path if file_path is None else file_path
    if not os.path.exists(path):
        raise ValueError('Could not find file: {}'.format(path))
    try:
        return open(path, encoding='utf-8', newline='')
    except OSError as err:
        self.log.error(str(err))
        raise ValueError('Could not open file: {}'.format(path))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dialect(self):
    """Returns a Dialect named tuple, or None if the dataset file
    comprises a single column of data. If the dialect is not already
    known, tries to determine it; raises ValueError (via
    self._determine_dialect) if that fails."""
    if self.is_single_col:
        return None
    if self.delimiter and self.quotechar:
        # Dialect already known; doublequote is enabled only when no
        # explicit escape character is configured.
        return Dialect(self.delimiter, self.quotechar,
                       True if self.escapechar is None else False,
                       self.escapechar)
    # First guess from the file extension: tsv-style extensions imply
    # tab-delimited, double-quoted data.
    ext = os.path.basename(self.file_path).rsplit('.', maxsplit=1)
    ext = ext[1].lower() if len(ext) > 1 else None
    if ext in TSV_EXTENSIONS:
        self.delimiter = '\t'
        self.quotechar = '"'
    else:
        # Otherwise sniff the dialect from the file contents.
        f = self._open()
        lines = f.read().splitlines()
        f.close()
        if lines:
            dialect = self._determine_dialect(lines)
        else:
            dialect = None
        if dialect is None:
            # No delimiter detected: treat the file as single-column.
            self.is_single_col = True
        else:
            self.delimiter = dialect.delimiter
            self.quotechar = dialect.quotechar
            self.escapechar = dialect.escapechar
    # Recurse once now that the instance state is populated; the call
    # returns through one of the early branches above.
    return self.get_dialect()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_csv_reader(self, f, dialect):
    """
    Return a csv.reader for the given file handle and csv Dialect
    named tuple. If the file has a header, the reader is advanced
    past it (and the IPA column inferred from it if needed). Without
    a header, a non-int self.ipa_col is converted to an integer;
    ValueError is raised when that is impossible.
    """
    reader = csv.reader(f,
                        delimiter=dialect.delimiter,
                        quotechar=dialect.quotechar,
                        doublequote=dialect.doublequote,
                        escapechar=dialect.escapechar)
    if self.has_header:
        header = next(reader)
        if not isinstance(self.ipa_col, int):
            self.ipa_col = self._infer_ipa_col(header)
        return reader
    if isinstance(self.ipa_col, int):
        return reader
    if not self.ipa_col:
        raise ValueError('Cannot infer IPA column without header')
    try:
        self.ipa_col = int(self.ipa_col)
    except ValueError:
        raise ValueError('Cannot find column: {}'.format(self.ipa_col))
    return reader
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_ipa_data(self):
    """
    Generator over the IPA strings found in the dataset file. Yields
    each IPA data string paired with its respective line number.
    """
    dialect = self.get_dialect()
    f = self._open()
    try:
        # Delimited files go through the csv path; single-column
        # files through the plain-text path.
        source = self._gen_csv_data(f, dialect) if dialect \
            else self._gen_txt_data(f)
        yield from source
    finally:
        f.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    "Iterates over a generator looking for things that match."
    # NOTE: Python 2 code (basestring, g.next()).
    if isinstance(name, SoupStrainer):
        # Caller supplied a ready-made matcher; use it directly.
        strainer = name
    # (Possibly) special case some findAll*(...) searches
    elif text is None and not limit and not attrs and not kwargs:
        # findAll*(True) -- every Tag matches; no strainer needed.
        if name is True:
            return [element for element in generator()
                    if isinstance(element, Tag)]
        # findAll*('tag-name') -- fast path comparing tag names only.
        elif isinstance(name, basestring):
            return [element for element in generator()
                    if isinstance(element, Tag) and
                    element.name == name]
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
    # Build a SoupStrainer
    else:
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    g = generator()
    while True:
        try:
            # Python 2 iterator protocol.
            i = g.next()
        except StopIteration:
            break
        if i:
            found = strainer.search(i)
            if found:
                results.append(found)
                # Stop early once `limit` matches have been gathered.
                if limit and len(results) >= limit:
                    break
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decompose(self):
    """Recursively destroys the contents of this tree."""
    # Detach this element from its parent first.
    self.extract()
    if len(self.contents) == 0:
        return
    # Walk the linearized element chain, severing every navigation
    # link so the tree can be garbage-collected despite the
    # reference cycles between parents, children, and siblings.
    current = self.contents[0]
    while current is not None:
        # Save the forward pointer before it is cleared below.
        next = current.next
        if isinstance(current, Tag):
            del current.contents[:]
        current.parent = None
        current.previous = None
        current.previousSibling = None
        current.next = None
        current.nextSibling = None
        current = next
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _smartPop(self, name):
    """We need to pop up to the previous tag of this type, unless
    one of this tag's nesting reset triggers comes between this
    tag and the previous tag of this type, OR unless this tag is a
    generic nesting trigger and another generic nesting trigger
    comes between this tag and the previous tag of this type.

    Examples:
     <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
     <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
     <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
     <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
     <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
     <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
    """
    # NOTE: Python 2 code (dict.has_key).
    nestingResetTriggers = self.NESTABLE_TAGS.get(name)
    isNestable = nestingResetTriggers != None
    isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
    popTo = None
    inclusive = True
    # Walk the open-tag stack from the innermost tag outward.
    for i in range(len(self.tagStack)-1, 0, -1):
        p = self.tagStack[i]
        if (not p or p.name == name) and not isNestable:
            #Non-nestable tags get popped to the top or to their
            #last occurance.
            popTo = name
            break
        if (nestingResetTriggers is not None
            and p.name in nestingResetTriggers) \
            or (nestingResetTriggers is None and isResetNesting
                and self.RESET_NESTING_TAGS.has_key(p.name)):
            #If we encounter one of the nesting reset triggers
            #peculiar to this tag, or we encounter another tag
            #that causes nesting to reset, pop up to but not
            #including that tag.
            popTo = p.name
            inclusive = False
            break
        # NOTE(review): this reassignment looks redundant -- `p` is
        # rebound from tagStack at the top of each iteration. Kept
        # as-is to preserve the original behavior.
        p = p.parent
    if popTo:
        self._popToTag(popTo, inclusive)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _subMSChar(self, orig):
    """Changes a MS smart quote character to an XML or HTML entity."""
    replacement = self.MS_CHARS.get(orig)
    if not isinstance(replacement, tuple):
        # Plain string substitution (or None for unknown characters).
        return replacement
    # Tuple entries carry (html-entity-name, hex-codepoint).
    name, codepoint = replacement
    if self.smartQuotesTo == 'xml':
        return '&#x%s;' % codepoint
    return '&%s;' % name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_file_values(env_file, fail_silently=True):
    """
    Parse KEY=value lines from `env_file` into a dict. Borrowed from
    Honcho. Single- or double-quoted values are unquoted; backslash
    escapes are resolved inside double-quoted values only.
    """
    env_data = {}
    try:
        with open(env_file) as f:
            content = f.read()
    except IOError:
        if not fail_silently:
            raise
        logging.error("Could not read file '{0}'".format(env_file))
        return env_data
    for line in content.splitlines():
        assignment = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
        if not assignment:
            # Skip lines that are not simple KEY=value assignments.
            continue
        key, val = assignment.group(1), assignment.group(2)
        single = re.match(r"\A'(.*)'\Z", val)
        if single:
            val = single.group(1)
        double = re.match(r'\A"(.*)"\Z', val)
        if double:
            # Resolve \x escapes inside double quotes.
            val = re.sub(r'\\(.)', r'\1', double.group(1))
        env_data[key] = val
    return env_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_suffix(path):
    """
    Return the suffix of `path`: ``/home/xex/somefile.txt`` --> ``txt``.

    Args:
        path (str): Full file path.

    Returns:
        str: Suffix (the whole basename when it contains no dot).

    Raises:
        UserWarning: When ``/`` is detected in the suffix.
    """
    suffix = os.path.basename(path).rpartition(".")[-1]
    if "/" in suffix:
        raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
    return suffix
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SRU_Compute_CPU(activation_type, d, bidirectional=False, scale_x=1):
    """CPU version of the core SRU computation. Has the same interface
    as SRU_Compute_GPU() but is a regular Python function instead of a
    torch.autograd.Function because we don't implement backward()
    explicitly.
    """
    def sru_compute_cpu(u, x, bias, init=None, mask_h=None):
        # assumes x is (length, batch, n_in) when 3-D, else
        # (batch, n_in) -- TODO confirm against callers.
        bidir = 2 if bidirectional else 1
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        # k: number of projections per direction packed into u
        # (3, or 4 when a separate highway projection is present).
        k = u.size(-1) // d // bidir
        if mask_h is None:
            # No dropout mask: multiply by 1 (identity).
            mask_h = 1
        u = u.view(length, batch, bidir, d, k)
        x_tilde = u[..., 0]
        forget_bias, reset_bias = bias.view(2, bidir, d)
        forget = (u[..., 1] + forget_bias).sigmoid()
        reset = (u[..., 2] + reset_bias).sigmoid()
        if k == 3:
            # Highway connection reuses the (optionally scaled) input.
            x_prime = x.view(length, batch, bidir, d)
            x_prime = x_prime * scale_x if scale_x != 1 else x_prime
        else:
            x_prime = u[..., 3]
        h = Variable(x.data.new(length, batch, bidir, d))
        if init is None:
            c_init = Variable(x.data.new(batch, bidir, d).zero_())
        else:
            c_init = init.view(batch, bidir, d)
        c_final = []
        for di in range(bidir):
            # Direction 0 walks time forward; direction 1 (the
            # backward pass of a bidirectional cell) walks in reverse.
            if di == 0:
                time_seq = range(length)
            else:
                time_seq = range(length - 1, -1, -1)
            c_prev = c_init[:, di, :]
            for t in time_seq:
                # c_t = f * c_{t-1} + (1 - f) * x_tilde, in the
                # rearranged single-multiply form.
                c_t = (c_prev - x_tilde[t, :, di, :]) * forget[t, :, di, :] + x_tilde[t, :, di, :]
                c_prev = c_t
                if activation_type == 0:
                    g_c_t = c_t
                elif activation_type == 1:
                    g_c_t = c_t.tanh()
                elif activation_type == 2:
                    g_c_t = nn.functional.relu(c_t)
                else:
                    assert False, 'Activation type must be 0, 1, or 2, not {}'.format(activation_type)
                # h_t = r * (g(c_t) * mask) + (1 - r) * x', again in
                # the rearranged single-multiply form.
                h[t, :, di, :] = (g_c_t * mask_h - x_prime[t, :, di, :]) * reset[t, :, di, :] + x_prime[t, :, di, :]
            c_final.append(c_t)
        return h.view(length, batch, -1), torch.stack(c_final, dim=1).view(batch, -1)
    return sru_compute_cpu
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_encoding(binary):
    """Return the encoding type detected for the given bytes."""
    try:
        from chardet import detect
    except ImportError:
        LOGGER.error("Please install the 'chardet' module")
        sys.exit(1)
    detected = detect(binary).get('encoding')
    # CP949 is remapped to iso-8859-1 -- presumably a downstream
    # compatibility workaround; verify against project history.
    if detected == 'CP949':
        return 'iso-8859-1'
    return detected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getch():
    """Request a single character input from the user (read one
    keypress without waiting for Enter)."""
    if sys.platform in ['darwin', 'linux']:
        # POSIX: switch the terminal to raw mode for a single read,
        # then always restore the saved settings.
        import termios
        import tty
        file_descriptor = sys.stdin.fileno()
        settings = termios.tcgetattr(file_descriptor)
        try:
            tty.setraw(file_descriptor)
            return sys.stdin.read(1)
        finally:
            termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)
    elif sys.platform in ['cygwin', 'win32']:
        # Windows: msvcrt reads a keypress directly (wide-char aware).
        import msvcrt
        return msvcrt.getwch()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ismatch(text, pattern):
    """Test whether text contains string or matches regex."""
    if hasattr(pattern, 'search'):
        # Compiled regex: delegate to its own search method.
        return pattern.search(text) is not None
    # Plain string: substring test, case-folded unless the
    # case-sensitive option is set.
    if Config.options.case_sensitive:
        return pattern in text
    return pattern.lower() in text.lower()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logger():
    """Configure program logger (idempotent)."""
    scriptlogger = logging.getLogger(__program__)
    # Ensure the logger is not reconfigured on repeated calls.
    if scriptlogger.hasHandlers():
        return
    scriptlogger.setLevel(logging.INFO)
    # Attach a terminal handler with the program's message format.
    streamhandler = logging.StreamHandler()
    streamhandler.setFormatter(
        logging.Formatter('%(name)s:%(levelname)s: %(message)s'))
    scriptlogger.addHandler(streamhandler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pattern_logic_aeidon():
    """Return patterns to be used for searching subtitles via aeidon."""
    # Pattern files take priority; otherwise fall back to the built-in
    # regex list or plain terms.
    if Config.options.pattern_files:
        return prep_patterns(Config.options.pattern_files)
    return Config.REGEX if Config.options.regex else Config.TERMS
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pattern_logic_srt():
    """Return patterns to be used for searching srt subtitles."""
    # Pattern files take priority; the regex flag additionally compiles
    # whichever pattern source is chosen.
    if Config.options.pattern_files:
        patterns = prep_patterns(Config.options.pattern_files)
        return prep_regex(patterns) if Config.options.regex else patterns
    if Config.options.regex:
        return prep_regex(Config.REGEX)
    return Config.TERMS
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prep_patterns(filenames):
    """
    Load pattern files passed via options and return the set of
    unique patterns. Exits with an error if a file cannot be read or
    if no patterns were loaded at all.
    """
    patterns = []
    for filename in filenames:
        try:
            with open(filename) as file:
                patterns += [line.rstrip('\n') for line in file]
        except (OSError, UnicodeDecodeError):
            # Narrowed from a bare except: only I/O and decode errors
            # indicate an unreadable pattern file. Lazy %-args avoid
            # eager string formatting.
            LOGGER.error("Unable to load pattern file '%s'", filename)
            sys.exit(1)
    if not patterns:
        LOGGER.error('No terms were loaded')
        sys.exit(1)
    # Return a set to eliminate duplicates.
    return set(patterns)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prep_regex(patterns):
    """Compile regex patterns, honouring the case-sensitivity option."""
    flags = re.I if not Config.options.case_sensitive else 0
    compiled = []
    for pattern in patterns:
        compiled.append(re.compile(pattern, flags))
    return compiled
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prerequisites():
    """Display information about obtaining the aeidon module."""
    url = "http://home.gna.org/gaupol/download.html"
    debian = "sudo apt-get install python3-aeidon"
    other = "python3 setup.py --user --without-gaupol clean install"
    message = ("The aeidon module is missing!\n\n"
               "Try '{0}' or the appropriate command for your package "
               "manager.\n\n"
               "You can also download the tarball for gaupol (which "
               "includes aeidon) at {1}. After downloading, unpack and "
               "run '{2}'.").format(debian, url, other)
    LOGGER.error(message)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_elements(target, indices):
    """
    Return a copy of `target` with the elements at the given
    (ascending) indices removed. A new list is created so the
    original is never altered.
    """
    pruned = list(target)
    # Delete from the back so earlier indices stay valid.
    for position in reversed(indices):
        del pruned[position]
    return pruned
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_aeidon():
    """Prepare filenames and patterns then process subtitles with aeidon."""
    # Every subtitle format aeidon can handle.
    Config.filenames = prep_files(Config.args, ['ass', 'srt', 'ssa', 'sub'])
    Config.patterns = pattern_logic_aeidon()
    for path in Config.filenames:
        AeidonProject(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_srt():
    """Prepare filenames and patterns then process srt subtitles."""
    # Only srt files are supported by the built-in parser.
    Config.filenames = prep_files(Config.args, ['srt'])
    Config.patterns = pattern_logic_srt()
    for path in Config.filenames:
        SrtProject(path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fixchars(self):
    """Replace characters or strings within subtitle file."""
    # Run one search-and-replace pass per configured fix.
    for search, replacement in Config.CHARFIXES.items():
        self.project.set_search_string(search)
        self.project.set_search_replacement(replacement)
        self.project.replace_all()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open(self):
    """Open the subtitle file into an Aeidon project."""
    try:
        self.project.open_main(self.filename)
    except UnicodeDecodeError:
        # The default encoding failed: detect the real encoding from
        # the raw bytes and retry exactly once.
        with open(self.filename, 'rb') as openfile:
            encoding = get_encoding(openfile.read())
        try:
            self.project.open_main(self.filename, encoding)
        except UnicodeDecodeError:
            # Even the detected encoding failed; give up.
            LOGGER.error("'%s' encountered a fatal encoding error",
                         self.filename)
            sys.exit(1)
        except:  # pylint: disable=W0702
            # Any other failure on the retry is reported generically.
            open_error(self.filename)
    except:  # pylint: disable=W0702
        # Any non-encoding failure on the first attempt.
        open_error(self.filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Save subtitle file."""
    try:
        # ensure file is encoded properly while saving
        self.project.main_file.encoding = 'utf_8'
        self.project.save_main()
        if self.fix:
            LOGGER.info("Saved changes to '%s'", self.filename)
    except:  # pylint: disable=W0702
        # Bare except: any failure (including inside the logging
        # call above) aborts the program with an error message.
        LOGGER.error("Unable to save '%s'", self.filename)
        sys.exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self):
    """Search srt in project for cells matching list of terms."""
    found = []
    for pattern in Config.patterns:
        found.extend(self.termfinder(pattern))
    # De-duplicate and order numerically by cell index.
    return sorted(set(found), key=int)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def termfinder(self, pattern):
    """Search srt in project for cells matching term."""
    # Configure the project's search engine for either regex or
    # plain-string matching, honouring case sensitivity.
    if Config.options.regex:
        flags = re.M | re.S | \
            (0 if Config.options.case_sensitive else re.I)
        self.project.set_search_regex(
            pattern, flags=flags)
    else:
        self.project.set_search_string(
            pattern, ignore_case=not Config.options.case_sensitive)
    matches = []
    # Repeatedly advance find_next past the last hit; aeidon signals
    # exhaustion (wrap-around) via StopIteration or a non-advancing
    # result.
    while True:
        try:
            if matches:
                last = matches[-1]
                new = self.project.find_next(last + 1)[0]
                # NOTE(review): `new != last` is implied by
                # `new > last`; kept as-is to preserve behavior.
                if new != last and new > last:
                    matches.append(new)
                else:
                    break
            else:
                matches.append(self.project.find_next()[0])
        except StopIteration:
            break
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fixchars(self, text):
    """Find and replace problematic characters."""
    # One-pass character translation built from the configured fixes.
    table = str.maketrans(''.join(Config.CHARFIXES.keys()),
                          ''.join(Config.CHARFIXES.values()))
    repaired = text.translate(table)
    if repaired != text:
        self.modified = True
    return repaired
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prompt(self, matches):
    """Prompt user to remove cells from subtitle file."""
    if Config.options.autoyes:
        # Non-interactive mode: every match is confirmed for deletion.
        return matches
    deletions = []
    for match in matches:
        # Show the matched cell and ask for a y/n decision.
        os.system('clear')
        print(self.cells[match])
        print('----------------------------------------')
        print("Delete cell %s of '%s'?" % (str(match + 1), self.filename))
        response = getch().lower()
        if response == 'y':
            os.system('clear')
            deletions.append(match)
        elif response == 'n':
            os.system('clear')
        else:
            # Any other key aborts immediately without saving.
            if deletions or self.modified:
                LOGGER.warning("Not saving changes made to '%s'",
                               self.filename)
            sys.exit(0)
    return deletions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def renumber(self):
    """Yield cells with their index numbers rewritten sequentially.

    Malformed cells (fewer than two lines) are dropped.
    """
    counter = 0
    for cell in self.cells:
        lines = cell.splitlines()
        if len(lines) < 2:
            continue
        counter += 1
        lines[0] = str(counter)
        yield '\n'.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Re-number the cells and write them back to the subtitle file."""
    self.cells = list(self.renumber())
    # Ensure the file ends with a trailing newline.
    last = self.cells[-1]
    if not last.endswith('\n'):
        self.cells[-1] = last + '\n'
    # Cells are separated by blank lines in srt format.
    content = '\n\n'.join(self.cells)
    with open(self.filename, 'w') as output_file:
        output_file.write(content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(self):
    """Return list of cells to be removed."""
    hits = []
    for index, cell in enumerate(self.cells):
        # A cell is flagged once, on its first matching pattern.
        if any(ismatch(cell, pattern) for pattern in Config.patterns):
            hits.append(index)
    return hits
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split(self, text):
    """Split text into a list of cells."""
    import re
    # Try Unix blank lines first, then Windows-style ones.
    for separator in ('\n\n', '\r\n\r\n'):
        if re.search(separator, text):
            return text.split(separator)
    LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
                 self.filename)
    sys.exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keys(request):
    """Lists API keys. Compatible with jQuery DataTables server-side
    processing (sEcho / iDisplayStart / iSortCol_0 etc. parameters)."""
    # DataTables paging / sorting / search parameters.
    iDisplayStart = parse_int_param(request, 'iDisplayStart')
    iDisplayLength = parse_int_param(request, 'iDisplayLength')
    sEcho = parse_int_param(request, 'sEcho')
    iSortCol_0 = parse_int_param(request, 'iSortCol_0')
    sSortDir_0 = request.GET.get('sSortDir_0', 'asc')
    sSearch = request.GET.get('sSearch')
    # Column order must match the client-side table definition.
    columns = ['key', 'email', 'calls', 'latest_call', 'issued_on']
    qry = Key.objects
    if sSearch not in (None, ''):
        # Free-text search across the key and contact fields.
        qry = qry.filter(Q(key__icontains=sSearch)
                         | Q(email__icontains=sSearch)
                         | Q(name__icontains=sSearch)
                         | Q(org_name__icontains=sSearch)
                         | Q(org_url__icontains=sSearch))
    # Aggregate call totals and most recent call date per key; keys
    # with no recorded calls are excluded.
    qry = qry.values('key', 'email', 'issued_on').annotate(calls=Sum('reports__calls'),
                                                           latest_call=Max('reports__date'))
    qry = qry.filter(calls__isnull=False)
    qry = exclude_internal_keys(qry)
    # TODO: Add multi-column sorting
    if iSortCol_0 not in (None, ''):
        sort_col_field = columns[iSortCol_0]
        sort_spec = '{dir}{col}'.format(dir='-' if sSortDir_0 == 'desc' else '',
                                        col=sort_col_field)
        qry = qry.order_by(sort_spec)
    # Build the DataTables response envelope; aaData rows follow the
    # `columns` order above, with the email rendered as a link to the
    # per-key analytics page.
    result = {
        'iTotalRecords': Key.objects.count(),
        'iTotalDisplayRecords': qry.count(),
        'sEcho': sEcho,
        'aaData': [[k['key'],
                    '<a href="{0}">{1}</a>'.format(reverse('key_analytics', args=(k['key'], )), k['email']),
                    k['calls'],
                    k['latest_call'].isoformat(),
                    k['issued_on'].date().isoformat()]
                   for k in qry[iDisplayStart:iDisplayStart+iDisplayLength]]
    }
    return HttpResponse(content=json.dumps(result), status=200, content_type='application/json')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grouper_dict(d, n):
    """
    Evenly divide a dictionary into fixed-length chunks; the final
    chunk may be smaller than `n`.

    Example::

        >>> list(grouper_dict({1: 'A', 2: 'B', 3: 'C', 4: 'D',
        ...                    5: 'E', 6: 'F', 7: 'G', 8: 'H',
        ...                    9: 'I', 10: 'J'}, 3))
        [{1: 'A', 2: 'B', 3: 'C'}, {4: 'D', 5: 'E', 6: 'F'},
         {7: 'G', 8: 'H', 9: 'I'}, {10: 'J'}]
    """
    chunk = dict()
    # Removed a leftover debug print() that fired on every item.
    for counter, (k, v) in enumerate(d.items(), 1):
        chunk[k] = v
        if counter % n == 0:
            # A full chunk is ready; emit it and start a fresh one.
            yield chunk
            chunk = dict()
    # Remaining items form the (possibly shorter) final chunk.
    if chunk:
        yield chunk
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def running_windows(iterable, size):
    """
    Generate `size`-length running (sliding) windows over `iterable`.

    Example::

        >>> for w in running_windows([1, 2, 3, 4, 5], 3):
        ...     print(w)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]
    """
    # A bounded deque drops the oldest item automatically.
    window = collections.deque(maxlen=size)
    for item in iterable:
        window.append(item)
        if len(window) == size:
            yield list(window)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shift_to_the_left(array, dist, pad=True, trim=True):
    """
    Shift `array` to the left by `dist` positions.

    :param array: an indexable sequence.
    :param dist: how far to shift (must be >= 0).
    :type dist: int
    :param pad: pad ``dist`` copies of ``array[-1]`` on the right.
    :type pad: boolean (default True)
    :param trim: drop the first ``dist`` items.
    :type trim: boolean (default True)

    With ``pad=False`` and ``trim=False`` no change is applied (a
    warning is printed and a copy of the input is returned).
    """
    if dist < 0:
        raise ValueError("Shift distance has to greater or equal than 0.")
    if pad:
        tail = [array[-1]] * dist
        return (array[dist:] if trim else array) + tail
    if trim:
        return array[dist:]
    print("Warning, with pad=False and trim=False, no change applied.")
    return list(array)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_generator(generator, memory_efficient=True):
    """
    Count the number of items in a generator.

    memory_efficient=True: ~3x slower, but uses constant memory.
    memory_efficient=False: faster, but materializes the whole
    generator in memory.
    """
    if memory_efficient:
        return sum(1 for _ in generator)
    return len(list(generator))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_key(self, key, using_name=True):
    """Given a field key or name, return its field key."""
    # Choose the name-indexed or key-indexed lookup table.
    lookup = self.f_name if using_name else self.f
    try:
        return lookup[key].key
    except KeyError:
        raise ValueError("'%s' are not found!" % key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_object_key(self, key, using_name=True):
    """Given an object key or name, return its object key."""
    # Choose the name-indexed or key-indexed lookup table.
    lookup = self.o_name if using_name else self.o
    try:
        return lookup[key].key
    except KeyError:
        raise ValueError("'%s' are not found!" % key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip_a_folder(src, dst):
    """
    Add a folder and everything inside it to a zip archive; the root
    folder itself is kept as a prefix in the archive paths.

    Example::

        zip_a_folder("paper", "paper.zip")

    stores ``paper/algorithm.pdf``, ``paper/images/1.jpg``, ...
    """
    src, dst = os.path.abspath(src), os.path.abspath(dst)
    cwd = os.getcwd()
    dirname, basename = os.path.split(src)
    os.chdir(dirname)
    try:
        # Collect paths relative to the parent of `src` so the archive
        # entries keep the root folder as a prefix.
        todo = list()
        for walk_dir, _, fnamelist in os.walk(basename):
            for fname in fnamelist:
                todo.append(os.path.join(walk_dir, fname))
        with ZipFile(dst, "w") as f:
            for relpath in todo:
                f.write(relpath)
    finally:
        # Always restore the working directory; the original version
        # left the process chdir'ed into `dirname` on any exception.
        os.chdir(cwd)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip_many_files(list_of_abspath, dst):
    """
    Add many files (given as absolute paths) to a zip archive,
    stored under their basenames. Files with duplicate basenames
    are all kept in the archive.
    """
    base_dir = os.getcwd()
    try:
        with ZipFile(dst, "w") as f:
            for abspath in list_of_abspath:
                dirname, basename = os.path.split(abspath)
                # chdir so only the basename is recorded in the archive.
                os.chdir(dirname)
                f.write(basename)
    finally:
        # Always restore the working directory; the original version
        # left the process chdir'ed away on any exception.
        os.chdir(base_dir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_gzip(content, abspath):
    """
    Write binary `content` to a gzip-compressed file at `abspath`.
    """
    handle = gzip.open(abspath, "wb")
    try:
        handle.write(content)
    finally:
        handle.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_bases(bases):
    """
    Performs MRO linearization of a set of base classes (a C3-style
    merge). Yields each base class in turn.

    Raises TypeError when no consistent ordering exists.
    """
    # Merge candidates: the full MRO of every base, plus the list of
    # bases itself (to preserve their declared order).
    sequences = ([list(inspect.getmro(base)) for base in bases] +
                 [list(bases)])
    # Loop over sequences
    while True:
        # Drop exhausted sequences; we are done when none remain.
        sequences = [seq for seq in sequences if seq]
        if not sequences:
            return
        # Select a good head: one that appears in the tail of no
        # remaining sequence (i.e. nothing must come before it).
        for seq in sequences:
            head = seq[0]
            tails = [seq for seq in sequences if head in seq[1:]]
            if not tails:
                break
        else:
            # No valid head exists: the hierarchy is inconsistent.
            raise TypeError('Cannot create a consistent method '
                            'resolution order (MRO) for bases %s' %
                            ', '.join([base.__name__ for base in bases]))
        # Yield this base class
        yield head
        # Remove base class from all the other sequences
        for seq in sequences:
            if seq[0] == head:
                del seq[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inherit_dict(base, namespace, attr_name, inherit=lambda k, v: True):
    """
    Perform inheritance of dictionaries, copying entries from the
    attribute `attr_name` of `base` into the same attribute of the
    new class `namespace`. Returns the list of (key, value) pairs
    that were inherited, for post-processing.

    :param base: The base class being considered; see ``iter_bases()``.
    :param namespace: The dictionary of the new class being built.
    :param attr_name: The name of the attribute containing the
                      dictionary to be inherited.
    :param inherit: Filter controlling inheritance. If ``False`` or
                    ``None``, entries are not copied but are still
                    returned. If a callable, it receives the key and
                    value, and the entry is copied only when it
                    returns ``True``.
    """
    inherited = []
    base_dict = getattr(base, attr_name, {})
    new_dict = namespace.setdefault(attr_name, {})
    for key, value in base_dict.items():
        # Entries overridden in the new class are never touched.
        if key in new_dict:
            continue
        # Entries rejected by the filter are skipped entirely.
        if inherit and not inherit(key, value):
            continue
        if inherit:
            new_dict[key] = value
        inherited.append((key, value))
    return inherited
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inherit_set(base, namespace, attr_name, inherit=lambda i: True):
    """
    Perform inheritance of sets, copying items from the attribute
    `attr_name` of `base` into the same attribute of the new class
    `namespace`. Returns the list of items that were inherited, for
    post-processing.

    :param base: The base class being considered; see ``iter_bases()``.
    :param namespace: The dictionary of the new class being built.
    :param attr_name: The name of the attribute containing the set
                      to be inherited.
    :param inherit: Filter controlling inheritance. If ``False`` or
                    ``None``, items are not copied but are still
                    returned. If a callable, it receives the item,
                    and the item is copied only when it returns
                    ``True``.
    """
    inherited = []
    base_set = getattr(base, attr_name, set())
    new_set = namespace.setdefault(attr_name, set())
    for item in base_set:
        # Items overridden in the new class are never touched.
        if item in new_set:
            continue
        # Items rejected by the filter are skipped entirely.
        if inherit and not inherit(item):
            continue
        if inherit:
            new_set.add(item)
        inherited.append(item)
    return inherited
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decrypt_result(self, *args, **kwargs):
    """Decrypt the ProcessData result with the communication keys.

    Strips the plaintext prefix and any trailing ``_...`` suffix from
    the hex-coded result, verifies the CBC-MAC tag, AES-decrypts the
    payload, then extracts the object id, the freshness nonce and the
    PKCS#7-unpadded plaintext into instance attributes.

    :raises ValueError: when there is no response or no result data
    :raises InvalidResponse: on short or malformed payloads
    :raises CryptoError: when the MAC does not verify
    :return: the (still wrapped) response object
    """
    if self.response is None:
        raise ValueError('Empty response')
    wrapper = self.response.response
    if wrapper is None or 'result' not in wrapper or wrapper['result'] is None:
        raise ValueError('No result data')

    result_hex = wrapper['result']
    # First 4 hex chars encode the length of a plaintext section that
    # precedes the ciphertext; skip over both.
    plain_len = bytes_to_long(from_hex(result_hex[0:4]))
    result_hex = result_hex[4 + max(plain_len, 0):]
    # Drop an optional trailing "_..." string.
    result_hex = result_hex.partition('_')[0]

    ciphertext = from_hex(result_hex)
    # The last 16 bytes are the CBC-MAC tag.
    if len(ciphertext) < 16:
        raise InvalidResponse('Result too short')
    body, tag = ciphertext[:-16], ciphertext[-16:]
    if not str_equals(tag, cbc_mac(self.uo.mac_key, body)):
        raise CryptoError('MAC invalid')

    plain = aes_dec(self.uo.enc_key, body)
    # Layout: 0xf1 marker | 4-byte object id | nonce | padded data
    if len(plain) < 1 + 4 + 8 or plain[0:1] != bchr(0xf1):
        raise InvalidResponse('Invalid format')
    self.resp_object_id = bytes_to_long(plain[1:5])
    nonce_end = 5 + EBConsts.FRESHNESS_NONCE_LEN
    self.resp_nonce = EBUtils.demangle_nonce(plain[5:nonce_end])
    self.decrypted = plain[nonce_end:]
    self.decrypted = PKCS7.unpad(self.decrypted)
    return self.response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, cls, rid):
    """Return the record of type *cls* whose primary key equals *rid*.

    :raises ValueError: when *cls* is not a supported record type
    :raises KeyError: when no matching record exists
    """
    self.validate_record_type(cls)
    matches = self.db.select(cls, where={ID: rid}, limit=1)
    if matches:
        return matches[0]
    raise KeyError('No {} record with id {}'.format(cls, rid))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self, cls, record, user='undefined'):
    """Persist a new *record* of type *cls*, stamped with *user*.

    Sets creation/update timestamps and creator/updater fields, then
    inserts the row, translating database errors into KeyError (key
    already taken) or ValueError (bad field / bad data).
    """
    self.validate_record(cls, record)
    now = self.nowstr()
    record[CREATION_DATE] = record[UPDATE_DATE] = now
    record[CREATOR] = record[UPDATER] = user
    try:
        return self.db.insert(cls, record)
    except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
            psycopg2.DataError) as error:
        code = error.pgcode
        logging.warning("{} {}: {}".format(
            error.__class__.__name__,
            psycopg2.errorcodes.lookup(code), error.pgerror))
        if code == psycopg2.errorcodes.UNIQUE_VIOLATION:
            # Primary key collision
            raise KeyError('There is already a record for {}/{}'.format(
                cls, record[ID]))
        if code == psycopg2.errorcodes.UNDEFINED_COLUMN:
            raise ValueError('Undefined field')
        raise ValueError('Bad record ({})'.format(
            psycopg2.errorcodes.lookup(code)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, cls, rid, partialrecord, user='undefined'):
    """Update the existing record *rid* of type *cls* with *partialrecord*.

    Stamps the update date and updater, then applies the partial
    record.  Returns the number of rows updated (the count was
    previously computed but never returned, unlike ``create`` which
    returns the insert result).

    :raises KeyError: when no such record exists, or the change
                      collides with another record's key
    :raises ValueError: on an undefined field or bad data
    """
    self.validate_partial_record(cls, partialrecord)
    partialrecord[UPDATE_DATE] = self.nowstr()
    partialrecord[UPDATER] = user
    try:
        updatecount = self.db.update(cls, partialrecord, where={ID: rid})
        if updatecount < 1:
            raise KeyError('No such record')
        # Fix: report how many rows were touched instead of dropping it.
        return updatecount
    except (psycopg2.IntegrityError, psycopg2.ProgrammingError,
            psycopg2.DataError) as error:
        if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
            raise KeyError('There is already a record for {}/{}'.format(
                cls, partialrecord[ID]))
        elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
            raise ValueError('Undefined field')
        else:
            raise ValueError('Bad update ({})'.format(
                psycopg2.errorcodes.lookup(error.pgcode)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, cls, rid, user='undefined'):
    """Delete the record of type *cls* identified by *rid*.

    ``user`` is currently unused; it would matter for soft deletes.

    :raises ValueError: when *cls* is not a supported record type
    :raises KeyError: when nothing was deleted
    """
    self.validate_record_type(cls)
    removed = self.db.delete(cls, {ID: rid})
    if removed < 1:
        raise KeyError('No record {}/{}'.format(cls, rid))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_record_type(self, cls):
    """Check that *cls* is an accepted record type.

    When ``self.record_types`` is falsy, every type is accepted.

    :raises ValueError: when *cls* is not among the allowed types
    """
    allowed = self.record_types
    if not allowed:
        return
    if cls not in allowed:
        raise ValueError('Unsupported record type "' + cls + '"')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_record(self, cls, content_type, strdata):
    """Build a record of type *cls* from its serialized string form.

    Validates the record type, deserializes *strdata* according to
    *content_type*, and runs the result through post-processing.
    """
    self.validate_record_type(cls)
    raw = self.deserialize(content_type, strdata)
    return self.post_process_record(cls, raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, cls, record):
    """Serialize *record* to a JSON string using ``self.encoder``.

    *cls* is unused in this implementation.
    """
    encoder = self.encoder
    return json.dumps(record, cls=encoder)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deserialize(self, content_type, strdata):
    """Deserialize *strdata* of the given *content_type*.

    Only ``application/json`` is supported; ``self`` is unused.

    :raises ValueError: on any other content type
    """
    if content_type == 'application/json':
        return json.loads(strdata)
    raise ValueError('Unsupported content type "' + content_type + '"')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frameify(self, state, data):
    """Yield everything buffered so far plus *data* as a single frame.

    The receive buffer is always cleared afterwards, whether the
    consumer finished normally or threw FrameSwitch into us.
    """
    frame = state.recv_buf + data
    try:
        yield frame
    except FrameSwitch:
        # Consumer switched framers mid-frame; just stop.
        pass
    finally:
        state.recv_buf = ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frameify(self, state, data):
    """Yield the pending chunk data as one frame; buffer the rest.

    While a chunk is in progress (``state.chunk_remaining > 0``), up to
    that many bytes of buffered + new data are emitted as a single
    frame; whatever is left over stays in the receive buffer.
    """
    if state.chunk_remaining <= 0:
        # No chunk in progress: everything just accumulates.
        state.recv_buf += data
        return
    pending = state.recv_buf + data
    # The frame is whatever part of the pending data belongs to the chunk.
    take = min(len(pending), state.chunk_remaining)
    chunk, leftover = pending[:take], pending[take:]
    state.recv_buf = leftover
    state.chunk_remaining -= take
    try:
        yield chunk
    except FrameSwitch:
        # Consumer switched framers; drop out quietly.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def frameify(self, state, data):
    """Split incoming data into newline-terminated frames (lines).

    Buffered partial data is prepended; each complete line (up to but
    not including '\\n') is yielded, with one trailing '\\r' stripped
    when ``self.carriage_return`` is set.  Any trailing partial line is
    left in the receive buffer.

    Fix: the original tested ``line[-1] == '\\r'``, which raises
    IndexError on an empty line (a bare "\\n" in the stream) when
    carriage-return stripping is enabled; ``line.endswith('\\r')`` is
    safe for empty lines.
    """
    # Pull in any partially-processed data
    data = state.recv_buf + data
    while data:
        line, sep, rest = data.partition('\n')
        # No complete line left; keep the remainder buffered.
        if sep != '\n':
            break
        data = rest
        # Strip a single trailing carriage return when configured to.
        if self.carriage_return and line.endswith('\r'):
            line = line[:-1]
        try:
            yield line
        except FrameSwitch:
            # Consumer switched framers; stop producing lines.
            break
    # Put any remaining data back into the buffer
    state.recv_buf = data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def streamify(self, state, frame):
    """Prepare *frame* for output as a byte-stuffed stream.

    Every occurrence of the prefix byte inside the frame is escaped
    with the NOP sequence, and the result is wrapped in begin/end
    markers.  ``state`` is unused here.
    """
    # Escape prefix bytes occurring inside the frame payload.
    stuffed = (self.prefix + self.nop).join(frame.split(self.prefix))
    return (self.prefix + self.begin + stuffed +
            self.prefix + self.end)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_tab(cls):
"""Generate and return the COBS table.""" |
if not cls._tabs['dec_cobs']:
# Compute the COBS table for decoding
cls._tabs['dec_cobs']['\xff'] = (255, '')
cls._tabs['dec_cobs'].update(dict((chr(l), (l, '\0'))
for l in range(1, 255)))
# Compute the COBS table for encoding
cls._tabs['enc_cobs'] = [(255, '\xff'),
dict((l, chr(l))
for l in range(1, 255)),
]
return cls._tabs['dec_cobs'], cls._tabs['enc_cobs'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_tab_zpe(cls):
"""Generate and return the COBS ZPE table.""" |
if not cls._tabs['dec_cobs_zpe']:
# Compute the COBS ZPE table for decoding
cls._tabs['dec_cobs_zpe']['\xe0'] = (224, '')
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l, '\0'))
for l in range(1, 224)))
cls._tabs['dec_cobs_zpe'].update(dict((chr(l), (l - 224, '\0\0'))
for l in range(225, 256)))
# Compute the COBS ZPE table for encoding
cls._tabs['enc_cobs_zpe'] = [(224, '\xe0'),
dict((l, chr(l))
for l in range(1, 224)),
dict((l - 224, chr(l))
for l in range(225, 256))
]
return cls._tabs['dec_cobs_zpe'], cls._tabs['enc_cobs_zpe'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _decode(frame, tab):
"""Decode a frame with the help of the table.""" |
blocks = []
# Decode each block
while frame:
length, endseq = tab[frame[0]]
blocks.extend([frame[1:length], endseq])
frame = frame[length:]
# Remove one (and only one) trailing '\0' as necessary
if blocks and len(blocks[-1]) > 0:
blocks[-1] = blocks[-1][:-1]
# Return the decoded plaintext
return ''.join(blocks) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.