text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def parse(self):
''' Parses messages and puts them to receive queue '''
# Loop while we get new messages
while True:
status, self._buffer, packet = Packet.parse_msg(self._buffer)
# If message is incomplete -> break the loop
if status == PARSE_RESULT.INCOMPLETE:
return status
# If message is OK, add it to receive queue or send to the callback method
if status == PARSE_RESULT.OK and packet:
packet.received = datetime.datetime.now()
if isinstance(packet, UTETeachInPacket) and self.teach_in:
response_packet = packet.create_response_packet(self.base_id)
self.logger.info('Sending response to UTE teach-in.')
self.send(response_packet)
if self.__callback is None:
self.receive.put(packet)
else:
self.__callback(packet)
self.logger.debug(packet) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def base_id(self):
''' Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID. '''
# If base id is already set, return it.
if self._base_id is not None:
return self._base_id
# Send COMMON_COMMAND 0x08, CO_RD_IDBASE request to the module
self.send(Packet(PACKET.COMMON_COMMAND, data=[0x08]))
# Loop over 10 times, to make sure we catch the response.
# Thanks to timeout, shouldn't take more than a second.
# Unfortunately, all other messages received during this time are ignored.
for i in range(0, 10):
try:
packet = self.receive.get(block=True, timeout=0.1)
# We're only interested in responses to the request in question.
if packet.packet_type == PACKET.RESPONSE and packet.response == RETURN_CODE.OK and len(packet.response_data) == 4:
# Base ID is set in the response data.
self._base_id = packet.response_data
# Put packet back to the Queue, so the user can also react to it if required...
self.receive.put(packet)
break
# Put other packets back to the Queue.
self.receive.put(packet)
except queue.Empty:
continue
# Return the current Base ID (might be None).
return self._base_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cli(*, worker_settings, burst, check, watch, verbose):
""" Job queues in python with asyncio and redis. CLI to run the arq worker. """ |
sys.path.append(os.getcwd())
worker_settings = import_string(worker_settings)
logging.config.dictConfig(default_log_config(verbose))
if check:
exit(check_health(worker_settings))
else:
kwargs = {} if burst is None else {'burst': burst}
if watch:
loop = asyncio.get_event_loop()
loop.run_until_complete(watch_reload(watch, worker_settings, loop))
else:
run_worker(worker_settings, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next_cron( previous_dt: datetime, *, month: Union[None, set, int] = None, day: Union[None, set, int] = None, weekday: Union[None, set, int, str] = None, hour: Union[None, set, int] = None, minute: Union[None, set, int] = None, second: Union[None, set, int] = 0, microsecond: int = 123_456, ):
""" Find the next datetime matching the given parameters. """ |
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower())
options = dict(
month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond
)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt
dt = next_dt |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cron( coroutine: Union[str, Callable], *, name: Optional[str] = None, month: Union[None, set, int] = None, day: Union[None, set, int] = None, weekday: Union[None, set, int, str] = None, hour: Union[None, set, int] = None, minute: Union[None, set, int] = None, second: Union[None, set, int] = 0, microsecond: int = 123_456, run_at_startup: bool = False, unique: bool = True, timeout: Optional[SecondsTimedelta] = None, keep_result: Optional[float] = 0, max_tries: Optional[int] = 1, ) -> CronJob: """ Create a cron job, eg. it should be executed at specific times. Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the job will only be run once even if multiple workers are running. :param coroutine: coroutine function to run :param name: name of the job, if None, the name of the coroutine is used :param month: month(s) to run the job on, 1 - 12 :param day: day(s) to run the job on, 1 - 31 :param weekday: week day(s) to run the job on, 0 - 6 or mon - sun :param hour: hour(s) to run the job on, 0 - 23 :param minute: minute(s) to run the job on, 0 - 59 :param second: second(s) to run the job on, 0 - 59 :param microsecond: microsecond(s) to run the job on, defaults to 123456 as the world is busier at the top of a second, 0 - 1e6 :param run_at_startup: whether to run as worker starts :param unique: whether the job should be only be executed once at each time :param timeout: job timeout :param keep_result: how long to keep the result for :param max_tries: maximum number of tries for the job """ |
if isinstance(coroutine, str):
name = name or 'cron:' + coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return CronJob(
name or 'cron:' + coroutine.__qualname__,
coroutine,
month,
day,
weekday,
hour,
minute,
second,
microsecond,
run_at_startup,
unique,
timeout,
keep_result,
max_tries,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_unix_ms(dt: datetime) -> int: """ convert a datetime to number of milliseconds since 1970 and calculate timezone offset """ |
utcoffset = dt.utcoffset()
ep = epoch if utcoffset is None else epoch_tz
return as_int((dt - ep).total_seconds() * 1000) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis: """ Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails. Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance, thus allowing job enqueuing. """ |
settings = settings or RedisSettings()
addr = settings.host, settings.port
try:
pool = await aioredis.create_redis_pool(
addr,
db=settings.database,
password=settings.password,
timeout=settings.conn_timeout,
encoding='utf8',
commands_factory=ArqRedis,
)
except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:
if _retry < settings.conn_retries:
logger.warning(
'redis connection error %s:%s %s %s, %d retries remaining...',
settings.host,
settings.port,
e.__class__.__name__,
e,
settings.conn_retries - _retry,
)
await asyncio.sleep(settings.conn_retry_delay)
else:
raise
else:
if _retry > 0:
logger.info('redis connection successful')
return pool
# recursively attempt to create the pool outside the except block to avoid
# "During handling of the above exception..." madness
return await create_pool(settings, _retry=_retry + 1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def enqueue_job( self, function: str, *args: Any, _job_id: Optional[str] = None, _defer_until: Optional[datetime] = None, _defer_by: Union[None, int, float, timedelta] = None, _expires: Union[None, int, float, timedelta] = None, _job_try: Optional[int] = None, **kwargs: Any, ) -> Optional[Job]: """ Enqueue a job. :param function: Name of the function to call :param args: args to pass to the function :param _job_id: ID of the job, can be used to enforce job uniqueness :param _defer_until: datetime at which to run the job :param _defer_by: duration to wait before running the job :param _expires: if the job still hasn't started after this duration, do not run it :param _job_try: useful when re-enqueueing jobs within a job :param kwargs: any keyword arguments to pass to the function :return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists """ |
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def all_job_results(self) -> List[JobResult]: """ Get results for all jobs in redis. """ |
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time')) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def func( coroutine: Union[str, Function, Callable], *, name: Optional[str] = None, keep_result: Optional[SecondsTimedelta] = None, timeout: Optional[SecondsTimedelta] = None, max_tries: Optional[int] = None, ) -> Function: """ Wrapper for a job function which lets you configure more settings. :param coroutine: coroutine function to call, can be a string to import :param name: name for function, if None, ``coroutine.__qualname__`` is used :param keep_result: duration to keep the result for, if 0 the result is not kept :param timeout: maximum time the job should take :param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying """ |
if isinstance(coroutine, Function):
return coroutine
if isinstance(coroutine, str):
name = name or coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self) -> None: """ Sync function to run the worker, finally closes worker connections. """ |
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def async_run(self) -> None: """ Asynchronously run the worker, does not close connections. Useful when testing. """ |
self.main_task = self.loop.create_task(self.main())
await self.main_task |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any: """ Get the result of the job, including waiting if it's not yet available. If the job raised an exception, it will be raised here. :param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever :param pole_delay: how often to poll redis for the job result """ |
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def info(self) -> Optional[JobDef]: """ All information on a job, including its result if it's available, does not wait for the result. """ |
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def result_info(self) -> Optional[JobResult]: """ Information about the job result if available, does not wait for the result. Does not raise an exception even if the job raised one. """ |
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def status(self) -> JobStatus: """ Status of the job. """ |
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def email(self):
""" Shortcut property for finding the e-mail address or bot URL. """ |
if "profile" in self._raw:
email = self._raw["profile"].get("email")
elif "bot_url" in self._raw:
email = self._raw["bot_url"]
else:
email = None
if not email:
logging.debug("No email found for %s", self._raw.get("name"))
return email |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_url(self, pixel_size=None):
""" Get the URL for the user icon in the desired pixel size, if it exists. If no size is supplied, give the URL for the full-size image. """ |
if "profile" not in self._raw:
return
profile = self._raw["profile"]
if (pixel_size):
img_key = "image_%s" % pixel_size
if img_key in profile:
return profile[img_key]
return profile[self._DEFAULT_IMAGE_KEY] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fields(self):
""" Fetch the "fields" list, and process the text within each field, including markdown processing if the message indicates that the fields contain markdown. Only present on attachments, not files--this abstraction isn't 100% awesome.' """ |
process_markdown = ("fields" in self._raw.get("mrkdwn_in", []))
fields = self._raw.get("fields", [])
if fields:
logging.debug("Rendering with markdown markdown %s for %s", process_markdown, fields)
return [
{"title": e["title"], "short": e["short"], "value": self._formatter.render_text(e["value"], process_markdown)}
for e in fields
] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_dm_users(self):
""" Gets the info for the members within the dm Returns a list of all dms with the members that have ever existed :rtype: [object] { id: <id> users: [<user_id>] } """ |
dm_data = self._read_from_json("dms.json")
dms = dm_data.values()
all_dms_users = []
for dm in dms:
# checks if messages actually exsist
if dm["id"] not in self._EMPTY_DMS:
# added try catch for users from shared workspaces not in current workspace
try:
dm_members = {"id": dm["id"], "users": [self.__USER_DATA[m] for m in dm["members"]]}
all_dms_users.append(dm_members)
except KeyError:
dm_members = None
return all_dms_users |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_mpim_users(self):
""" Gets the info for the members within the multiple person instant message Returns a list of all dms with the members that have ever existed :rtype: [object] { name: <name> users: [<user_id>] } """ |
mpim_data = self._read_from_json("mpims.json")
mpims = [c for c in mpim_data.values()]
all_mpim_users = []
for mpim in mpims:
mpim_members = {"name": mpim["name"], "users": [self.__USER_DATA[m] for m in mpim["members"]]}
all_mpim_users.append(mpim_members)
return all_mpim_users |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_messages(self, names, data, isDms=False):
""" Creates object of arrays of messages from each json file specified by the names or ids :param [str] names: names of each group of messages :param [object] data: array of objects detailing where to get the messages from in the directory structure :param bool isDms: boolean value used to tell if the data is dm data so the function can collect the empty dm directories and store them in memory only :return: object of arrays of messages :rtype: object """ |
chats = {}
empty_dms = []
formatter = SlackFormatter(self.__USER_DATA, data)
for name in names:
# gets path to dm directory that holds the json archive
dir_path = os.path.join(self._PATH, name)
messages = []
# array of all days archived
day_files = glob.glob(os.path.join(dir_path, "*.json"))
# this is where it's skipping the empty directories
if not day_files:
if isDms:
empty_dms.append(name)
continue
for day in sorted(day_files):
with io.open(os.path.join(self._PATH, day), encoding="utf8") as f:
# loads all messages
day_messages = json.load(f)
messages.extend([Message(formatter, d) for d in day_messages])
chats[name] = messages
if isDms:
self._EMPTY_DMS = empty_dms
return chats |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_from_json(self, file):
""" Reads the file specified from json and creates an object based on the id of each element :param str file: Path to file of json to read :return: object of data read from json file :rtype: object """ |
try:
with io.open(os.path.join(self._PATH, file), encoding="utf8") as f:
return {u["id"]: u for u in json.load(f)}
except IOError:
return {} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_bytes(s, encoding="utf8"):
"""Converts str s to bytes""" |
if PY_VERSION == 2:
b = bytes(s)
elif PY_VERSION == 3:
b = bytes(s, encoding)
else:
raise ValueError("Is Python 4 out already?")
return b |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SHA1_file(filepath, extra=b''):
""" Returns hex digest of SHA1 hash of file at filepath :param str filepath: File to hash :param bytes extra: Extra content added to raw read of file before taking hash :return: hex digest of hash :rtype: str """ |
h = hashlib.sha1()
with io.open(filepath, 'rb') as f:
for chunk in iter(lambda: f.read(h.block_size), b''):
h.update(chunk)
h.update(extra)
return h.hexdigest() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_archive(filepath):
""" Returns the path of the archive :param str filepath: Path to file to extract or read :return: path of the archive :rtype: str """ |
# Checks if file path is a directory
if os.path.isdir(filepath):
path = os.path.abspath(filepath)
print("Archive already extracted. Viewing from {}...".format(path))
return path
# Checks if the filepath is a zipfile and continues to extract if it is
# if not it raises an error
elif not zipfile.is_zipfile(filepath):
# Misuse of TypeError? :P
raise TypeError("{} is not a zipfile".format(filepath))
archive_sha = SHA1_file(
filepath=filepath,
# Add version of slackviewer to hash as well so we can invalidate the cached copy
# if there are new features added
extra=to_bytes(slackviewer.__version__)
)
extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
if os.path.exists(extracted_path):
print("{} already exists".format(extracted_path))
else:
# Extract zip
with zipfile.ZipFile(filepath) as zip:
print("{} extracting to {}...".format(filepath, extracted_path))
zip.extractall(path=extracted_path)
print("{} extracted to {}".format(filepath, extracted_path))
# Add additional file with archive info
create_archive_info(filepath, extracted_path, archive_sha)
return extracted_path |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_archive_info(filepath, extracted_path, archive_sha=None):
""" Saves archive info to a json file :param str filepath: Path to directory of archive :param str extracted_path: Path to directory of archive :param str archive_sha: SHA string created when archive was extracted from zip """ |
archive_info = {
"sha1": archive_sha,
"filename": os.path.split(filepath)[1],
}
with io.open(
os.path.join(
extracted_path,
".slackviewer_archive_info.json",
), 'w+', encoding="utf-8"
) as f:
s = json.dumps(archive_info, ensure_ascii=False)
s = to_unicode(s)
f.write(s) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(self, key, fallback=None):
"""returns all header values for given key""" |
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, key, value):
"""add header value""" |
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attach(self, attachment, filename=None, ctype=None):
""" attach a file :param attachment: File to attach, given as :class:`~alot.db.attachment.Attachment` object or path to a file. :type attachment: :class:`~alot.db.attachment.Attachment` or str :param filename: filename to use in content-disposition. Will be ignored if `path` matches multiple files :param ctype: force content-type to be used for this attachment :type ctype: str """ |
if isinstance(attachment, Attachment):
self.attachments.append(attachment)
elif isinstance(attachment, str):
path = os.path.expanduser(attachment)
part = helper.mimewrap(path, filename, ctype)
self.attachments.append(Attachment(part))
else:
raise TypeError('attach accepts an Attachment or str')
if self.sent_time:
self.modified_since_sent = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_template(self, tmp, reset=False, only_body=False):
"""parses a template or user edited string to fills this envelope. :param tmp: the string to parse. :type tmp: str :param reset: remove previous envelope content :type reset: bool """ |
logging.debug('GoT: """\n%s\n"""', tmp)
if self.sent_time:
self.modified_since_sent = True
if only_body:
self.body = tmp
else:
m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)',
tmp)
assert m
d = m.groupdict()
headertext = d['h']
self.body = d['b']
# remove existing content
if reset:
self.headers = {}
# go through multiline, utf-8 encoded headers
# we decode the edited text ourselves here as
# email.message_from_file can't deal with raw utf8 header values
key = value = None
for line in headertext.splitlines():
if re.match('[a-zA-Z0-9_-]+:', line): # new k/v pair
if key and value: # save old one from stack
self.add(key, value) # save
key, value = line.strip().split(':', 1) # parse new pair
# strip spaces, otherwise we end up having " foo" as value
# of "Subject: foo"
value = value.strip()
elif key and value: # append new line without key prefix
value += line
if key and value: # save last one if present
self.add(key, value)
# interpret 'Attach' pseudo header
if 'Attach' in self:
to_attach = []
for line in self.get_all('Attach'):
gpath = os.path.expanduser(line.strip())
to_attach += [g for g in glob.glob(gpath)
if os.path.isfile(g)]
logging.debug('Attaching: %s', to_attach)
for path in to_attach:
self.attach(path)
del self['Attach'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_commandline(s, comments=False, posix=True):
""" splits semi-colon separated commandlines """ |
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string_sanitize(string, tab_width=8):
r""" strips, and replaces non-printable characters :param tab_width: number of spaces to replace tabs with. Read from `globals.tabwidth` setting if `None` :type tab_width: int or `None` ' foobar ' 'foo bar' 'foo bar' """ |
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string_decode(string, enc='ascii'):
""" safely decodes string to unicode bytestring, respecting `enc` as a hint. :param string: the string to decode :type string: str or unicode :type enc: str :returns: the unicode decoded input string :rtype: unicode """ |
if enc is None:
enc = 'ascii'
try:
string = str(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already str
pass
return string |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis""" |
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'…'
return string[:maxlen] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call_cmd(cmdlist, stdin=None):
""" get a shell commands output, error message and return value and immediately return. .. warning:: This returns with the first screen content for interactive commands. :param cmdlist: shellcommand to call, already splitted into a list accepted by :meth:`subprocess.Popen` :type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str, bytes, or None :return: triple of stdout, stderr, return value of the shell command :rtype: str, str, int """ |
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc)
try:
logging.debug("Calling %s" % cmdlist)
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
else:
out, err = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def call_cmd_async(cmdlist, stdin=None, env=None):
"""Given a command, call that command asynchronously and return the output. This function only handles `OSError` when creating the subprocess, any other exceptions raised either durring subprocess creation or while exchanging data with the subprocess are the caller's responsibility to handle. If such an `OSError` is caught, then returncode will be set to 1, and the error value will be set to the str() method fo the exception. :type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str :return: Tuple of stdout, stderr, returncode :rtype: tuple[str, str, int] """ |
termenc = urwid.util.detected_encoding
cmdlist = [s.encode(termenc) for s in cmdlist]
environment = os.environ.copy()
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
try:
proc = await asyncio.create_subprocess_exec(
*cmdlist,
env=environment,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE if stdin else None)
except OSError as e:
return ('', str(e), 1)
out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
return (out.decode(termenc), err.decode(termenc), proc.returncode) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_mimetype(blob):
    """Determine the mime-type of a data blob using file magic.

    :param blob: file content as read by file.read()
    :type blob: data
    :returns: mime-type, falls back to 'application/octet-stream'
    :rtype: str
    """
    fallback = 'application/octet-stream'
    # Two incompatible python bindings for libmagic exist; support both.
    # - magic.open() is provided by the bindings shipped with the file
    #   source from http://darwinsys.com/file/ (python-magic on
    #   Debian/Ubuntu), which are not available on pypi/via pip.
    # - magic.from_buffer() is provided by the pip-installable bindings
    #   from https://github.com/ahupp/python-magic
    # for more detail see https://github.com/pazz/alot/pull/588
    if hasattr(magic, 'open'):
        handle = magic.open(magic.MAGIC_MIME_TYPE)
        handle.load()
        detected = handle.buffer(blob)
    elif hasattr(magic, 'from_buffer'):
        # cf. issue #841
        detected = magic.from_buffer(blob, mime=True) or fallback
    else:
        raise Exception('Unknown magic API')
    # libmagic does not always return proper mimetype strings, cf. issue #459
    if re.match(r'\w+\/\w+', detected):
        return detected
    return fallback
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_encoding(blob):
    """Determine the encoding of a data blob using file magic.

    :param blob: file content as read by file.read()
    :type blob: data
    :returns: encoding
    :rtype: str
    """
    # Two incompatible python bindings for libmagic exist; support both.
    # - magic.open() is provided by the bindings shipped with the file
    #   source from http://darwinsys.com/file/ (python-magic on
    #   Debian/Ubuntu), which are not available on pypi/via pip.
    # - magic.from_buffer() is provided by the pip-installable bindings
    #   from https://github.com/ahupp/python-magic
    # for more detail see https://github.com/pazz/alot/pull/588
    if hasattr(magic, 'open'):
        handle = magic.open(magic.MAGIC_MIME_ENCODING)
        handle.load()
        return handle.buffer(blob)
    if hasattr(magic, 'from_buffer'):
        return magic.Magic(mime_encoding=True).from_buffer(blob)
    raise Exception('Unknown magic API')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def libmagic_version_at_least(version):
    """Check whether the installed libmagic is at least the given version.

    :param version: minimum version expected in the form XYY
        (i.e. 5.14 -> 514) with XYY >= 513
    """
    # pick the ctypes wrapper of whichever python binding is installed
    if hasattr(magic, 'open'):
        wrapper = magic._libraries['magic']
    elif hasattr(magic, 'from_buffer'):
        wrapper = magic.libmagic
    else:
        raise Exception('Unknown magic API')
    if not hasattr(wrapper, 'magic_version'):
        # The magic_version function has been introduced in libmagic 5.13;
        # if it's not present, we can't guess right, so assume False
        return False
    return wrapper.magic_version >= version
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mimewrap(path, filename=None, ctype=None):
    """Take the contents of the given path and wrap them into an email MIME
    part according to the content type.

    The content type is auto detected from the actual file contents and the
    file name if it is not given.

    :param path: the path to the file contents
    :type path: str
    :param filename: the file name to use in the generated MIME part
    :type filename: str or None
    :param ctype: the content type of the file contents in path
    :type ctype: str or None
    :returns: the message MIME part storing the data from path
    :rtype: subclasses of email.mime.base.MIMEBase
    """
    with open(path, 'rb') as f:
        content = f.read()
    if not ctype:
        ctype = guess_mimetype(content)
        # libmagic < 5.12 incorrectly detects excel/powerpoint files as
        # 'application/msword' (see #179 and #186 in libmagic bugtracker)
        # This is a workaround, based on file extension, useful as long
        # as distributions still ship libmagic 5.11.
        if (ctype == 'application/msword' and
                not libmagic_version_at_least(513)):
            mimetype, _ = mimetypes.guess_type(path)
            if mimetype:
                ctype = mimetype
    maintype, subtype = ctype.split('/', 1)
    if maintype == 'text':
        # text parts are decoded and re-encoded as utf-8, replacing
        # undecodable bytes rather than failing
        part = MIMEText(content.decode(guess_encoding(content), 'replace'),
                        _subtype=subtype,
                        _charset='utf-8')
    elif maintype == 'image':
        part = MIMEImage(content, _subtype=subtype)
    elif maintype == 'audio':
        part = MIMEAudio(content, _subtype=subtype)
    else:
        # generic binary attachment: raw payload, base64-encoded
        part = MIMEBase(maintype, subtype)
        part.set_payload(content)
        # Encode the payload using Base64
        email.encoders.encode_base64(part)
    # Set the filename parameter
    if not filename:
        filename = os.path.basename(path)
    part.add_header('Content-Disposition', 'attachment',
                    filename=filename)
    return part
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_mailcap_nametemplate(tmplate='%s'):
    """Split a mailcap nametemplate string into a (prefix, suffix) pair
    suitable for use with the tempfile module."""
    parts = tmplate.split('%s')
    if len(parts) == 2:
        # exactly one '%s' placeholder: surrounding text is prefix/suffix
        return (parts[0], parts[1])
    # zero or several placeholders: treat the whole template as suffix
    return ('', tmplate)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_mailto(mailto_str):
    """Interpret a mailto-string.

    :param mailto_str: the string to interpret. Must conform to :rfc:2368.
    :type mailto_str: str
    :return: the header fields and the body found in the mailto link as a
        tuple of length two; (None, None) if the string is no mailto link
    :rtype: tuple(dict(str->list(str)), str)
    """
    if not mailto_str.startswith('mailto:'):
        return (None, None)
    import urllib.parse
    recipient, query = mailto_str[7:].partition('?')[::2]
    headers = {}
    body = u''
    recipient = urllib.parse.unquote(recipient)
    if recipient:
        headers['To'] = [recipient]
    for item in query.split('&'):
        key, _, value = item.partition('=')
        key = key.capitalize()
        if key == 'Body':
            body = urllib.parse.unquote(value)
        elif value:
            headers[key] = [urllib.parse.unquote(value)]
    return (headers, body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, query=''):
    """looks up all contacts where name or address match query"""
    pattern = re.compile('.*%s.*' % re.escape(query), self.reflags)
    return [(name, email) for name, email in self.get_contacts()
            if pattern.match(name) or pattern.match(email)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_focus(self, pos):
    """Set the focus in the underlying body widget.

    :param pos: position to focus, as understood by the body widget
    """
    logging.debug('setting focus to %s ', pos)
    self.body.set_focus(pos)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_parent(self):
    """Shift focus to the parent of the currently focussed message."""
    msg_id = self.get_selected_mid()
    target = self._tree.parent_position(msg_id)
    if target is None:
        return
    self.body.set_focus(self._sanitize_position((target,)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_first_reply(self):
    """Shift focus to the first reply of the currently focussed message."""
    msg_id = self.get_selected_mid()
    target = self._tree.first_child_position(msg_id)
    if target is None:
        return
    self.body.set_focus(self._sanitize_position((target,)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_last_reply(self):
    """Shift focus to the last reply of the currently focussed message."""
    msg_id = self.get_selected_mid()
    target = self._tree.last_child_position(msg_id)
    if target is None:
        return
    self.body.set_focus(self._sanitize_position((target,)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_next_sibling(self):
    """Shift focus to the next sibling of the focussed message in the
    thread tree."""
    msg_id = self.get_selected_mid()
    target = self._tree.next_sibling_position(msg_id)
    if target is None:
        return
    self.body.set_focus(self._sanitize_position((target,)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_prev_sibling(self):
    """Shift focus to the previous sibling of the focussed message in the
    thread tree."""
    msg_id = self.get_selected_mid()
    summary_pos = self._sanitize_position((msg_id,))
    if summary_pos != self.get_focus()[1]:
        # focus is somewhere inside the message: jump to its summary first
        target = summary_pos
    else:
        target = self._tree.prev_sibling_position(msg_id)
        if target is not None:
            target = self._sanitize_position((target,))
    if target is not None:
        self.body.set_focus(target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_next(self):
    """Shift focus to the next message in depth-first order."""
    msg_id = self.get_selected_mid()
    target = self._tree.next_position(msg_id)
    if target is None:
        return
    self.body.set_focus(self._sanitize_position((target,)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_prev(self):
    """Shift focus to the previous message in depth-first order."""
    msg_id = self.get_selected_mid()
    summary_pos = self._sanitize_position((msg_id,))
    if summary_pos != self.get_focus()[1]:
        # focus is somewhere inside the message: jump to its summary first
        target = summary_pos
    else:
        target = self._tree.prev_position(msg_id)
        if target is not None:
            target = self._sanitize_position((target,))
    if target is not None:
        self.body.set_focus(target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_property(self, prop, direction):
    """Walk the thread tree in the given direction and focus the first
    message tree that satisfies the given predicate."""
    pos = direction(self.get_selected_mid())
    while pos is not None:
        if prop(self._tree[pos]):
            self.body.set_focus(self._sanitize_position((pos,)))
            return
        pos = direction(pos)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_next_matching(self, querystring):
    """Focus the next message matching the query, in depth-first order."""
    def matches(mtree):
        return mtree._message.matches(querystring)
    self.focus_property(matches, self._tree.next_position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_prev_matching(self, querystring):
    """Focus the previous message matching the query, in depth-first
    order."""
    def matches(mtree):
        return mtree._message.matches(querystring)
    self.focus_property(matches, self._tree.prev_position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_next_unfolded(self):
    """Focus the next unfolded message in depth-first order."""
    def unfolded(mtree):
        return not mtree.is_collapsed(mtree.root)
    self.focus_property(unfolded, self._tree.next_position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def focus_prev_unfolded(self):
    """Focus the previous unfolded message in depth-first order."""
    def unfolded(mtree):
        return not mtree.is_collapsed(mtree.root)
    self.focus_property(unfolded, self._tree.prev_position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand(self, msgpos):
    """Expand the message at the given position."""
    msgtree = self._tree[msgpos]
    msgtree.expand(msgtree.root)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collapse(self, msgpos):
    """Collapse the message at the given position."""
    msgtree = self._tree[msgpos]
    msgtree.collapse(msgtree.root)
    # keep focus on the (now collapsed) message summary
    self.focus_selected_message()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collapse_all(self):
    """Collapse every message in the thread."""
    for msgtree in self.messagetrees():
        msgtree.collapse(msgtree.root)
    # keep focus on the selected message summary
    self.focus_selected_message()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unfold_matching(self, querystring, focus_first=True):
    """
    expand all messages that match a given querystring.

    :param querystring: query to match
    :type querystring: str
    :param focus_first: set the focus to the first matching message
    :type focus_first: bool
    """
    first = None
    for MT in self.messagetrees():
        msg = MT._message
        if msg.matches(querystring):
            MT.expand(MT.root)
            if first is None:
                first = (self._tree.position_of_messagetree(MT), MT.root)
                # BUGFIX: the focus_first parameter used to be ignored;
                # only move focus when the caller asked for it
                if focus_first:
                    self.body.set_focus(first)
        else:
            MT.collapse(MT.root)
    self.body.refresh()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __cmp(self, other, comparitor):
    """Shared comparison method."""
    if not isinstance(other, TagWidget):
        return NotImplemented
    mine = len(self.translated)
    theirs = len(other.translated)
    # single-character tags sort separately from longer ones
    if (mine == 1) != (theirs == 1):
        return comparitor(mine, theirs)
    return comparitor(self.translated.lower(), other.translated.lower())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.
    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpg.results.Signature`
    :param error_msg: An error message if there is one, or None
    :type error_msg: :class:`str` or `None`
    '''
    sig_from = ''
    sig_known = True
    uid_trusted = False
    assert error_msg is None or isinstance(error_msg, str)
    if not sigs:
        error_msg = error_msg or u'no signature found'
    elif not error_msg:
        # look up the signing key to find a displayable (and ideally
        # trusted) user id for the first signature
        try:
            key = crypto.get_key(sigs[0].fpr)
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, since we did not break from the loop.
                sig_from = key.uids[0].uid
        except GPGProblem:
            # key lookup failed: fall back to showing the bare fingerprint
            sig_from = sigs[0].fpr
            sig_known = False
    if error_msg:
        msg = 'Invalid: {}'.format(error_msg)
    elif uid_trusted:
        msg = 'Valid: {}'.format(sig_from)
    else:
        msg = 'Untrusted: {}'.format(sig_from)
    mail.add_header(X_SIGNATURE_VALID_HEADER,
                    'False' if (error_msg or not sig_known) else 'True')
    mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_params(mail, failobj=None, header='content-type', unquote=True):
    '''Get Content-Type parameters as dict.
    RFC 2045 specifies that parameter names are case-insensitive, so
    we normalize them here.
    :param mail: :class:`email.message.Message`
    :param failobj: object to return if no such header is found
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    params = mail.get_params(failobj or [], header, unquote)
    return dict((key.lower(), value) for key, value in params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_signatures(original, message, params):
    """Shared code for handling message signatures.

    RFC 3156 is quite strict:
    * exactly two messages
    * the second is of type 'application/pgp-signature'
    * the second contains the detached signature

    :param original: The original top-level mail. This is required to
        attache special headers to
    :type original: :class:`email.message.Message`
    :param message: The multipart/signed payload to verify
    :type message: :class:`email.message.Message`
    :param params: the message parameters as returned by :func:`get_params`
    :type params: dict[str, str]
    """
    malformed = None
    if len(message.get_payload()) != 2:
        malformed = u'expected exactly two messages, got {0}'.format(
            len(message.get_payload()))
    else:
        ct = message.get_payload(1).get_content_type()
        if ct != _APP_PGP_SIG:
            malformed = u'expected Content-Type: {0}, got: {1}'.format(
                _APP_PGP_SIG, ct)
    # TODO: RFC 3156 says the alg has to be lower case, but I've seen a message
    # with 'PGP-'. maybe we should be more permissive here, or maybe not, this
    # is crypto stuff...
    if not params.get('micalg', 'nothing').startswith('pgp-'):
        malformed = u'expected micalg=pgp-..., got: {0}'.format(
            params.get('micalg', 'nothing'))
    sigs = []
    if not malformed:
        # verify the detached signature (second part) against the raw bytes
        # of the signed part (first part), serialized with SMTP line endings
        try:
            sigs = crypto.verify_detached(
                message.get_payload(0).as_bytes(policy=email.policy.SMTP),
                message.get_payload(1).get_payload(decode=True))
        except GPGProblem as e:
            malformed = str(e)
    add_signature_headers(original, sigs, malformed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_encrypted(original, message, session_keys=None):
    """Handle encrypted messages helper.

    RFC 3156 is quite strict:
    * exactly two messages
    * the first is of type 'application/pgp-encrypted'
    * the first contains 'Version: 1'
    * the second is of type 'application/octet-stream'
    * the second contains the encrypted and possibly signed data

    :param original: The original top-level mail. This is required to
        attache special headers to
    :type original: :class:`email.message.Message`
    :param message: The multipart/signed payload to verify
    :type message: :class:`email.message.Message`
    :param session_keys: a list OpenPGP session keys
    :type session_keys: [str]
    """
    malformed = False
    ct = message.get_payload(0).get_content_type()
    if ct != _APP_PGP_ENC:
        malformed = u'expected Content-Type: {0}, got: {1}'.format(
            _APP_PGP_ENC, ct)
    want = 'application/octet-stream'
    ct = message.get_payload(1).get_content_type()
    if ct != want:
        malformed = u'expected Content-Type: {0}, got: {1}'.format(want, ct)
    if not malformed:
        # This should be safe because PGP uses US-ASCII characters only
        payload = message.get_payload(1).get_payload().encode('ascii')
        try:
            sigs, d = crypto.decrypt_verify(payload, session_keys)
        except GPGProblem as e:
            # signature verification failures end up here too if the combined
            # method is used, currently this prevents the interpretation of the
            # recovered plain text mail. maybe that's a feature.
            malformed = str(e)
        else:
            n = decrypted_message_from_bytes(d, session_keys)
            # add the decrypted message to message. note that n contains all
            # the attachments, no need to walk over n here.
            original.attach(n)
            original.defects.extend(n.defects)
            # there are two methods for both signed and encrypted data, one is
            # called 'RFC 1847 Encapsulation' by RFC 3156, and one is the
            # 'Combined method'.
            if not sigs:
                # 'RFC 1847 Encapsulation', the signature is a detached
                # signature found in the recovered mime message of type
                # multipart/signed.
                if X_SIGNATURE_VALID_HEADER in n:
                    for k in (X_SIGNATURE_VALID_HEADER,
                              X_SIGNATURE_MESSAGE_HEADER):
                        original[k] = n[k]
            else:
                # 'Combined method', the signatures are returned by the
                # decrypt_verify function.
                # note that if we reached this point, we know the signatures
                # are valid. if they were not valid, the else block of the
                # current try would not have been executed
                add_signature_headers(original, sigs, '')
    if malformed:
        # attach a note explaining why decryption failed
        msg = u'Malformed OpenPGP message: {0}'.format(malformed)
        content = email.message_from_string(msg, policy=email.policy.SMTP)
        content.set_charset('utf-8')
        original.attach(content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decrypted_message_from_message(m, session_keys=None):
    '''Detect and decrypt OpenPGP encrypted data in an email object. If this
    succeeds, any mime messages found in the recovered plaintext
    message are added to the returned message object.
    :param m: an email object
    :param session_keys: a list OpenPGP session keys
    :returns: :class:`email.message.Message` possibly augmented with
        decrypted data
    '''
    # make sure no one smuggles a token in (data from m is untrusted)
    del m[X_SIGNATURE_VALID_HEADER]
    del m[X_SIGNATURE_MESSAGE_HEADER]
    if m.is_multipart():
        p = get_params(m)
        # handle OpenPGP signed data
        if (m.get_content_subtype() == 'signed' and
                p.get('protocol') == _APP_PGP_SIG):
            _handle_signatures(m, m, p)
        # handle OpenPGP encrypted data
        elif (m.get_content_subtype() == 'encrypted' and
                p.get('protocol') == _APP_PGP_ENC and
                'Version: 1' in m.get_payload(0).get_payload()):
            _handle_encrypted(m, m, session_keys)
        # It is also possible to put either of the above into a
        # multipart/mixed segment; only the first sub-part is inspected
        elif m.get_content_subtype() == 'mixed':
            sub = m.get_payload(0)
            if sub.is_multipart():
                p = get_params(sub)
                if (sub.get_content_subtype() == 'signed' and
                        p.get('protocol') == _APP_PGP_SIG):
                    _handle_signatures(m, sub, p)
                elif (sub.get_content_subtype() == 'encrypted' and
                        p.get('protocol') == _APP_PGP_ENC):
                    _handle_encrypted(m, sub, session_keys)
    return m
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decrypted_message_from_bytes(bytestring, session_keys=None):
    """Create a Message from bytes, decrypting any OpenPGP content.

    :param bytes bytestring: an email message as raw bytes
    :param session_keys: a list OpenPGP session keys
    """
    mail = email.message_from_bytes(bytestring, policy=email.policy.SMTP)
    return decrypted_message_from_message(mail, session_keys)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_part(part, field_key='copiousoutput'):
    """
    renders a non-multipart email part into displayable plaintext by piping
    its payload through an external script. The handler itself is determined
    by the mailcap entry for this part's ctype.

    :param part: non-multipart mime part to render
    :param field_key: mailcap field the handler entry must provide
    :type field_key: str
    :returns: the handler's stdout, or None if no handler was found or it
        produced no output
    """
    ctype = part.get_content_type()
    raw_payload = remove_cte(part)
    rendered_payload = None
    # get mime handler
    _, entry = settings.mailcap_find_match(ctype, key=field_key)
    if entry is None:
        return None
    tempfile_name = None
    stdin = None
    handler_raw_commandstring = entry['view']
    # in case the mailcap defined command contains no '%s',
    # we pipe the files content to the handling command via stdin
    if '%s' in handler_raw_commandstring:
        # open tempfile, respect mailcaps nametemplate
        nametemplate = entry.get('nametemplate', '%s')
        prefix, suffix = parse_mailcap_nametemplate(nametemplate)
        with tempfile.NamedTemporaryFile(
                delete=False, prefix=prefix, suffix=suffix) \
                as tmpfile:
            tmpfile.write(raw_payload)
            tempfile_name = tmpfile.name
    else:
        stdin = raw_payload
    try:
        # read parameter, create handler command
        parms = tuple('='.join(p) for p in part.get_params())
        cmd = mailcap.subst(handler_raw_commandstring, ctype,
                            filename=tempfile_name, plist=parms)
        logging.debug('command: %s', cmd)
        logging.debug('parms: %s', str(parms))
        cmdlist = split_commandstring(cmd)
        # call handler
        stdout, _, _ = helper.call_cmd(cmdlist, stdin=stdin)
        if stdout:
            rendered_payload = stdout
    finally:
        # BUGFIX: remove the tempfile even if substitution or the handler
        # raised; previously it leaked on any exception
        if tempfile_name:
            os.unlink(tempfile_name)
    return rendered_payload
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_cte(part, as_string=False):
    """Interpret MIME-part according to it's Content-Transfer-Encodings.

    This returns the payload of `part` as string or bytestring for display,
    or to be passed to an external program. In the raw file the payload may
    be encoded, e.g. in base64, quoted-printable, 7bit, or 8bit. This method
    will look for one of the above Content-Transfer-Encoding header and
    interpret the payload accordingly.

    Incorrect header values (common in spam messages) will be interpreted as
    lenient as possible and will result in INFO-level debug messages.

    ..Note:: All this may be depricated in favour of
        `email.contentmanager.raw_data_manager` (v3.6+)

    :param email.Message part: The part to decode
    :param bool as_string: If true return a str, otherwise return bytes
    :returns: The mail with any Content-Transfer-Encoding removed
    :rtype: Union[str, bytes]
    """
    enc = part.get_content_charset() or 'ascii'
    cte = str(part.get('content-transfer-encoding', '7bit')).lower().strip()
    payload = part.get_payload()
    sp = ''  # string variant of return value
    bp = b''  # bytestring variant
    logging.debug('Content-Transfer-Encoding: "{}"'.format(cte))
    if cte not in ['quoted-printable', 'base64', '7bit', '8bit', 'binary']:
        logging.info('Unknown Content-Transfer-Encoding: "{}"'.format(cte))
    # switch through all sensible cases
    # starting with those where payload is already a str
    if '7bit' in cte or 'binary' in cte:
        logging.debug('assuming Content-Transfer-Encoding: 7bit')
        sp = payload
        if as_string:
            return sp
        bp = payload.encode('utf-8')
        return bp
    # the remaining cases need decoding and define only bt;
    # decoding into a str is done at the end if requested
    elif '8bit' in cte:
        logging.debug('assuming Content-Transfer-Encoding: 8bit')
        # Python's mail library may decode 8bit as raw-unicode-escape, so
        # we need to encode that back to bytes so we can decode it using
        # the correct encoding, or it might not, in which case assume that
        # the str representation we got is correct.
        bp = payload.encode('raw-unicode-escape')
    elif 'quoted-printable' in cte:
        logging.debug('assuming Content-Transfer-Encoding: quoted-printable')
        bp = quopri.decodestring(payload.encode('ascii'))
    elif 'base64' in cte:
        logging.debug('assuming Content-Transfer-Encoding: base64')
        bp = base64.b64decode(payload)
    else:
        # unknown CTE: bp stays b'' / sp stays '' (lenient fallthrough)
        logging.debug('failed to interpret Content-Transfer-Encoding: '
                      '"{}"'.format(cte))
    # by now, bp is defined, sp is not.
    if as_string:
        try:
            sp = bp.decode(enc)
        except LookupError:
            # enc is unknown;
            # fall back to guessing the correct encoding using libmagic
            sp = helper.try_decode(bp)
        except UnicodeDecodeError as emsg:
            # the mail contains chars that are not enc-encoded.
            # libmagic works better than just ignoring those
            logging.debug('Decoding failure: {}'.format(emsg))
            sp = helper.try_decode(bp)
        return sp
    return bp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_body(mail, types=None, field_key='copiousoutput'):
    """Returns a string view of a Message.

    If the `types` argument is set then any encoding types there will be used
    as the prefered encoding to extract. If `types` is None then
    :ref:`prefer_plaintext <prefer-plaintext>` will be consulted; if it is
    True then text/plain parts will be returned, if it is false then
    text/html will be returned if present or text/plain if there are no
    text/html parts.

    :param mail: the mail to use
    :type mail: :class:`email.Message`
    :param types: mime content types to use for body string
    :type types: list[str]
    :returns: The combined text of any parts to be used
    :rtype: str
    """
    preferred = 'text/plain' if settings.get(
        'prefer_plaintext') else 'text/html'
    has_preferred = False
    # see if the mail has our preferred type
    if types is None:
        has_preferred = list(typed_subpart_iterator(
            mail, *preferred.split('/')))
    body_parts = []
    for part in mail.walk():
        # skip non-leaf nodes in the mail tree
        if part.is_multipart():
            continue
        ctype = part.get_content_type()
        if types is not None:
            if ctype not in types:
                continue
        cd = part.get('Content-Disposition', '')
        if cd.startswith('attachment'):
            continue
        # if the mail has our preferred type, we only keep this type
        # note that if types != None, has_preferred always stays False
        if has_preferred and ctype != preferred:
            continue
        if ctype == 'text/plain':
            body_parts.append(string_sanitize(remove_cte(part, as_string=True)))
        else:
            # pipe non-plaintext parts through their mailcap handler
            rendered_payload = render_part(part)
            if rendered_payload:  # handler had output
                body_parts.append(string_sanitize(rendered_payload))
            # mark as attachment
            elif cd:
                part.replace_header('Content-Disposition', 'attachment; ' + cd)
            else:
                part.add_header('Content-Disposition', 'attachment;')
    return u'\n\n'.join(body_parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode_header(header, normalize=False):
    """
    decode a header value to a unicode string

    values are usually a mixture of different substrings encoded in quoted
    printable using different encodings. This turns it into a single unicode
    string

    :param header: the header value
    :type header: str
    :param normalize: replace trailing spaces after newlines
    :type normalize: bool
    :rtype: str
    """
    # some mailers send out incorrectly escaped headers
    # and double quote the escaped realname part again. remove those
    # RFC: 2047
    regex = r'"(=\?.+?\?.+?\?[^ ?]+\?=)"'
    value = re.sub(regex, r'\1', header)
    logging.debug("unquoted header: |%s|", value)
    # otherwise we interpret RFC2822 encoding escape sequences
    value = ''.join(
        string_sanitize(string_decode(raw, enc))
        for raw, enc in email.header.decode_header(value))
    if normalize:
        value = re.sub(r'\n\s+', r' ', value)
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_threads(self, querystring, sort='newest_first', exclude_tags=None):
    """
    asynchronously look up thread ids matching `querystring`.

    :param querystring: The query string to use for the lookup
    :type querystring: str.
    :param sort: Sort order. one of ['oldest_first', 'newest_first',
        'message_id', 'unsorted']
    :type query: str
    :param exclude_tags: Tags to exclude by default unless included in the
        search
    :type exclude_tags: list of str
    :returns: a pipe together with the process that asynchronously
        writes to it.
    :rtype: (:class:`multiprocessing.Pipe`,
            :class:`multiprocessing.Process`)
    """
    assert sort in self._sort_orders
    query = self.query(querystring)
    query.set_sort(self._sort_orders[sort])
    for tag in (exclude_tags or []):
        query.exclude_tag(tag)
    return self.async_(query.search_threads,
                       (lambda thread: thread.get_thread_id()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_message(self, path, tags=None, afterwards=None):
    """
    Adds a file to the notmuch index.

    :param path: path to the file
    :type path: str
    :param tags: tagstrings to add
    :type tags: list of str
    :param afterwards: callback to trigger after adding
    :type afterwards: callable or None
    """
    if self.ro:
        raise DatabaseROError()
    if not is_subdir_of(path, self.path):
        raise DatabaseError(
            'message path %s  is not below notmuchs root path (%s)'
            % (path, self.path))
    self.writequeue.append(('add', afterwards, path, tags or []))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_message(self, message, afterwards=None):
""" Remove a message from the notmuch index :param message: message to remove :type message: :class:`Message` :param afterwards: callback to trigger after removing :type afterwards: callable or None """ |
if self.ro:
    raise DatabaseROError()
# queue the removal by file name; it is processed on the next flush
self.writequeue.append(('remove', afterwards, message.get_filename()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_named_query(self, alias, querystring, afterwards=None):
""" add an alias for a query string. These are stored in the notmuch database and can be used as part of more complex queries using the syntax "query:alias". See :manpage:`notmuch-search-terms(7)` for more info. :param alias: name of shortcut :type alias: str :param querystring: value, i.e., the full query string :type querystring: str :param afterwards: callback to trigger after adding the alias :type afterwards: callable or None """ |
if self.ro:
    raise DatabaseROError()
# named queries live in the notmuch config under the "query." prefix
entry = ('setconfig', afterwards, 'query.%s' % alias, querystring)
self.writequeue.append(entry)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_named_query(self, alias, afterwards=None):
""" remove a named query from the notmuch database. :param alias: name of shortcut :type alias: str :param afterwards: callback to trigger after adding the alias :type afterwards: callable or None """ |
if self.ro:
    raise DatabaseROError()
# clearing the config value effectively deletes the named query
self.writequeue.append(('setconfig', afterwards, 'query.%s' % alias, ''))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RFC3156_micalg_from_algo(hash_algo):
""" Converts a GPGME hash algorithm name to one conforming to RFC3156. GPGME returns hash algorithm names such as "SHA256", but RFC3156 says that programs need to use names such as "pgp-sha256" instead. :param str hash_algo: GPGME hash_algo :returns: the lowercase name of the algorithm with "pgp-" prepended :rtype: str """ |
# hash_algo will be something like SHA256, but we need pgp-sha256.
algo = gpg.core.hash_algo_name(hash_algo)
if algo is None:
raise GPGProblem('Unknown hash algorithm {}'.format(algo),
code=GPGCode.INVALID_HASH_ALGORITHM)
return 'pgp-' + algo.lower() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_keys(hint=None, private=False):
""" Returns a generator of all keys containing the fingerprint, or all keys if hint is None. The generator may raise exceptions of :class:gpg.errors.GPGMEError, and it is the caller's responsibility to handle them. :param hint: Part of a fingerprint to use to search :type hint: str or None :param private: Whether to return public keys or secret keys :type private: bool :returns: A generator that yields keys. :rtype: Generator[gpg.gpgme.gpgme_key_t, None, None] """ |
ctx = gpg.core.Context()
return ctx.keylist(hint, private) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detached_signature_for(plaintext_str, keys):
""" Signs the given plaintext string and returns the detached signature. A detached signature in GPG speak is a separate blob of data containing a signature for the specified plaintext. :param bytes plaintext_str: bytestring to sign :param keys: list of one or more key to sign with. :type keys: list[gpg.gpgme._gpgme_key] :returns: A list of signature and the signed blob of data :rtype: tuple[list[gpg.results.NewSignature], str] """ |
ctx = gpg.core.Context(armor=True)
ctx.signers = keys
(sigblob, sign_result) = ctx.sign(plaintext_str,
mode=gpg.constants.SIG_MODE_DETACH)
return sign_result.signatures, sigblob |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encrypt(plaintext_str, keys):
"""Encrypt data and return the encrypted form. :param bytes plaintext_str: the mail to encrypt :param key: optionally, a list of keys to encrypt with :type key: list[gpg.gpgme.gpgme_key_t] or None :returns: encrypted mail :rtype: str """ |
assert keys, 'Must provide at least one key to encrypt with'
ctx = gpg.core.Context(armor=True)
out = ctx.encrypt(plaintext_str, recipients=keys, sign=False,
always_trust=True)[0]
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bad_signatures_to_str(error):
""" Convert a bad signature exception to a text message. This is a workaround for gpg not handling non-ascii data correctly. :param BadSignatures error: BadSignatures exception """ |
return ", ".join("{}: {}".format(s.fpr,
"Bad signature for key(s)")
for s in error.result.signatures
if s.status != NO_ERROR) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verify_detached(message, signature):
"""Verifies whether the message is authentic by checking the signature. :param bytes message: The message to be verified, in canonical form. :param bytes signature: the OpenPGP signature to verify :returns: a list of signatures :rtype: list[gpg.results.Signature] :raises alot.errors.GPGProblem: if the verification fails """ |
ctx = gpg.core.Context()
try:
verify_results = ctx.verify(message, signature)[1]
return verify_results.signatures
except gpg.errors.BadSignatures as e:
raise GPGProblem(bad_signatures_to_str(e), code=GPGCode.BAD_SIGNATURE)
except gpg.errors.GPGMEError as e:
raise GPGProblem(str(e), code=e.getcode()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_key(key, sign=False, encrypt=False):
"""Assert that a key is valid and optionally that it can be used for signing or encrypting. Raise GPGProblem otherwise. :param key: the GPG key to check :type key: gpg.gpgme._gpgme_key :param sign: whether the key should be able to sign :type sign: bool :param encrypt: whether the key should be able to encrypt :type encrypt: bool :raises ~alot.errors.GPGProblem: If the key is revoked, expired, or invalid :raises ~alot.errors.GPGProblem: If encrypt is true and the key cannot be used to encrypt :raises ~alot.errors.GPGProblem: If sign is true and the key cannot be used to sign """ |
# General key problems, checked in order of severity. The uid is only
# read when we actually raise, mirroring the lazy access of the
# original elif chain.
problems = (
    (key.revoked, 'The key "{}" is revoked.', GPGCode.KEY_REVOKED),
    (key.expired, 'The key "{}" is expired.', GPGCode.KEY_EXPIRED),
    (key.invalid, 'The key "{}" is invalid.', GPGCode.KEY_INVALID),
)
for broken, template, code in problems:
    if broken:
        raise GPGProblem(template.format(key.uids[0].uid), code=code)
# capability checks, only enforced when requested by the caller
if encrypt and not key.can_encrypt:
    raise GPGProblem(
        'The key "{}" cannot be used to encrypt'.format(key.uids[0].uid),
        code=GPGCode.KEY_CANNOT_ENCRYPT)
if sign and not key.can_sign:
    raise GPGProblem(
        'The key "{}" cannot be used to sign'.format(key.uids[0].uid),
        code=GPGCode.KEY_CANNOT_SIGN)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attr_triple(value):
""" Check that interprets the value as `urwid.AttrSpec` triple for the colour modes 1,16 and 256. It assumes a <6 tuple of attribute strings for mono foreground, mono background, 16c fg, 16c bg, 256 fg and 256 bg respectively. If any of these are missing, we downgrade to the next lower available pair, defaulting to 'default'. :raises: VdtValueTooLongError, VdtTypeError :rtype: triple of `urwid.AttrSpec` """ |
keys = ['dfg', 'dbg', '1fg', '1bg', '16fg', '16bg', '256fg', '256bg']
acc = {}
if not isinstance(value, (list, tuple)):
value = value,
if len(value) > 6:
raise VdtValueTooLongError(value)
# ensure we have exactly 6 attribute strings
attrstrings = (value + (6 - len(value)) * [None])[:6]
# add fallbacks for the empty list
attrstrings = (2 * ['default']) + attrstrings
for i, value in enumerate(attrstrings):
if value:
acc[keys[i]] = value
else:
acc[keys[i]] = acc[keys[i - 2]]
try:
mono = AttrSpec(acc['1fg'], acc['1bg'], 1)
normal = AttrSpec(acc['16fg'], acc['16bg'], 16)
high = AttrSpec(acc['256fg'], acc['256bg'], 256)
except AttrSpecError as e:
raise ValidateError(str(e))
return mono, normal, high |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gpg_key(value):
""" test if value points to a known gpg key and return that key as a gpg key object. """ |
try:
return crypto.get_key(value)
except GPGProblem as e:
raise ValidateError(str(e)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_att(a, fallback):
""" replace '' and 'default' by fallback values """ |
if a is None:
return fallback
if a.background in ['default', '']:
bg = fallback.background
else:
bg = a.background
if a.foreground in ['default', '']:
fg = fallback.foreground
else:
fg = a.foreground
return AttrSpec(fg, bg) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relevant_part(self, original, pos, sep=' '):
""" calculates the subword in a `sep`-split list of substrings of `original` that `pos` is in. """ |
start = original.rfind(sep, 0, pos) + 1
end = original.find(sep, pos - 1)
if end == -1:
end = len(original)
return original[start:end], start, end, pos - start |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def relevant_part(self, original, pos):
""" calculates the subword of `original` that `pos` is in """ |
start = original.rfind(self._separator, 0, pos)
if start == -1:
start = 0
else:
start = start + len(self._separator)
end = original.find(self._separator, pos - 1)
if end == -1:
end = len(original)
return original[start:end], start, end, pos - start |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_context(line, pos):
""" computes start and end position of substring of line that is the command string under given position """ |
commands = split_commandline(line) + ['']
i = 0
start = 0
end = len(commands[i])
while pos > end:
i += 1
start = end + 1
end += 1 + len(commands[i])
return start, end |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _path_factory(check):
"""Create a function that checks paths.""" |
@functools.wraps(check)
def validator(paths):
if isinstance(paths, str):
check(paths)
elif isinstance(paths, collections.Sequence):
for path in paths:
check(path)
else:
raise Exception('expected either basestr or sequenc of basstr')
return validator |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optional_file_like(path):
"""Validator that ensures that if a file exists it regular, a fifo, or a character device. The file is not required to exist. This includes character special devices like /dev/null. """ |
# A missing file is fine; only an existing file of the wrong kind is
# rejected. Character devices (e.g. /dev/null) and fifos are accepted.
if os.path.exists(path):
    # stat once instead of three separate filesystem queries
    mode = os.stat(path).st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISFIFO(mode) or
            stat.S_ISCHR(mode)):
        raise ValidationFailed(
            '{} is not a valid file, character device, or fifo.'.format(
                path))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filename(self):
""" return name of attached file. If the content-disposition header contains no file name, this returns `None` """ |
fname = self.part.get_filename()
if fname:
extracted_name = decode_header(fname)
if extracted_name:
return os.path.basename(extracted_name)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_content_type(self):
"""mime type of the attachment part""" |
ctype = self.part.get_content_type()
# replace underspecified mime description by a better guess
if ctype in ['octet/stream', 'application/octet-stream',
'application/octetstream']:
ctype = guess_mimetype(self.get_data())
return ctype |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mime_representation(self):
"""returns mime part that constitutes this attachment""" |
part = deepcopy(self.part)
part.set_param('maxlinelen', '78', header='Content-Disposition')
return part |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attribute(self, colourmode, mode, name, part=None):
""" returns requested attribute :type mode: str :param name: of the atttribute :type name: str :param colourmode: colour mode; in [1, 16, 256] :type colourmode: int :rtype: urwid.AttrSpec """ |
thmble = self._config[mode][name]
if part is not None:
thmble = thmble[part]
thmble = thmble or DUMMYDEFAULT
return thmble[self._colours.index(colourmode)] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_threadline_theming(self, thread, colourmode):
""" look up how to display a Threadline wiidget in search mode for a given thread. :param thread: Thread to theme Threadline for :type thread: alot.db.thread.Thread :param colourmode: colourmode to use, one of 1,16,256. :type colourmode: int This will return a dict mapping :normal: to `urwid.AttrSpec`, :focus: to `urwid.AttrSpec`, :parts: to a list of strings indentifying subwidgets to be displayed in this order. Moreover, for every part listed this will map 'part' to a dict mapping :normal: to `urwid.AttrSpec`, :focus: to `urwid.AttrSpec`, :width: to a tuple indicating the width of the subpart. This is either `('fit', min, max)` to force the widget to be at least `min` and at most `max` characters wide, or `('weight', n)` which makes it share remaining space with other 'weight' parts. :alignment: where to place the content if shorter than the widget. This is either 'right', 'left' or 'center'. """ |
# helper: select the colour-triple entry for the active colour mode
def pickcolour(triple):
    return triple[self._colours.index(colourmode)]

# helper: does config section `sec` apply to this thread? A section
# may restrict itself via 'tagged_with' (all listed tags must be
# present on the thread) and/or 'query' (thread must match the search)
def matches(sec, thread):
    if sec.get('tagged_with') is not None:
        if not set(sec['tagged_with']).issubset(thread.get_tags()):
            return False
    if sec.get('query') is not None:
        if not thread.matches(sec['query']):
            return False
    return True

# 'threadline' is the catch-all default; any other section whose name
# starts with 'threadline' overrides it for threads it matches.
# First matching candidate wins.
default = self._config['search']['threadline']
match = default
candidates = self._config['search'].sections
for candidatename in candidates:
    candidate = self._config['search'][candidatename]
    if (candidatename.startswith('threadline') and
            (not candidatename == 'threadline') and
            matches(candidate, thread)):
        match = candidate
        break

# fill in values
res = {}
res['normal'] = pickcolour(match.get('normal') or default['normal'])
res['focus'] = pickcolour(match.get('focus') or default['focus'])
res['parts'] = match.get('parts') or default['parts']
for part in res['parts']:
    defaultsec = default.get(part)
    partsec = match.get(part) or {}

    # look up `key` in the matched section, then in the default
    # section, then fall back to `fallback`
    def fill(key, fallback=None):
        pvalue = partsec.get(key) or defaultsec.get(key)
        return pvalue or fallback

    res[part] = {}
    res[part]['width'] = fill('width', ('fit', 0, 0))
    res[part]['alignment'] = fill('alignment', 'right')
    res[part]['normal'] = pickcolour(fill('normal'))
    res[part]['focus'] = pickcolour(fill('focus'))
return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def apply_commandline(self, cmdline):
""" interprets a command line string i.e., splits it into separate command strings, instantiates :class:`Commands <alot.commands.Command>` accordingly and applies them in sequence. :param cmdline: command line to interpret :type cmdline: str """ |
# remove initial spaces
cmdline = cmdline.lstrip()

# Commands are applied one at a time via `self.apply_command`. Because
# a command may run asynchronous code, each application is awaited
# before the next command string is interpreted, so the commands take
# effect strictly in sequence.
def apply_this_command(cmdstring):
    logging.debug('%s command string: "%s"', self.mode, str(cmdstring))
    # translate cmdstring into :class:`Command`
    cmd = commandfactory(cmdstring, self.mode)
    # store cmdline for use with 'repeat' command
    if cmd.repeatable:
        self.last_commandline = cmdline
    # may return an awaitable when the command runs asynchronously
    return self.apply_command(cmd)
try:
    for c in split_commandline(cmdline):
        await apply_this_command(c)
except Exception as e:
    # a failure aborts the remaining commands and is reported through
    # the UI's central error handler
    self._error_handler(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def paused(self):
""" context manager that pauses the UI to allow running external commands. If an exception occurs, the UI will be started before the exception is re-raised. """ |
# suspend the urwid main loop so the terminal is handed back to the
# external command
self.mainloop.stop()
try:
    yield
finally:
    # always restart the UI, even if the caller's code raised
    self.mainloop.start()
    # make sure urwid renders its canvas at the correct size; the
    # external program may have resized the terminal while we slept
    self.mainloop.screen_size = None
    self.mainloop.draw_screen()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deep_focus(self, startfrom=None):
"""return the bottom most focussed widget of the widget tree""" |
if not startfrom:
startfrom = self.current_buffer
if 'get_focus' in dir(startfrom):
focus = startfrom.get_focus()
if isinstance(focus, tuple):
focus = focus[0]
if isinstance(focus, urwid.Widget):
return self.get_deep_focus(startfrom=focus)
return startfrom |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.