text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scrape_links(self, text, context=False):
    '''Scrape links from a text string.

    Convenience wrapper that wraps *text* in an in-memory buffer and
    delegates to :meth:`iter_processed_links`.
    '''
    buffer = io.StringIO(text)
    return self.iter_processed_links(buffer, context=context)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scrape(self, request, response, link_type=None):
    '''Return the first useful result from the document scrapers.

    A scraper that returns ``None`` or an empty set of link contexts is
    skipped; ``None`` is returned when no scraper produced anything.
    '''
    for doc_scraper in self._document_scrapers:
        result = doc_scraper.scrape(request, response, link_type)

        if result is not None and result.link_contexts:
            return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scrape_info(self, request, response, link_type=None):
    '''Run every document scraper and collect the results.

    Returns:
        dict: A dict where the keys are the scraper instances and the
        values are the results. That is, a mapping from
        :class:`BaseDocumentScraper` to :class:`ScrapeResult`.
    '''
    return {
        scraper: scraper.scrape(request, response, link_type)
        for scraper in self._document_scrapers
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def feed(self, data_len, feed_time=None):
    '''Update the bandwidth meter.

    Args:
        data_len (int): The number of bytes transfered since the last
            call to :func:`feed`.
        feed_time (float): Current time. Defaults to ``time.time()``.
    '''
    self._bytes_transferred += data_len
    self._collected_bytes_transferred += data_len

    time_now = feed_time or time.time()
    time_diff = time_now - self._last_feed_time

    # Accumulate byte counts until enough time has passed to take a
    # meaningful sample.
    if time_diff < self._sample_min_time:
        return

    # BUG FIX: use the caller-supplied clock value. The original called
    # time.time() again here, which desynchronized the meter whenever
    # feed_time was explicitly provided.
    self._last_feed_time = time_now

    if data_len == 0 and time_diff >= self._stall_time:
        self._stalled = True
        return

    self._samples.append((time_diff, self._collected_bytes_transferred))
    self._collected_bytes_transferred = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def speed(self):
    '''Return the current transfer speed.

    Returns:
        int: The speed in bytes per second. Returns 0 when the
        transfer has stalled or no samples were collected yet.
    '''
    if self._stalled:
        return 0

    total_time = sum(sample[0] for sample in self._samples)
    total_bytes = sum(sample[1] for sample in self._samples)

    return total_bytes / total_time if total_time else 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_info(self):
    '''Yield a ResourceInfo instance for each monitored resource.'''
    if self._min_disk:
        # One entry per monitored filesystem path.
        for resource_path in self._resource_paths:
            disk_usage = psutil.disk_usage(resource_path)
            yield ResourceInfo(resource_path, disk_usage.free, self._min_disk)

    if self._min_memory:
        memory_usage = psutil.virtual_memory()
        yield ResourceInfo(None, memory_usage.available, self._min_memory)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(self):
    '''Check resource levels.

    Returns:
        None, ResourceInfo: ``None`` when no limits are exceeded;
        otherwise the first ResourceInfo whose free amount is below
        its limit.
    '''
    return next(
        (info for info in self.get_info() if info.free < info.limit),
        None,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, response):
    '''Load the response and update the redirect counter.

    Args:
        response (:class:`.http.request.Response`): The response from
            a previous request.
    '''
    self._response = response

    # A raw Location field present means this response redirects.
    redirect_target = self.next_location(raw=True)

    if redirect_target:
        self._num_redirects += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next_location(self, raw=False):
    '''Return the next location.

    Args:
        raw (bool): If True, return the Location field verbatim.
            Otherwise, join it against the request URL to form a
            complete, normalized URL.

    Returns:
        str, None: The location string, or ``None`` when there is no
        next location.
    '''
    if not self._response:
        return None

    location = self._response.fields.get('location')

    if raw or not location:
        return location

    return wpull.url.urljoin(
        self._response.request.url_info.url, location)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_redirect(self):
    '''Return whether the loaded response carries a redirect code.'''
    if not self._response:
        return None

    code = self._response.status_code
    return code in self._codes or code in self._repeat_codes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_resolver(cls, session: AppSession):
    '''Build resolver.

    Creates a DNS ``Resolver`` from the parsed command-line arguments,
    selecting the IP family preference and an optional DNS cache.
    '''
    args = session.args
    dns_timeout = args.dns_timeout

    # --timeout overrides the DNS-specific timeout.
    if args.timeout:
        dns_timeout = args.timeout

    # Hard restriction (--inet-family) takes precedence over soft
    # preference (--prefer-family).
    if args.inet_family == 'IPv4':
        family = IPFamilyPreference.ipv4_only
    elif args.inet_family == 'IPv6':
        family = IPFamilyPreference.ipv6_only
    elif args.prefer_family == 'IPv6':
        family = IPFamilyPreference.prefer_ipv6
    elif args.prefer_family == 'IPv4':
        family = IPFamilyPreference.prefer_ipv4
    else:
        family = IPFamilyPreference.any

    return session.factory.new(
        'Resolver',
        family=family,
        timeout=dns_timeout,
        rotate=args.rotate_dns,
        # The cache class comes from the registered Resolver class so a
        # factory override keeps a matching cache implementation.
        cache=session.factory.class_map['Resolver'].new_cache() if args.dns_cache else None,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_connection_pool(cls, session: AppSession):
    '''Create connection pool.

    Builds the HTTP(S) connection factories (honoring timeouts, bind
    address, bandwidth limit, and SSL context) and, when a proxy is
    configured, an HTTP proxy connection pool instead of a plain one.
    '''
    args = session.args
    connect_timeout = args.connect_timeout
    read_timeout = args.read_timeout

    # --timeout overrides the fine-grained timeouts.
    if args.timeout:
        connect_timeout = read_timeout = args.timeout

    if args.limit_rate:
        bandwidth_limiter = session.factory.new('BandwidthLimiter',
                                                args.limit_rate)
    else:
        bandwidth_limiter = None

    connection_factory = functools.partial(
        Connection,
        timeout=read_timeout,
        connect_timeout=connect_timeout,
        bind_host=session.args.bind_address,
        bandwidth_limiter=bandwidth_limiter,
    )

    ssl_connection_factory = functools.partial(
        SSLConnection,
        timeout=read_timeout,
        connect_timeout=connect_timeout,
        bind_host=session.args.bind_address,
        ssl_context=session.ssl_context,
    )

    if not session.args.no_proxy:
        if session.args.https_proxy:
            # BUG FIX: read the HTTPS proxy setting here. The original
            # split args.http_proxy, so --https-proxy without
            # --http-proxy crashed (None.split) or used the wrong host.
            http_proxy = session.args.https_proxy.split(':', 1)
            proxy_ssl = True
        elif session.args.http_proxy:
            http_proxy = session.args.http_proxy.split(':', 1)
            proxy_ssl = False
        else:
            http_proxy = None
            proxy_ssl = None

        if http_proxy:
            # Convert "host:port" pair into (host, int port).
            http_proxy[1] = int(http_proxy[1])

            if session.args.proxy_user:
                authentication = (session.args.proxy_user,
                                  session.args.proxy_password)
            else:
                authentication = None

            session.factory.class_map['ConnectionPool'] = \
                HTTPProxyConnectionPool

            host_filter = session.factory.new(
                'ProxyHostFilter',
                accept_domains=session.args.proxy_domains,
                reject_domains=session.args.proxy_exclude_domains,
                accept_hostnames=session.args.proxy_hostnames,
                reject_hostnames=session.args.proxy_exclude_hostnames
            )

            return session.factory.new(
                'ConnectionPool',
                http_proxy,
                proxy_ssl=proxy_ssl,
                authentication=authentication,
                resolver=session.factory['Resolver'],
                connection_factory=connection_factory,
                ssl_connection_factory=ssl_connection_factory,
                host_filter=host_filter,
            )

    return session.factory.new(
        'ConnectionPool',
        resolver=session.factory['Resolver'],
        connection_factory=connection_factory,
        ssl_connection_factory=ssl_connection_factory
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_all(self):
    '''Convert links in every completed record of the URL table.'''
    for url_record in self._url_table.get_all():
        # Only records that finished downloading have files to convert.
        if url_record.status == Status.done:
            self.convert_by_record(url_record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_chunk_header(self):
    '''Read a single chunk's header.

    Returns:
        tuple: 2-item tuple with the size of the content in the chunk and
        the raw header byte string.

    Coroutine.

    Raises:
        NetworkError: The connection closed before the header line ended.
        ProtocolError: The chunk size is malformed or negative.
    '''
    # _logger.debug('Reading chunk.')

    try:
        chunk_size_hex = yield from self._connection.readline()
    except ValueError as error:
        raise ProtocolError(
            'Invalid chunk size: {0}'.format(error)) from error

    # A line without its trailing newline means the peer closed early.
    if not chunk_size_hex.endswith(b'\n'):
        raise NetworkError('Connection closed.')

    try:
        # The size is hexadecimal; chunk extensions after ';' are ignored.
        chunk_size = int(chunk_size_hex.split(b';', 1)[0].strip(), 16)
    except ValueError as error:
        raise ProtocolError(
            'Invalid chunk size: {0}'.format(error)) from error

    if chunk_size < 0:
        raise ProtocolError('Chunk size cannot be negative.')

    self._chunk_size = self._bytes_left = chunk_size

    return chunk_size, chunk_size_hex
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_chunk_body(self):
    '''Read a fragment of a single chunk.

    Call :meth:`read_chunk_header` first.

    Returns:
        tuple: 2-item tuple with the content data and raw data.
        First item is empty bytes string when chunk is fully read.

    Coroutine.
    '''
    # chunk_size = self._chunk_size
    bytes_left = self._bytes_left

    # _logger.debug(__('Getting chunk size={0}, remain={1}.',
    #  chunk_size, bytes_left))

    if bytes_left > 0:
        # Read at most _read_size bytes per call; the caller keeps
        # calling until it sees the empty-bytes sentinel.
        size = min(bytes_left, self._read_size)
        data = yield from self._connection.read(size)

        self._bytes_left -= len(data)

        return (data, data)
    elif bytes_left < 0:
        raise ProtocolError('Chunked-transfer overrun.')
    elif bytes_left:
        # NOTE(review): this branch is unreachable when bytes_left is an
        # int (it must be 0 here after the two tests above) — possibly it
        # was meant to guard a None from a missing read_chunk_header
        # call. Confirm intent before removing.
        raise NetworkError('Connection closed.')

    # Chunk payload fully consumed; consume the trailing line terminator.
    newline_data = yield from self._connection.readline()

    if len(newline_data) > 2:
        # Should be either CRLF or LF
        # This could be our problem or the server's problem
        raise ProtocolError('Error reading newline after chunk.')

    self._chunk_size = self._bytes_left = None

    return (b'', newline_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_trailer(self):
    '''Read the HTTP trailer fields.

    Returns:
        bytes: The raw trailer data, including the terminating blank
        line.

    Coroutine.
    '''
    _logger.debug('Reading chunked trailer.')

    trailer_chunks = []

    while True:
        line = yield from self._connection.readline()
        trailer_chunks.append(line)

        # A blank line (bare CRLF/LF) terminates the trailer section.
        if not line.strip():
            return b''.join(trailer_chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version_tuple(string):
    '''Return a version tuple from a string.

    Args:
        string (str): A version string such as ``1.2.3b4``.

    Returns:
        tuple: ``(major, minor, patch, level, serial)``.

    Raises:
        ValueError: The string is not a recognizable version.
    '''
    match = re.match(r'(\d+)\.(\d+)\.?(\d*)([abc]?)(\d*)', string)

    # BUG FIX: re.match returns None for unparseable input; the original
    # then crashed with AttributeError on match.group. Raise a clear
    # ValueError instead.
    if not match:
        raise ValueError('Invalid version string: {0}'.format(string))

    major = int(match.group(1))
    minor = int(match.group(2))
    patch = int(match.group(3) or 0)
    level = RELEASE_LEVEL_MAP.get(match.group(4), 'final')
    serial = int(match.group(5) or 0)

    return major, minor, patch, level, serial
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_input_urls(cls, session: AppSession, default_scheme='http'):
    '''Read the URLs provided by the user.

    Yields parsed (and optionally rewritten) URLInfo instances built
    from the command-line URLs and, if given, the input file. Invalid
    URLs are logged at info level and skipped.
    '''
    url_string_iter = session.args.urls or ()
    # FIXME: url rewriter isn't created yet
    url_rewriter = session.factory.get('URLRewriter')

    if session.args.input_file:
        if session.args.force_html:
            # Treat the input file as an HTML document and scrape links.
            lines = cls._input_file_as_html_links(session)
        else:
            lines = cls._input_file_as_lines(session)

        url_string_iter = itertools.chain(url_string_iter, lines)

    base_url = session.args.base

    for url_string in url_string_iter:
        _logger.debug(__('Parsing URL {0}', url_string))

        if base_url:
            # Resolve relative URLs against --base.
            url_string = wpull.url.urljoin(base_url, url_string)

        try:
            url_info = wpull.url.URLInfo.parse(
                url_string, default_scheme=default_scheme)

            _logger.debug(__('Parsed URL {0}', url_info))

            if url_rewriter:
                # TODO: this logic should be a hook
                url_info = url_rewriter.rewrite(url_info)
                _logger.debug(__('Rewritten URL {0}', url_info))

            yield url_info

        except ValueError as e:
            _logger.info(__('Invalid URL {0}: {1}', url_string, e))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _input_file_as_lines(cls, session: AppSession):
    '''Return an iterable of lines from the user's input file.

    stdin is assumed to already be text mode; any other file is
    wrapped in a decoder using the configured local encoding
    (defaulting to UTF-8).
    '''
    source = session.args.input_file

    if source == sys.stdin:
        return source

    decoder = codecs.getreader(session.args.local_encoding or 'utf-8')
    return decoder(source)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _input_file_as_html_links(cls, session: AppSession):
    '''Yield the links scraped from the input file parsed as HTML.'''
    result = session.factory['HTMLScraper'].scrape_file(
        session.args.input_file,
        encoding=session.args.local_encoding or 'utf-8'
    )

    for link_context in result.link_contexts:
        yield link_context.link
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _new_initial_request(self, with_body: bool=True):
    '''Return a new Request to be passed to the Web Client.

    Args:
        with_body: When True, POST data and file-writer processing
            are applied to the request.
    '''
    url_record = self._item_session.url_record
    factory = self._item_session.app_session.factory

    request = factory['WebClient'].request_factory(
        url_record.url_info.url)

    self._populate_common_request(request)

    if with_body:
        has_post_data = (url_record.post_data or
                         self._processor.fetch_params.post_data)

        if has_post_data:
            self._add_post_data(request)

        if self._file_writer_session:
            request = self._file_writer_session.process_request(request)

    return request
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _populate_common_request(self, request):
    '''Fill in the fields shared by all outgoing requests.'''
    url_record = self._item_session.url_record

    # Note that referrer may have already been set by the --referer option
    if url_record.parent_url and not request.fields.get('Referer'):
        self._add_referrer(request, url_record)

    login = self._fetch_rule.http_login

    if login:
        request.username, request.password = login
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_referrer(cls, request: Request, url_record: URLRecord):
    '''Add referrer URL to request.'''
    # Prohibit leak of referrer from HTTPS to HTTP
    # rfc7231 section 5.5.2.
    is_downgrade = (url_record.parent_url.startswith('https://') and
                    url_record.url_info.scheme == 'http')

    if not is_downgrade:
        request.fields['Referer'] = url_record.parent_url
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_robots(self):
    '''Process robots.txt.

    Returns:
        bool: True if processing of the item may continue; False when
        the robots.txt fetch failed or the verdict disallowed the URL.

    Coroutine.
    '''
    try:
        self._item_session.request = request = self._new_initial_request(with_body=False)
        verdict, reason = (yield from self._should_fetch_reason_with_robots(
            request))
    except REMOTE_ERRORS as error:
        _logger.error(
            _('Fetching robots.txt for ‘{url}’ '
              'encountered an error: {error}'),
            url=self._next_url_info.url, error=error
        )
        self._result_rule.handle_error(self._item_session, error)

        # Honor the configured wait/backoff even on robots.txt errors.
        wait_time = self._result_rule.get_wait_time(
            self._item_session, error=error
        )

        if wait_time:
            _logger.debug('Sleeping {0}.', wait_time)
            yield from asyncio.sleep(wait_time)

        return False
    else:
        _logger.debug('Robots filter verdict {} reason {}', verdict, reason)

        if not verdict:
            # Disallowed by robots.txt or the filters; skip this item.
            self._item_session.skip()
            return False

    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_loop(self):
    '''Fetch URL including redirects.

    Loops until the web client session reports it is done, issuing one
    request per iteration (the original URL, then each redirect) and
    sleeping the configured wait time between requests.

    Coroutine.
    '''
    while not self._web_client_session.done():
        self._item_session.request = self._web_client_session.next_request()

        # Re-check the filters for every hop, including redirects.
        verdict, reason = self._should_fetch_reason()

        _logger.debug('Filter verdict {} reason {}', verdict, reason)

        if not verdict:
            self._item_session.skip()
            break

        exit_early, wait_time = yield from self._fetch_one(cast(Request, self._item_session.request))

        if wait_time:
            _logger.debug('Sleeping {}', wait_time)
            yield from asyncio.sleep(wait_time)

        if exit_early:
            break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _next_url_info(self) -> URLInfo:
    '''Return the next URLInfo to be processed.

    This returns either the original URLInfo or the next URLInfo
    containing the redirect link.
    '''
    if self._web_client_session:
        return self._web_client_session.next_request().url_info

    return self._item_session.url_record.url_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _should_fetch_reason(self) -> Tuple[bool, str]:
    '''Return info about whether the URL should be fetched.

    Returns:
        tuple: A two item tuple:

        1. bool: If True, the URL should be fetched.
        2. str: A short reason string explaining the verdict.
    '''
    is_redirect = False

    if self._strong_redirects:
        try:
            is_redirect = \
                self._web_client_session.redirect_tracker.is_redirect()
        except AttributeError:
            # No web client session or tracker yet; not a redirect.
            pass

    return self._fetch_rule.check_subsequent_web_request(
        self._item_session, is_redirect=is_redirect)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
    '''Return info whether the URL should be fetched including checking
    robots.txt.

    Coroutine.
    '''
    verdict_and_reason = yield from self._fetch_rule.check_initial_web_request(
        self._item_session, request)

    return verdict_and_reason
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_post_data(self, request: Request):
    '''Attach URL-encoded POST data to the request payload.'''
    record_data = self._item_session.url_record.post_data

    if record_data:
        data = wpull.string.to_bytes(record_data)
    else:
        # Fall back to the globally configured --post-data value.
        data = wpull.string.to_bytes(
            self._processor.fetch_params.post_data
        )

    request.method = 'POST'
    request.fields['Content-Type'] = 'application/x-www-form-urlencoded'
    request.fields['Content-Length'] = str(len(data))

    _logger.debug('Posting with data {0}.', data)

    if not request.body:
        request.body = Body(io.BytesIO())

    # Write the payload and restore the file offset for later reading.
    with wpull.util.reset_file_offset(request.body):
        request.body.write(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_0_inf(cls, string):
    '''Convert string to int.

    If ``inf`` is supplied, it returns ``0``.
    '''
    if string == 'inf':
        return 0

    try:
        number = int(string)
    except ValueError as error:
        raise argparse.ArgumentTypeError(error)

    if number >= 0:
        return number

    raise argparse.ArgumentTypeError(_('Value must not be negative.'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def int_bytes(cls, string):
    '''Convert string describing size to int.

    Supports the ``k`` (kibibyte) and ``m`` (mebibyte) suffixes.
    '''
    suffix = string[-1]

    if suffix not in ('k', 'm'):
        return cls.int_0_inf(string)

    multiplier = 2 ** 10 if suffix == 'k' else 2 ** 20
    return cls.int_0_inf(string[:-1]) * multiplier
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comma_list(cls, string):
    '''Convert a comma separated string to list.'''
    return [piece.strip() for piece in string.split(',')]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comma_choice_list(cls, string):
    '''Convert a comma separated string to `CommaChoiceListArgs`.'''
    stripped = [piece.strip() for piece in string.split(',')]
    return CommaChoiceListArgs(stripped)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_links(self, file, encoding=None):
    '''Return an iterator of links found in the document.

    Args:
        file: A file object containing the document.
        encoding (str): The encoding of the document.

    Returns:
        iterable: str
    '''
    links = []

    # iter_text yields (text, is_link) pairs; keep only link text.
    for item in self.iter_text(file, encoding):
        if item[1]:
            links.append(item[0])

    return links
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_file_writer(cls, session: AppSession):
    '''Create the File Writer.

    Returns:
        FileWriter: An instance of :class:`.writer.BaseFileWriter`.
    '''
    args = session.args

    if args.delete_after:
        return session.factory.new('FileWriter')  # is a NullWriter

    elif args.output_document:
        session.factory.class_map['FileWriter'] = SingleDocumentWriter
        return session.factory.new('FileWriter', args.output_document,
                                   headers_included=args.save_headers)

    # Wget-compatible default: build a directory tree when more than a
    # single document may be fetched.
    use_dir = (len(args.urls) != 1 or args.page_requisites
               or args.recursive)

    if args.use_directories == 'force':
        use_dir = True
    elif args.use_directories == 'no':
        use_dir = False

    # --restrict-file-names flags control how filenames are sanitized.
    os_type = 'windows' if 'windows' in args.restrict_file_names \
        else 'unix'
    ascii_only = 'ascii' in args.restrict_file_names
    no_control = 'nocontrol' not in args.restrict_file_names

    if 'lower' in args.restrict_file_names:
        case = 'lower'
    elif 'upper' in args.restrict_file_names:
        case = 'upper'
    else:
        case = None

    path_namer = session.factory.new(
        'PathNamer',
        args.directory_prefix,
        index=args.default_page,
        use_dir=use_dir,
        cut=args.cut_dirs,
        protocol=args.protocol_directories,
        hostname=args.host_directories,
        os_type=os_type,
        ascii_only=ascii_only,
        no_control=no_control,
        case=case,
        max_filename_length=args.max_filename_length,
    )

    # Select the clobber/continue policy class before instantiating.
    if args.recursive or args.page_requisites or args.continue_download:
        if args.clobber_method == 'disable':
            file_class = OverwriteFileWriter
        else:
            file_class = IgnoreFileWriter
    elif args.timestamping:
        file_class = TimestampingFileWriter
    else:
        file_class = AntiClobberFileWriter

    session.factory.class_map['FileWriter'] = file_class

    return session.factory.new(
        'FileWriter',
        path_namer,
        file_continuing=args.continue_download,
        headers_included=args.save_headers,
        local_timestamping=args.use_server_timestamps,
        adjust_extension=args.adjust_extension,
        content_disposition=args.content_disposition,
        trust_server_names=args.trust_server_names,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_signal_handlers(self):
    '''Setup Ctrl+C and SIGTERM handlers.

    The first SIGINT requests a graceful stop (finish outstanding
    requests); a second SIGINT, or a SIGTERM, forces an immediate stop
    of the event loop.
    '''
    if platform.system() == 'Windows':
        # add_signal_handler is not available on the Windows event loop.
        _logger.warning(_(
            'Graceful stopping with Unix signals is not supported '
            'on this OS.'
        ))
        return

    event_loop = asyncio.get_event_loop()
    graceful_called = False

    def graceful_stop_callback():
        # Escalate to a forceful stop on the second interrupt.
        nonlocal graceful_called

        if graceful_called:
            forceful_stop_callback()
            return

        graceful_called = True

        _logger.info(_('Stopping once all requests complete...'))
        _logger.info(_('Interrupt again to force stopping immediately.'))
        self.stop()

    def forceful_stop_callback():
        _logger.info(_('Forcing immediate stop...'))
        # Suppress logging errors raised while the loop tears down.
        logging.raiseExceptions = False
        event_loop.stop()

    event_loop.add_signal_handler(signal.SIGINT, graceful_stop_callback)
    event_loop.add_signal_handler(signal.SIGTERM, forceful_stop_callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _update_exit_code_from_error(self, error):
'''Set the exit code based on the error type.
Args:
error (:class:`Exception`): An exception instance.
'''
for error_type, exit_code in self.ERROR_CODE_MAP.items():
if isinstance(error, error_type):
self.update_exit_code(exit_code)
break
else:
self.update_exit_code(ExitStatus.generic_error) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_exit_code(self, code: int):
    '''Set the exit code if it is more serious than before.

    Args:
        code: The exit code.
    '''
    if not code:
        return

    if self._exit_code:
        # Lower codes take precedence over later, higher ones.
        self._exit_code = min(self._exit_code, code)
    else:
        self._exit_code = code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_robots_txt(self, request: HTTPRequest) -> bool:
    '''Consult by fetching robots.txt as needed.

    Args:
        request: The request to be made to get the file.

    Returns:
        True if can fetch

    Coroutine
    '''
    # Without a checker, robots.txt handling is disabled: allow all.
    if not self._robots_txt_checker:
        return True

    can_fetch = yield from self._robots_txt_checker.can_fetch(request)
    return can_fetch
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
        -> Tuple[bool, str, dict]:
    '''Consult the URL filter.

    Args:
        url_info: The URL being tested.
        url_record: The URL record.
        is_redirect: Whether the request is a redirect and it is
            desired that it spans hosts.

    Returns
        tuple:

        1. bool: The verdict
        2. str: A short reason string: nofilters, filters, redirect
        3. dict: The result from :func:`DemuxURLFilter.test_info`
    '''
    if not self._url_filter:
        return True, 'nofilters', None

    test_info = self._url_filter.test_info(url_info, url_record)

    if test_info['verdict']:
        return True, 'filters', test_info

    # A redirect is allowed through when the *only* failing filter is
    # the span-hosts check.
    if is_redirect and self.is_only_span_hosts_failed(test_info):
        return True, 'redirect', test_info

    return False, 'filters', test_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_hook(self, item_session: ItemSession, verdict: bool,
                 reason: str, test_info: dict):
    '''Consult the scripting hook.

    Returns:
        tuple: (bool, str)
    '''
    try:
        hook_reasons = {
            'filters': test_info['map'],
            'reason': reason,
        }
        verdict = self.hook_dispatcher.call(
            PluginFunctions.accept_url, item_session, verdict, hook_reasons,
        )
        reason = 'callback_hook'
    except HookDisconnected:
        # No hook registered; keep the original verdict and reason.
        pass

    return verdict, reason
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_initial_web_request(self, item_session: ItemSession, request: HTTPRequest) -> Tuple[bool, str]:
    '''Check robots.txt, URL filters, and scripting hook.

    Returns:
        tuple: (bool, str)

    Coroutine.
    '''
    # NOTE(review): the filters are tested against item_session.request
    # while robots.txt is fetched for the *request* argument — confirm
    # both are intended to refer to the same URL.
    verdict, reason, test_info = self.consult_filters(item_session.request.url_info, item_session.url_record)

    # robots.txt is only consulted when the filters already allow the URL.
    if verdict and self._robots_txt_checker:
        can_fetch = yield from self.consult_robots_txt(request)

        if not can_fetch:
            verdict = False
            reason = 'robotstxt'

    # The scripting hook may override the combined verdict.
    verdict, reason = self.consult_hook(
        item_session, verdict, reason, test_info
    )

    return verdict, reason
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_pre_response(self, item_session: ItemSession) -> Actions:
    '''Process a response that is starting.

    Returns:
        A value from :class:`.hook.Actions`.

    Raises:
        HookStop: The scripting hook requested an immediate stop.
    '''
    action = self.consult_pre_response_hook(item_session)

    if action == Actions.STOP:
        raise HookStop('Script requested immediate stop.')

    status_by_action = {
        Actions.RETRY: Status.skipped,
        Actions.FINISH: Status.done,
    }

    if action in status_by_action:
        item_session.set_status(status_by_action[action])

    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_document(self, item_session: ItemSession, filename: str) -> Actions:
    '''Process a successful document response.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    # A successful document resets the error backoff.
    self._waiter.reset()

    action = self.handle_response(item_session)

    if action != Actions.NORMAL:
        return action

    self._statistics.increment(item_session.response.body.size())
    item_session.set_status(Status.done, filename=filename)

    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_no_document(self, item_session: ItemSession) -> Actions:
    '''Callback for successful responses containing no useful document.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    # The request itself succeeded, so reset the error backoff.
    self._waiter.reset()

    action = self.handle_response(item_session)

    if action != Actions.NORMAL:
        return action

    item_session.set_status(Status.skipped)
    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
    '''Callback for successful intermediate responses.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    # Intermediate responses reset the backoff; the final response is
    # responsible for updating the item status.
    self._waiter.reset()

    return self.handle_response(item_session)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_document_error(self, item_session: ItemSession) -> Actions:
    '''Callback for when the document only describes an server error.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    # Server errors increase the wait (backoff) instead of resetting it.
    self._waiter.increment()
    self._statistics.errors[ServerError] += 1

    action = self.handle_response(item_session)

    if action != Actions.NORMAL:
        return action

    item_session.set_status(Status.error)
    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_response(self, item_session: ItemSession) -> Actions:
    '''Generic handler for a response.

    Returns:
        A value from :class:`.hook.Actions`.

    Raises:
        HookStop: The scripting hook requested an immediate stop.
    '''
    action = self.consult_response_hook(item_session)

    if action == Actions.STOP:
        raise HookStop('Script requested immediate stop.')

    if action == Actions.RETRY:
        item_session.set_status(Status.error)
    elif action == Actions.FINISH:
        item_session.set_status(Status.done)

    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_error(self, item_session: ItemSession, error: BaseException) -> Actions:
    '''Process an error.

    Returns:
        A value from :class:`.hook.Actions`.

    Raises:
        HookStop: The scripting hook requested an immediate stop.
        SSLVerificationError: Re-raised when certificate verification is
            enabled and the error is a verification failure.
    '''
    if not self._ssl_verification and \
            isinstance(error, SSLVerificationError):
        # Change it into a different error since the user doesn't care
        # about verifying certificates
        self._statistics.increment_error(ProtocolError())
    else:
        self._statistics.increment_error(error)

    # Errors back off the wait time.
    self._waiter.increment()

    action = self.consult_error_hook(item_session, error)

    # Branch order matters: hook actions take precedence, then the fatal
    # SSL case, then errors configured as non-retryable.
    if action == Actions.RETRY:
        item_session.set_status(Status.error)
    elif action == Actions.FINISH:
        item_session.set_status(Status.done)
    elif action == Actions.STOP:
        raise HookStop('Script requested immediate stop.')
    elif self._ssl_verification and isinstance(error, SSLVerificationError):
        # Certificate problems are fatal when verification is enabled.
        raise
    elif isinstance(error, ConnectionRefused) and \
            not self.retry_connrefused:
        # Refused connections are skipped unless retry is requested.
        item_session.set_status(Status.skipped)
    elif isinstance(error, DNSNotFound) and \
            not self.retry_dns_error:
        # DNS failures are skipped unless retry is requested.
        item_session.set_status(Status.skipped)
    else:
        item_session.set_status(Status.error)

    return action
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_wait_time(self, item_session: ItemSession, error=None):
    '''Return the wait time in seconds between requests.'''
    default_seconds = self._waiter.get()
    try:
        # Plugins may override the computed delay.
        return self.hook_dispatcher.call(
            PluginFunctions.wait_time, default_seconds, item_session, error)
    except HookDisconnected:
        # No plugin attached: use the waiter's value as-is.
        return default_seconds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plugin_wait_time(seconds: float, item_session: ItemSession, error: Optional[Exception]=None) -> float:
    '''Return the wait time between requests.

    Args:
        seconds: The original time in seconds.
        item_session: The current item session.
        error: The error that occurred, if any.

    Returns:
        The time in seconds.
    '''
    # Default plugin hook implementation: keep the delay unchanged.
    return seconds
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_pre_response_hook(self, item_session: ItemSession) -> Actions:
    '''Return scripting action when a response begins.'''
    try:
        return self.hook_dispatcher.call(
            PluginFunctions.handle_pre_response, item_session)
    except HookDisconnected:
        # No hook registered: continue normally.
        return Actions.NORMAL
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_response_hook(self, item_session: ItemSession) -> Actions:
    '''Return scripting action when a response ends.'''
    try:
        return self.hook_dispatcher.call(
            PluginFunctions.handle_response, item_session)
    except HookDisconnected:
        # No hook registered: continue normally.
        return Actions.NORMAL
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consult_error_hook(self, item_session: ItemSession, error: BaseException):
    '''Return scripting action when an error occured.'''
    try:
        return self.hook_dispatcher.call(
            PluginFunctions.handle_error, item_session, error)
    except HookDisconnected:
        # No hook registered: continue normally.
        return Actions.NORMAL
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_extra_urls(self, item_session: ItemSession):
    '''Add additional URLs such as robots.txt, favicon.ico.'''
    # Only seed extras from top-level URLs and only when sitemap
    # fetching is enabled.
    if item_session.url_record.level != 0 or not self._sitemaps:
        return

    url_info = item_session.url_record.url_info
    base = '{0}://{1}'.format(
        url_info.scheme, url_info.hostname_with_port)

    for path in ('/robots.txt', '/sitemap.xml'):
        extra_url_info = self.parse_url(base + path)
        item_session.add_child_url(extra_url_info.url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scrape_document(self, item_session: ItemSession):
    '''Process document for links.'''
    # Let plugins harvest URLs first.
    self.event_dispatcher.notify(PluginFunctions.get_urls, item_session)

    if not self._document_scraper:
        return

    scrape_results = self._document_scraper.scrape_info(
        item_session.request, item_session.response,
        item_session.url_record.link_type
    )

    total_inline = 0
    total_linked = 0

    for scraper, result in scrape_results.items():
        inline_count, linked_count = self._process_scrape_info(
            scraper, result, item_session)
        total_inline += inline_count
        total_linked += linked_count

    _logger.debug('Candidate URLs: inline={0} linked={1}',
                  total_inline, total_linked
                  )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_scrape_info(self, scraper: BaseScraper,
                         scrape_result: ScrapeResult,
                         item_session: ItemSession):
    '''Collect the URLs from the scrape info dict.'''
    if not scrape_result:
        return 0, 0

    # Counters keyed by "is the link inline".
    counters = {True: 0, False: 0}

    for link_context in scrape_result.link_contexts:
        url_info = self.parse_url(link_context.link)
        if not url_info:
            # Unparseable candidate; skip it.
            continue

        url_info = self.rewrite_url(url_info)
        child_url_record = item_session.child_url_record(
            url_info.url, inline=link_context.inline)

        verdict = self._fetch_rule.consult_filters(
            item_session.request.url_info, child_url_record)[0]
        if not verdict:
            # Filter rules rejected the URL.
            continue

        counters[bool(link_context.inline)] += 1
        item_session.add_child_url(url_info.url, inline=link_context.inline,
                                   link_type=link_context.link_type)

    return counters[True], counters[False]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrite_url(self, url_info: URLInfo) -> URLInfo:
    '''Return a rewritten URL such as escaped fragment.'''
    rewriter = self._url_rewriter
    # When no rewriter is configured, pass the URL through unchanged.
    return rewriter.rewrite(url_info) if rewriter else url_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_add_system(name, token, org, system, prompt):
    """Adds a new system to the repo.

    Systems are modelled as issue labels with a reserved color; a 422
    from the API means the label (system) already exists.
    """
    repo = get_repo(token=token, org=org, name=name)
    try:
        repo.create_label(name=system.strip(), color=SYSTEM_LABEL_COLOR)
    except GithubException as e:
        if e.status == 422:
            click.secho(
                "Unable to add new system {}, it already exists.".format(system), fg="yellow")
            return
        raise
    click.secho("Successfully added new system {}".format(system), fg="green")
    # Bug fix: keep the try scope narrow -- previously a GithubException
    # raised inside run_update() with status 422 was misreported as
    # "already exists" instead of propagating.
    if prompt and click.confirm("Run update to re-generate the page?"):
        run_update(name=name, token=token, org=org)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_remove_system(name, token, org, system, prompt):
    """Removes a system from the repo.

    The system is a GitHub issue label; a missing label raises
    ``UnknownObjectException``.
    """
    repo = get_repo(token=token, org=org, name=name)
    try:
        label = repo.get_label(name=system.strip())
        label.delete()
    except UnknownObjectException:
        click.secho("Unable to remove system {}, it does not exist.".format(system), fg="yellow")
        return
    click.secho("Successfully deleted {}".format(system), fg="green")
    # Bug fix: keep the try scope narrow -- previously an
    # UnknownObjectException raised inside run_update() was misreported
    # as "system does not exist".
    if prompt and click.confirm("Run update to re-generate the page?"):
        run_update(name=name, token=token, org=org)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config(repo):
    """Get the config for the repo, merged with the default config.

    Returns the default config if no config file is found or if it
    cannot be parsed.
    """
    files = get_files(repo)
    # Bug fix: copy the defaults -- previously `config = DEFAULT_CONFIG`
    # aliased the module-level dict, so update() permanently mutated the
    # defaults for every later call.
    config = dict(DEFAULT_CONFIG)
    if "config.json" in files:
        # get the config file, parse JSON and merge it with the default config
        config_file = repo.get_file_contents('/config.json', ref="gh-pages")
        try:
            repo_config = json.loads(config_file.decoded_content.decode("utf-8"))
            config.update(repo_config)
        except ValueError:
            click.secho("WARNING: Unable to parse config file. Using defaults.", fg="yellow")
    return config
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, *args, **kwargs):
    """Check that user signup is allowed before even bothering to
    dispatch or do other processing.
    """
    if self.registration_allowed():
        return super(RegistrationView, self).dispatch(*args, **kwargs)
    # Registration closed: bounce to the configured URL.
    return HttpResponseRedirect(force_text(self.disallowed_url))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_email_context(self, activation_key):
    """Build the template context used for the activation email."""
    context = {
        'activation_key': activation_key,
        'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
        'site': get_current_site(self.request),
    }
    # Mirror the scheme of the request that triggered the signup.
    context['scheme'] = 'https' if self.request.is_secure() else 'http'
    return context
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_key(self, activation_key):
    """Verify that the activation key is valid and within the permitted
    activation time window, returning the username if valid or raising
    ``ActivationError`` if not.
    """
    try:
        # max_age is expressed in seconds.
        return signing.loads(
            activation_key,
            salt=REGISTRATION_SALT,
            max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400,
        )
    # SignatureExpired subclasses BadSignature, so it must be caught first.
    except signing.SignatureExpired:
        raise ActivationError(
            self.EXPIRED_MESSAGE,
            code='expired'
        )
    except signing.BadSignature:
        raise ActivationError(
            self.INVALID_KEY_MESSAGE,
            code='invalid_key',
            params={'activation_key': activation_key}
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user(self, username):
    """Given the verified username, look up and return the corresponding
    user account if it exists, or raising ``ActivationError`` if it
    doesn't.
    """
    User = get_user_model()
    lookup = {User.USERNAME_FIELD: username}
    try:
        user = User.objects.get(**lookup)
    except User.DoesNotExist:
        raise ActivationError(
            self.BAD_USERNAME_MESSAGE,
            code='bad_username'
        )
    # An already-active account must not be re-activated.
    if user.is_active:
        raise ActivationError(
            self.ALREADY_ACTIVATED_MESSAGE,
            code='already_activated'
        )
    return user
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_confusables(value):
    """Validator which disallows 'dangerous' usernames likely to represent
    homograph attacks.

    A username is 'dangerous' if it is mixed-script (as defined by Unicode
    'Script' property) and contains one or more characters appearing in
    the Unicode Visually Confusable Characters file.
    """
    # Non-text values cannot be homograph attacks; let them pass.
    if isinstance(value, six.text_type) and confusables.is_dangerous(value):
        raise ValidationError(CONFUSABLE, code='invalid')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_confusables_email(value):
    """Validator which disallows 'dangerous' email addresses likely to
    represent homograph attacks.

    An email address is 'dangerous' if either the local-part or the
    domain, considered on their own, are mixed-script and contain one or
    more characters appearing in the Unicode Visually Confusable
    Characters file.
    """
    if '@' not in value:
        return
    # Bug fix: rsplit on the final '@' -- a plain split('@') raised
    # ValueError for addresses containing more than one '@' (the domain
    # is everything after the last '@').
    local_part, domain = value.rsplit('@', 1)
    if confusables.is_dangerous(local_part) or \
       confusables.is_dangerous(domain):
        raise ValidationError(CONFUSABLE_EMAIL, code='invalid')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minify_js_files():
    """This command minifies JS files with UglifyJS."""
    # Iterate values directly: the mapping keys were unused.
    for mapping in JS_FILE_MAPPING.values():
        input_files = " ".join(mapping["input_files"])
        output_file = mapping["output_file"]
        uglifyjs_command = "uglifyjs {input_files} -o {output_file}".format(
            input_files=input_files,
            output_file=output_file
        )
        local(uglifyjs_command)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minify_css_files():
    """This command minifies CSS files with UglifyCSS."""
    # Iterate values directly: the mapping keys were unused. Also rename
    # the misleading `uglifyjs_command` variable -- this runs uglifycss.
    for mapping in CSS_FILE_MAPPING.values():
        input_files = " ".join(mapping["input_files"])
        output_file = mapping["output_file"]
        uglifycss_command = "uglifycss {input_files} > {output_file}".format(
            input_files=input_files,
            output_file=output_file
        )
        local(uglifycss_command)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timestamp_with_timezone(dt=None):
    """Return a timestamp with a timezone for the configured locale.

    If all else fails, consider localtime to be UTC.
    """
    dt = dt or datetime.now()
    if timezone is None:
        return dt.strftime('%Y-%m-%d %H:%M%z')
    if not dt.tzinfo:
        tz = timezone.get_current_timezone()
        if not tz:
            tz = timezone.utc
        # Bug fix: use the computed `tz` -- previously the UTC fallback
        # was assigned but then ignored by a second call to
        # get_current_timezone().
        dt = dt.replace(tzinfo=tz)
    return dt.strftime("%Y-%m-%d %H:%M%z")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_access_control_function():
    """Return a predicate for determining if a user can access the Rosetta views"""
    fn_path = getattr(settings, 'ROSETTA_ACCESS_CONTROL_FUNCTION', None)
    if fn_path is None:
        # Default predicate when no custom function is configured.
        return is_superuser_staff_or_in_translators_group
    # Dynamically load a permissions function from its dotted path.
    module_path, func_name = fn_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, func_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_nls(self, in_, out_):
    """Fixes submitted translations by filtering carriage returns and
    pairing newlines at the beginning and end of the translated string
    with the original.
    """
    if not in_ or not out_:
        return out_

    # Drop carriage returns that the original did not contain.
    if "\r" in out_ and "\r" not in in_:
        out_ = out_.replace("\r", '')

    # Match a leading newline with the original.
    if in_.startswith("\n") and not out_.startswith("\n"):
        out_ = "\n" + out_
    elif out_.startswith("\n") and not in_.startswith("\n"):
        out_ = out_.lstrip()

    # Match a trailing newline with the original (skip if now empty).
    if not out_:
        pass
    elif in_.endswith("\n") and not out_.endswith("\n"):
        out_ = out_ + "\n"
    elif out_.endswith("\n") and not in_.endswith("\n"):
        out_ = out_.rstrip()

    return out_
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ref_lang_po_file(self):
    """Return a parsed .po file object for the "reference language",
    if one exists, otherwise None.
    """
    ref_pofile = None
    if rosetta_settings.ENABLE_REFLANG and self.ref_lang != 'msgid':
        replacement = '{separator}locale{separator}{ref_lang}'.format(
            separator=os.sep,
            ref_lang=self.ref_lang
        )
        # Bug fix: use re.escape for the path separator -- the previous
        # '\{separator}' literal was an invalid escape sequence in a
        # non-raw string and hand-escaped the separator fragilely.
        pattern = '{sep}locale{sep}[a-z]{{2}}'.format(sep=re.escape(os.sep))
        ref_fn = re.sub(pattern, replacement, self.po_file_path)
        try:
            ref_pofile = pofile(ref_fn)
        except IOError:
            # there's a syntax error in the PO file and polib can't
            # open it. Let's just do nothing and thus display msgids.
            pass
    return ref_pofile
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_from_gps_time(gps_time, gps_week=None):
    """Convert gps time in ticks to standard time."""
    gps_timestamp = float(gps_time)

    # Bug fix: identity comparison with None (was `!= None`).
    if gps_week is not None:
        # Image date: seconds offset from the start of the given GPS week.
        return GPS_START + datetime.timedelta(
            seconds=int(gps_week) * SECS_IN_WEEK + gps_timestamp)

    # TAI scale with 1970-01-01 00:00:10 (TAI) epoch.
    # NOTE(review): mutates the process-wide TZ environment; assumes the
    # "right/UTC" zoneinfo data is installed -- confirm.
    os.environ['TZ'] = 'right/UTC'
    # by definition
    gps_time_as_gps = GPS_START + datetime.timedelta(seconds=gps_timestamp)
    # constant offset: GPS is 19 seconds behind TAI
    gps_time_as_tai = gps_time_as_gps + datetime.timedelta(seconds=19)
    tai_epoch_as_tai = datetime.datetime(1970, 1, 1, 0, 0, 10)
    # by definition
    tai_timestamp = (gps_time_as_tai - tai_epoch_as_tai).total_seconds()
    # "right" timezone is in effect for the conversion below
    return datetime.datetime.utcfromtimestamp(tai_timestamp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_video_duration(video_file):
    """Get video duration in seconds"""
    try:
        probe = FFProbe(video_file)
        duration_text = probe.video[0].duration
        return float(duration_text)
    except Exception as exc:
        # Best-effort: report the failure and signal "unknown" duration.
        print("could not extract duration from video {} due to {}".format(video_file, exc))
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_video_end_time(video_file):
    """Get video end time in seconds.

    Returns the stream's creation_time metadata parsed as a datetime, or
    None when the file is missing or the metadata cannot be read/parsed.
    """
    if not os.path.isfile(video_file):
        print("Error, video file {} does not exist".format(video_file))
        return None
    try:
        time_string = FFProbe(video_file).video[0].creation_time
        try:
            creation_time = datetime.datetime.strptime(
                time_string, TIME_FORMAT)
        except ValueError:
            # Bug fix: was a bare except. Only a format mismatch should
            # trigger the fallback timestamp layout.
            creation_time = datetime.datetime.strptime(
                time_string, TIME_FORMAT_2)
    except Exception:
        # Bug fix: was a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt). Missing stream or metadata -> unknown.
        return None
    return creation_time
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_video_start_time(video_file):
    """Get start time in seconds.

    Computed as the video end time minus its duration; returns None when
    either value is unavailable.
    """
    if not os.path.isfile(video_file):
        print("Error, video file {} does not exist".format(video_file))
        return None
    video_end_time = get_video_end_time(video_file)
    duration = get_video_duration(video_file)
    # Bug fix: identity comparison with None (was `== None`).
    if video_end_time is None or duration is None:
        return None
    return video_end_time - datetime.timedelta(seconds=duration)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_alternative_fields(self, fields, default=None, field_type=float):
    '''
    Extract a value for a list of ordered fields.

    Return (value, field) for the first field present in self.tags,
    converted according to field_type; (default, None) when none match.
    '''
    for field in fields:
        if field not in self.tags:
            continue
        if field_type is float:
            # EXIF rationals need fraction evaluation.
            value = eval_frac(self.tags[field].values[0])
        elif field_type is str:
            value = str(self.tags[field].values)
        elif field_type is int:
            value = int(self.tags[field].values[0])
        else:
            # Bug fix: an unsupported field_type previously left `value`
            # unbound and raised UnboundLocalError; fail clearly instead.
            raise ValueError("Unsupported field_type: {}".format(field_type))
        return value, field
    return default, None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_geo(self):
    '''
    Extract geo-related information from exif
    '''
    altitude = self.extract_altitude()
    dop = self.extract_dop()
    lon, lat = self.extract_lon_lat()

    geo = {}
    # Latitude/longitude only make sense as a pair.
    if lon is not None and lat is not None:
        geo['latitude'] = lat
        geo['longitude'] = lon
    if altitude is not None:
        geo['altitude'] = altitude
    if dop is not None:
        geo['dop'] = dop
    return geo
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_gps_time(self):
    '''
    Extract timestamp from GPS field.
    '''
    gps_date_field = "GPS GPSDate"
    gps_time_field = "GPS GPSTimeStamp"
    # NOTE(review): returns 0 (not None) when the GPS date/time tags are
    # absent -- callers appear to rely on this sentinel; confirm.
    gps_time = 0
    if gps_date_field in self.tags and gps_time_field in self.tags:
        date = str(self.tags[gps_date_field].values).split(":")
        # An all-zero date is a placeholder written by some cameras.
        if int(date[0]) == 0 or int(date[1]) == 0 or int(date[2]) == 0:
            return None
        t = self.tags[gps_time_field]
        # Hour/minute/second are stored as EXIF rationals.
        gps_time = datetime.datetime(
            year=int(date[0]),
            month=int(date[1]),
            day=int(date[2]),
            hour=int(eval_frac(t.values[0])),
            minute=int(eval_frac(t.values[1])),
            second=int(eval_frac(t.values[2])),
        )
        # Keep the fractional part of the seconds as microseconds.
        microseconds = datetime.timedelta(
            microseconds=int((eval_frac(t.values[2]) % 1) * 1e6))
        gps_time += microseconds
    return gps_time
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_exif(self):
    '''
    Extract a list of exif infos
    '''
    # Extraction order kept identical to the original implementation.
    width, height = self.extract_image_size()
    make = self.extract_make()
    model = self.extract_model()
    orientation = self.extract_orientation()
    geo = self.extract_geo()
    capture = self.extract_capture_time()
    direction = self.extract_direction()

    return {
        'width': width,
        'height': height,
        'orientation': orientation,
        'direction': direction,
        'make': make,
        'model': model,
        'capture_time': capture,
        'gps': geo,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_image_size(self):
    '''
    Extract image height and width
    '''
    # -1 signals "tag not present".
    width_fields = ['Image ImageWidth', 'EXIF ExifImageWidth']
    height_fields = ['Image ImageLength', 'EXIF ExifImageLength']
    width, _ = self._extract_alternative_fields(width_fields, -1, int)
    height, _ = self._extract_alternative_fields(height_fields, -1, int)
    return width, height
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_make(self):
    '''
    Extract camera make
    '''
    # Prefer the lens make; fall back to the body make.
    make, _ = self._extract_alternative_fields(
        ['EXIF LensMake', 'Image Make'], default='none', field_type=str)
    return make
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_model(self):
    '''
    Extract camera model
    '''
    # Prefer the lens model; fall back to the body model.
    model, _ = self._extract_alternative_fields(
        ['EXIF LensModel', 'Image Model'], default='none', field_type=str)
    return model
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_orientation(self):
    '''
    Extract image orientation
    '''
    orientation, _ = self._extract_alternative_fields(
        ['Image Orientation'], default=1, field_type=int)
    # EXIF orientation values are 1..8; anything else means upright.
    return orientation if orientation in range(1, 9) else 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fields_exist(self, fields):
    '''
    Check existence of a list fields in exif
    '''
    # Each entry is a group of alternative tag names; at least one
    # tag from every group must be present.
    for required_group in fields:
        if not any(candidate in self.tags for candidate in required_group):
            print("Missing required EXIF tag: {0} for image {1}".format(
                required_group[0], self.filename))
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mapillary_tag_exists(self):
    '''
    Check existence of required Mapillary tags
    '''
    description_tag = "Image ImageDescription"
    if description_tag not in self.tags:
        return False
    for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
        # NOTE(review): `.values` here appears to be the raw JSON string,
        # so the first test is a substring check; the JSON is then parsed
        # to reject empty/blank values -- confirm .values is str, not list.
        if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query_search_api(min_lat, max_lat, min_lon, max_lon, max_results):
    '''
    Send query to the search API and get dict with image data.

    NOTE(review): Python 2 only -- relies on urllib.urlencode and
    urllib2.urlopen; port to urllib.parse / urllib.request for Python 3.
    '''
    # Create URL
    # bbox is "min_lon,min_lat,max_lon,max_lat", the order the API expects.
    params = urllib.urlencode(zip(
        ['client_id', 'bbox', 'per_page'],
        [CLIENT_ID, ','.join([str(min_lon), str(min_lat), str(
            max_lon), str(max_lat)]), str(max_results)]
    ))
    print(MAPILLARY_API_IM_SEARCH_URL + params)

    # Get data from server, then parse JSON
    query = urllib2.urlopen(MAPILLARY_API_IM_SEARCH_URL + params).read()
    query = json.loads(query)['features']
    print("Result: {0} images in area.".format(len(query)))
    return query
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_images(query, path, size=1024):
    '''
    Download images in query result to path.

    Return list of downloaded images with lat,lon.
    There are four sizes available: 320, 640, 1024 (default), or 2048.

    NOTE(review): Python 2 only -- urllib.URLopener was removed in
    Python 3. `path` is assumed to end with a separator since the
    filename is appended by plain string concatenation.
    '''
    im_size = "thumb-{0}.jpg".format(size)
    im_list = []

    for im in query:
        # Use key to create url to download from and filename to save into
        key = im['properties']['key']
        url = MAPILLARY_API_IM_RETRIEVE_URL + key + '/' + im_size
        filename = key + ".jpg"

        try:
            # Get image and save to disk
            image = urllib.URLopener()
            image.retrieve(url, path + filename)

            # Log filename and GPS location
            coords = ",".join(map(str, im['geometry']['coordinates']))
            im_list.append([filename, coords])
            print("Successfully downloaded: {0}".format(filename))
        except KeyboardInterrupt:
            # Let the user abort a long batch cleanly.
            break
        except Exception as e:
            # Best-effort: skip this image and continue with the rest.
            print("Failed to download: {} due to {}".format(filename, e))

    return im_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lat_lon_time_from_gpx(gpx_file, local_time=True):
    '''
    Read location and time stamps from a track in a GPX file.

    Returns a list of tuples (time, lat, lon).

    GPX stores time in UTC, by default we assume your camera used the
    local time and convert accordingly.
    '''
    with open(gpx_file, 'r') as f:
        gpx = gpxpy.parse(f)

    def _convert(t):
        # GPX times are UTC; optionally shift to local time.
        return utc_to_localtime(t) if local_time else t

    points = []
    for track in gpx.tracks:
        for segment in track.segments:
            points.extend(
                (_convert(p.time), p.latitude, p.longitude, p.elevation)
                for p in segment.points)
    points.extend(
        (_convert(wp.time), wp.latitude, wp.longitude, wp.elevation)
        for wp in gpx.waypoints)

    # sort by time just in case
    points.sort()
    return points
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lat_lon_time_from_nmea(nmea_file, local_time=True):
    '''
    Read location and time stamps from a track in a NMEA file.

    Returns a list of tuples (time, lat, lon).

    GPX stores time in UTC, by default we assume your camera used the local time
    and convert accordingly.
    '''
    with open(nmea_file, "r") as f:
        lines = [l.rstrip("\n\r") for l in f.readlines()]

    # Pre-scan for the first RMC sentence to establish the initial date
    # (GGA sentences carry a time-of-day but no date).
    date = None
    for l in lines:
        if "GPRMC" in l:
            date = pynmea2.parse(l).datetime.date()
            break

    # Parse GPS trace
    points = []
    for l in lines:
        if "GPRMC" in l:
            date = pynmea2.parse(l).datetime.date()
        if "$GPGGA" in l:
            if date is None:
                # Bug fix: previously a file with no GPRMC sentence raised
                # NameError here; skip fixes whose date is unknown instead.
                continue
            data = pynmea2.parse(l)
            timestamp = datetime.datetime.combine(date, data.timestamp)
            lat, lon, alt = data.latitude, data.longitude, data.altitude
            points.append((timestamp, lat, lon, alt))

    points.sort()
    return points
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecef_from_lla(lat, lon, alt):
    '''
    Compute ECEF XYZ from latitude, longitude and altitude.

    All using the WGS94 model.
    Altitude is the distance to the WGS94 ellipsoid.
    Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
    '''
    phi = math.radians(lat)
    lam = math.radians(lon)
    a2 = WGS84_a ** 2
    b2 = WGS84_b ** 2

    cos_phi = math.cos(phi)
    sin_phi = math.sin(phi)
    # a2 * L is the prime-vertical radius of curvature N.
    L = 1.0 / math.sqrt(a2 * cos_phi ** 2 + b2 * sin_phi ** 2)
    radius = a2 * L + alt

    x = radius * cos_phi * math.cos(lam)
    y = radius * cos_phi * math.sin(lam)
    z = (b2 * L + alt) * sin_phi
    return x, y, z
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_max_distance_from_start(latlon_track):
    '''
    Returns the radius of an entire GPS track. Used to calculate whether
    or not the entire sequence was just stationary video.

    Takes a sequence of points as input.
    '''
    # Strip timestamps: keep only (lat, lon, alt).
    positions = [[point[1], point[2], point[3]] for point in latlon_track]
    origin = positions[0]
    # The first distance (origin to itself) is 0, matching the original
    # accumulator start value.
    return max(gps_distance(origin, position) for position in positions)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_total_distance_traveled(latlon_track):
    '''
    Returns the total distance traveled of a GPS track. Used to calculate
    whether or not the entire sequence was just stationary video.

    Takes a sequence of points as input.
    '''
    # Strip timestamps: keep only (lat, lon, alt).
    positions = [[point[1], point[2], point[3]] for point in latlon_track]

    total = 0
    previous = positions[0]
    for current in positions:
        # First iteration adds the zero-length leg from start to itself.
        total += gps_distance(previous, current)
        previous = current
    return total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dms_to_decimal(degrees, minutes, seconds, hemisphere):
    '''
    Convert from degrees, minutes, seconds to decimal degrees.

    @author: mprins
    '''
    decimal = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
    # West and South hemispheres carry a negative sign.
    return -decimal if hemisphere in "WwSs" else decimal
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decimal_to_dms(value, precision):
    '''
    Convert a decimal position to the ((deg, 1), (min, 1), (sec, precision))
    rational-tuple format expected by EXIF GPS tags.

    :param value: decimal degrees (sign is normally carried by the EXIF
        hemisphere ref, so non-negative input is expected -- TODO confirm)
    :param precision: denominator applied to the seconds component
    :return: ((degrees, 1), (minutes, 1), (seconds, precision))
    '''
    # Renamed from `min` to avoid shadowing the builtin.
    degrees = math.floor(value)
    minutes = math.floor((value - degrees) * 60)
    # Scale the sub-minute remainder by `precision` so fractional seconds
    # survive the integer floor.
    seconds = math.floor((value - degrees - minutes / 60) * 3600 * precision)
    return ((degrees, 1), (minutes, 1), (seconds, precision))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_bearing(start_lat, start_lon, end_lat, end_lon):
    '''
    Return the compass bearing (degrees in [0, 360)) from start to end.

    Formula from
    http://www.movable-type.co.uk/scripts/latlong.html

    :param start_lat: origin latitude in decimal degrees
    :param start_lon: origin longitude in decimal degrees
    :param end_lat: destination latitude in decimal degrees
    :param end_lon: destination longitude in decimal degrees

    The original code also computed a `dPhi` log-tangent term that was
    never used and could raise ZeroDivisionError at polar latitudes;
    that dead computation has been removed.
    '''
    # make sure everything is in radians
    start_lat = math.radians(start_lat)
    start_lon = math.radians(start_lon)
    end_lat = math.radians(end_lat)
    end_lon = math.radians(end_lon)

    # Fold the longitude difference into (-pi, pi] so the bearing takes
    # the short way around the antimeridian.
    dLong = end_lon - start_lon
    if abs(dLong) > math.pi:
        if dLong > 0.0:
            dLong = -(2.0 * math.pi - dLong)
        else:
            dLong = (2.0 * math.pi + dLong)

    y = math.sin(dLong) * math.cos(end_lat)
    x = math.cos(start_lat) * math.sin(end_lat) - \
        math.sin(start_lat) * math.cos(end_lat) * math.cos(dLong)
    # atan2 yields (-180, 180]; shift into the compass range [0, 360).
    bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0

    return bearing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff_bearing(b1, b2):
    '''
    Return the absolute angular difference between two bearings,
    folded into the range [0, 180].
    '''
    delta = abs(b2 - b1)
    if delta > 180:
        delta = 360 - delta
    return delta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_bearing(bearing, check_hex=False):
    '''
    Normalize a bearing into [0, 360).

    With check_hex=True, values above 360 are assumed to be negative
    bearings wrongly parsed from hex by exifread (e.g. -360 degrees was
    read back as 4294966935) and are recovered by inverting the
    significant bits before negating.
    '''
    if bearing > 360 and check_hex:
        # fix negative value wrongly parsed in exifread
        # -360 degree -> 4294966935 when converting from hex
        # Flipping every digit of bin(raw) equals (2**bit_length - 1) - raw.
        raw = int(bearing)
        bearing = -float((1 << raw.bit_length()) - 1 - raw)
    bearing %= 360
    return bearing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def interpolate_lat_lon(points, t, max_dt=1):
    '''
    Return interpolated lat, lon and compass bearing for time t.
    Points is a list of tuples (time, lat, lon, elevation), t a datetime object.

    Raises ValueError if t lies more than max_dt seconds outside the
    time span of `points`; within max_dt it extrapolates from the
    nearest edge segment instead.
    '''
    # find the enclosing points in sorted list
    # t at or beyond either end of the track: extrapolate from the
    # nearest edge segment, but only if within max_dt seconds.
    if (t <= points[0][0]) or (t >= points[-1][0]):
        if t <= points[0][0]:
            dt = abs((points[0][0] - t).total_seconds())
        else:
            dt = (t - points[-1][0]).total_seconds()
        if dt > max_dt:
            raise ValueError(
                "time t not in scope of gpx file by {} seconds".format(dt))
        else:
            print(
                "time t not in scope of gpx file by {} seconds, extrapolating...".format(dt))
        # Pick the first (or last) two points as the extrapolation segment;
        # assumes points has at least two entries -- TODO confirm.
        if t < points[0][0]:
            before = points[0]
            after = points[1]
        else:
            before = points[-2]
            after = points[-1]
        bearing = compute_bearing(before[1], before[2], after[1], after[2])
        # Exact endpoint match: return that point directly, no interpolation.
        if t == points[0][0]:
            x = points[0]
            return (x[1], x[2], bearing, x[3])
        if t == points[-1][0]:
            x = points[-1]
            return (x[1], x[2], bearing, x[3])
    else:
        # t strictly inside the span: scan for the first point after t;
        # `before`/`after` then bracket t (or coincide at i == 0).
        for i, point in enumerate(points):
            if t < point[0]:
                if i > 0:
                    before = points[i - 1]
                else:
                    before = points[i]
                after = points[i]
                break
    # weight based on time
    # NOTE(review): raises ZeroDivisionError if before and after share a
    # timestamp -- verify the track has strictly increasing times.
    weight = (t - before[0]).total_seconds() / \
        (after[0] - before[0]).total_seconds()
    # simple linear interpolation in case points are not the same
    # (before + weight * (after - before), written out termwise)
    if before[1] == after[1]:
        lat = before[1]
    else:
        lat = before[1] - weight * before[1] + weight * after[1]
    if before[2] == after[2]:
        lon = before[2]
    else:
        lon = before[2] - weight * before[2] + weight * after[2]
    # camera angle
    bearing = compute_bearing(before[1], before[2], after[1], after[2])
    # altitude
    # NOTE(review): only before[3] is None-checked; assumes after[3] is
    # not None whenever before[3] isn't -- TODO confirm upstream.
    if before[3] is not None:
        ele = before[3] - weight * before[3] + weight * after[3]
    else:
        ele = None
    return lat, lon, bearing, ele |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.