text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_ftp_client(cls, session: AppSession):
    '''Build and register the FTP client instance.'''
    pool = session.factory['ConnectionPool']
    # TODO: recorder
    # recorder=session.factory['DemuxRecorder'],
    return session.factory.new('FTPClient', connection_pool=pool)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process(self, session: AppSession):
    '''Build MITM proxy server.

    Coroutine. Does nothing unless PhantomJS, youtube-dl, or an
    explicit proxy server was requested on the command line.
    '''
    args = session.args
    if not (args.phantomjs or args.youtube_dl or args.proxy_server):
        return
    proxy_server = session.factory.new(
        'HTTPProxyServer',
        session.factory['HTTPClient'],
    )
    # These two are created for their factory-registration side effect;
    # the locals themselves are unused here.
    cookie_jar = session.factory.get('CookieJarWrapper')
    proxy_coprocessor = session.factory.new(
        'ProxyCoprocessor',
        session
    )
    # Bind the configured port/address; the OS fills in the real port
    # (relevant when proxy_server_port is 0).
    proxy_socket = tornado.netutil.bind_sockets(
        session.args.proxy_server_port,
        address=session.args.proxy_server_address
    )[0]
    proxy_port = proxy_socket.getsockname()[1]
    proxy_async_server = yield from asyncio.start_server(proxy_server, sock=proxy_socket)
    session.async_servers.append(proxy_async_server)
    # Publish the actual bound port so coprocessors can point at it.
    session.proxy_server_port = proxy_port
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _build_processor(cls, session: AppSession):
'''Create the Processor
Returns:
Processor: An instance of :class:`.processor.BaseProcessor`.
'''
web_processor = cls._build_web_processor(session)
ftp_processor = cls._build_ftp_processor(session)
delegate_processor = session.factory.new('Processor')
delegate_processor.register('http', web_processor)
delegate_processor.register('https', web_processor)
delegate_processor.register('ftp', ftp_processor) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_web_processor(cls, session: AppSession):
    '''Build WebProcessor.

    Intermediate objects (FetchRule, Waiter, ResultRule, ProcessingRule,
    fetch params) are created through the factory so they are also
    registered for later lookup by other builders.
    '''
    args = session.args
    url_filter = session.factory['DemuxURLFilter']
    document_scraper = session.factory['DemuxDocumentScraper']
    # NOTE(review): file_writer appears unused here — presumably kept for
    # its factory lookup or historical reasons; confirm before removing.
    file_writer = session.factory['FileWriter']
    post_data = cls._get_post_data(session.args)
    web_client = session.factory['WebClient']
    robots_txt_checker = cls._build_robots_txt_checker(session)
    # --user supplies both protocols; protocol-specific flags win is NOT
    # the case here: the generic --user takes precedence when given.
    http_username = args.user or args.http_user
    http_password = args.password or args.http_password
    ftp_username = args.user or args.ftp_user
    ftp_password = args.password or args.ftp_password
    fetch_rule = session.factory.new(
        'FetchRule',
        url_filter=url_filter, robots_txt_checker=robots_txt_checker,
        http_login=(http_username, http_password),
        ftp_login=(ftp_username, ftp_password),
        duration_timeout=args.session_timeout,
    )
    waiter = session.factory.new(
        'Waiter',
        wait=args.wait,
        random_wait=args.random_wait,
        max_wait=args.waitretry)
    result_rule = session.factory.new(
        'ResultRule',
        ssl_verification=args.check_certificate,
        retry_connrefused=args.retry_connrefused,
        retry_dns_error=args.retry_dns_error,
        waiter=waiter,
        statistics=session.factory['Statistics'],
    )
    # Registered as 'ProcessingRule' for later use (e.g. by the
    # PhantomJS coprocessor builder).
    processing_rule = session.factory.new(
        'ProcessingRule',
        fetch_rule,
        document_scraper=document_scraper,
        sitemaps=session.args.sitemaps,
        url_rewriter=session.factory.get('URLRewriter'),
    )
    web_processor_fetch_params = session.factory.new(
        'WebProcessorFetchParams',
        post_data=post_data,
        strong_redirects=args.strong_redirects,
        content_on_error=args.content_on_error,
    )
    processor = session.factory.new(
        'WebProcessor',
        web_client,
        web_processor_fetch_params,
    )
    return processor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _build_ftp_processor(cls, session: AppSession):
'''Build FTPProcessor.'''
ftp_client = session.factory['FTPClient']
fetch_params = session.factory.new(
'FTPProcessorFetchParams',
remove_listing=session.args.remove_listing,
retr_symlinks=session.args.retr_symlinks,
preserve_permissions=session.args.preserve_permissions,
glob=session.args.glob,
)
return session.factory.new(
'FTPProcessor',
ftp_client,
fetch_params,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _get_post_data(cls, args):
'''Return the post data.'''
if args.post_data:
return args.post_data
elif args.post_file:
return args.post_file.read() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _build_robots_txt_checker(cls, session: AppSession):
'''Build robots.txt checker.'''
if session.args.robots:
robots_txt_pool = session.factory.new('RobotsTxtPool')
robots_txt_checker = session.factory.new(
'RobotsTxtChecker',
web_client=session.factory['WebClient'],
robots_txt_pool=robots_txt_pool
)
return robots_txt_checker |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_phantomjs_coprocessor(cls, session: AppSession, proxy_port: int):
    '''Build proxy server and PhantomJS client. controller, coprocessor.

    Args:
        session: The application session.
        proxy_port: Port of the MITM proxy PhantomJS should tunnel
            through.
    '''
    page_settings = {}
    default_headers = NameValueRecord()
    # Apply user-supplied --header values first.
    for header_string in session.args.header:
        default_headers.parse(header_string)
    # Since we can only pass a one-to-one mapping to PhantomJS,
    # we put these last since NameValueRecord.items() will use only the
    # first value added for each key.
    default_headers.add('Accept-Language', '*')
    if not session.args.http_compression:
        default_headers.add('Accept-Encoding', 'identity')
    default_headers = dict(default_headers.items())
    if session.args.read_timeout:
        # PhantomJS expects milliseconds.
        page_settings['resourceTimeout'] = session.args.read_timeout * 1000
    page_settings['userAgent'] = session.args.user_agent \
        or session.default_user_agent
    # Test early for executable
    wpull.driver.phantomjs.get_version(session.args.phantomjs_exe)
    phantomjs_params = PhantomJSParams(
        wait_time=session.args.phantomjs_wait,
        num_scrolls=session.args.phantomjs_scroll,
        smart_scroll=session.args.phantomjs_smart_scroll,
        snapshot=session.args.phantomjs_snapshot,
        custom_headers=default_headers,
        page_settings=page_settings,
        load_time=session.args.phantomjs_max_time,
    )
    # Route all PhantomJS traffic through the MITM proxy; its cert is
    # self-signed, hence ignore-ssl-errors.
    extra_args = [
        '--proxy',
        '{}:{}'.format(session.args.proxy_server_address, proxy_port),
        '--ignore-ssl-errors=true'
    ]
    phantomjs_driver_factory = functools.partial(
        session.factory.class_map['PhantomJSDriver'],
        exe_path=session.args.phantomjs_exe,
        extra_args=extra_args,
    )
    phantomjs_coprocessor = session.factory.new(
        'PhantomJSCoprocessor',
        phantomjs_driver_factory,
        session.factory['ProcessingRule'],
        phantomjs_params,
        root_path=session.args.directory_prefix,
        warc_recorder=session.factory.get('WARCRecorder'),
    )
    return phantomjs_coprocessor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_youtube_dl_coprocessor(cls, session: AppSession, proxy_port: int):
    '''Build youtube-dl coprocessor.'''
    args = session.args
    # Check that the youtube-dl executable exists before wiring it up.
    wpull.processor.coprocessor.youtubedl.get_version(args.youtube_dl_exe)
    return session.factory.new(
        'YoutubeDlCoprocessor',
        args.youtube_dl_exe,
        (args.proxy_server_address, proxy_port),
        root_path=args.directory_prefix,
        user_agent=args.user_agent or session.default_user_agent,
        warc_recorder=session.factory.get('WARCRecorder'),
        inet_family=args.inet_family,
        # Proxy will always present a invalid MITM cert
        #check_certificate=session.args.check_certificate
        check_certificate=False
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(self) -> Application:
    '''Assemble the application and return it.'''
    factory = self._factory
    factory.new('Application', self._build_pipelines())
    return factory['Application']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_supported(cls, file=None, request=None, response=None,
                 url_info=None):
    '''Given the hints, return whether the document is supported.

    Args:
        file: A file object containing the document.
        request (:class:`.http.request.Request`): An HTTP request.
        response (:class:`.http.request.Response`): An HTTP response.
        url_info (:class:`.url.URLInfo`): A URLInfo.

    Returns:
        bool: If True, the reader should be able to read it.
    '''
    # Hints are consulted in this fixed order; the first conclusive
    # answer wins.
    hints = (
        (response, cls.is_response),
        (file, cls.is_file),
        (request, cls.is_request),
        (url_info, cls.is_url),
    )
    for hint, checker in hints:
        if not hint:
            continue
        try:
            verdict = checker(hint)
        except NotImplementedError:
            continue
        if verdict:
            return True
        if verdict is VeryFalse:
            return VeryFalse
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _print_stats(cls, stats: Statistics, human_format_speed: bool=True):
    '''Log the final statistics to the user.

    Args:
        stats: Collected run statistics.
        human_format_speed: If True, show speed as human-readable
            bytes/s; otherwise as raw bits (speed * 8).
    '''
    time_length = datetime.timedelta(
        seconds=int(stats.stop_time - stats.start_time)
    )
    file_size = wpull.string.format_size(stats.size)
    if stats.bandwidth_meter.num_samples:
        speed = stats.bandwidth_meter.speed()
        if human_format_speed:
            speed_size_str = wpull.string.format_size(speed)
        else:
            # speed is bytes/s; multiply by 8 to report bits.
            speed_size_str = '{:.1f} b'.format(speed * 8)
    else:
        # No samples recorded; show a placeholder.
        speed_size_str = _('-- B')
    _logger.info(_('FINISHED.'))
    _logger.info(__(
        _(
            'Duration: {preformatted_timedelta}. '
            'Speed: {preformatted_speed_size}/s.'
        ),
        preformatted_timedelta=time_length,
        preformatted_speed_size=speed_size_str,
    ))
    # ngettext picks singular/plural form based on file count.
    _logger.info(__(
        gettext.ngettext(
            'Downloaded: {num_files} file, {preformatted_file_size}.',
            'Downloaded: {num_files} files, {preformatted_file_size}.',
            stats.files
        ),
        num_files=stats.files,
        preformatted_file_size=file_size
    ))
    if stats.is_quota_exceeded:
        _logger.info(_('Download quota exceeded.'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_no_body(request, response, no_content_codes=DEFAULT_NO_CONTENT_CODES):
    '''Return whether a content body is not expected.'''
    has_body_header = (
        'Content-Length' in response.fields or
        'Transfer-Encoding' in response.fields
    )
    expects_no_content = (
        response.status_code in no_content_codes or
        request.method.upper() == 'HEAD'
    )
    return not has_body_header and expects_no_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_request(self, request, full_url=False):
    '''Send the request's HTTP status line and header fields.

    This class will automatically connect the connection if the
    connection is closed.

    Coroutine.

    Args:
        request: The request to serialize and send. If it provides
            ``prepare_for_send``, that is called first.
        full_url (bool): Use a full URL in the request line
            (proxy-style); forwarded to ``prepare_for_send``.
    '''
    _logger.debug('Sending headers.')
    if hasattr(request, 'prepare_for_send'):
        request.prepare_for_send(full_url=full_url)
    if self._ignore_length:
        # We will read to connection close; ask the server to close.
        request.fields['Connection'] = 'close'
    data = request.to_bytes()
    self._data_event_dispatcher.notify_write(data)
    # XXX: Connection lost is raised too early on Python 3.2, 3.3 so
    # don't flush but check for connection closed on reads
    yield from self._connection.write(data, drain=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_body(self, file, length=None):
    '''Send the request's content body.

    Coroutine.

    Args:
        file: File object whose ``read`` may be a plain function or a
            coroutine (detected below).
        length: Number of bytes to send; if None, read until EOF.
    '''
    _logger.debug('Sending body.')
    file_is_async = (asyncio.iscoroutine(file.read) or
                     asyncio.iscoroutinefunction(file.read))
    _logger.debug(__('Body is async: {0}', file_is_async))
    # BUG FIX: bytes_left was previously assigned only when a length was
    # given, but it was read unconditionally by the drain decision below,
    # raising NameError when length=None. Initialize it for both cases.
    bytes_left = length
    while True:
        if bytes_left is not None:
            if bytes_left <= 0:
                break
            read_size = min(bytes_left, self._read_size)
        else:
            read_size = self._read_size
        if file_is_async:
            data = yield from file.read(read_size)
        else:
            data = file.read(read_size)
        if not data:
            break
        self._data_event_dispatcher.notify_write(data)
        if bytes_left is not None and bytes_left <= self._read_size:
            # XXX: Connection lost is raised too early on Python 3.2, 3.3
            # so don't flush on the last chunk but check for connection
            # closed on reads
            drain = False
        else:
            drain = True
        yield from self._connection.write(data, drain=drain)
        if bytes_left is not None:
            bytes_left -= len(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_response(self, response=None):
    '''Read the response's HTTP status line and header fields.

    Coroutine.

    Args:
        response: Optional response object to populate; a new
            ``Response`` is created when omitted.

    Returns:
        The populated response.

    Raises:
        ProtocolError: Malformed or oversized header.
        NetworkError: Connection closed mid-header.
    '''
    _logger.debug('Reading header.')
    if response is None:
        response = Response()
    header_lines = []
    bytes_read = 0
    while True:
        try:
            data = yield from self._connection.readline()
        except ValueError as error:
            raise ProtocolError(
                'Invalid header: {0}'.format(error)) from error
        self._data_event_dispatcher.notify_read(data)
        if not data.endswith(b'\n'):
            # A partial line means the peer closed the connection early.
            raise NetworkError('Connection closed.')
        elif data in (b'\r\n', b'\n'):
            # Blank line terminates the header block.
            break
        header_lines.append(data)
        assert data.endswith(b'\n')
        bytes_read += len(data)
        if bytes_read > 32768:
            # Hard cap to guard against unbounded header growth.
            raise ProtocolError('Header too big.')
    if not header_lines:
        raise ProtocolError('No header received.')
    response.parse(b''.join(header_lines))
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_body(self, request, response, file=None, raw=False):
    '''Read the response's content body.

    Coroutine.

    Args:
        request: The request that elicited this response.
        response: The response whose body is read.
        file: Optional destination file object for the body.
        raw (bool): If True, skip content-encoding decompression.
    '''
    if is_no_body(request, response):
        return
    if not raw:
        self._setup_decompressor(response)
    read_strategy = self.get_read_strategy(response)
    if self._ignore_length and read_strategy == 'length':
        # Caller asked to ignore Content-Length: read to close instead.
        read_strategy = 'close'
    if read_strategy == 'chunked':
        yield from self._read_body_by_chunk(response, file, raw=raw)
    elif read_strategy == 'length':
        yield from self._read_body_by_length(response, file)
    else:
        yield from self._read_body_until_close(response, file)
    should_close = wpull.protocol.http.util.should_close(
        request.version, response.fields.get('Connection'))
    if not self._keep_alive or should_close:
        _logger.debug('Not keep-alive. Closing connection.')
        self.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_body_until_close(self, response, file):
    '''Read the response until the connection closes.

    Coroutine.
    '''
    _logger.debug('Reading body until close.')
    # Duck-typed check: async destination files expose ``drain``.
    file_is_async = hasattr(file, 'drain')
    while True:
        data = yield from self._connection.read(self._read_size)
        if not data:
            break
        self._data_event_dispatcher.notify_read(data)
        content_data = self._decompress_data(data)
        if file:
            file.write(content_data)
            if file_is_async:
                yield from file.drain()
    # Write out whatever the decompressor still buffers.
    content_data = self._flush_decompressor()
    if file:
        file.write(content_data)
        if file_is_async:
            yield from file.drain()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_body_by_length(self, response, file):
    '''Read the connection specified by a length.

    Coroutine.
    '''
    _logger.debug('Reading body by length.')
    file_is_async = hasattr(file, 'drain')
    try:
        body_size = int(response.fields['Content-Length'])
        if body_size < 0:
            raise ValueError('Content length cannot be negative.')
    except ValueError as error:
        # Unparseable or negative length: fall back to reading until
        # the server closes the connection.
        _logger.warning(__(
            _('Invalid content length: {error}'), error=error
        ))
        yield from self._read_body_until_close(response, file)
        return
    bytes_left = body_size
    while bytes_left > 0:
        data = yield from self._connection.read(self._read_size)
        if not data:
            break
        bytes_left -= len(data)
        if bytes_left < 0:
            # Server sent more than declared; the negative-index slice
            # trims the excess off the end of this chunk.
            data = data[:bytes_left]
            _logger.warning(_('Content overrun.'))
            self.close()
        self._data_event_dispatcher.notify_read(data)
        content_data = self._decompress_data(data)
        if file:
            file.write(content_data)
            if file_is_async:
                yield from file.drain()
    if bytes_left > 0:
        # Connection ended before the declared length was delivered.
        raise NetworkError('Connection closed.')
    content_data = self._flush_decompressor()
    if file and content_data:
        file.write(content_data)
        if file_is_async:
            yield from file.drain()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_body_by_chunk(self, response, file, raw=False):
    '''Read the connection using chunked transfer encoding.

    Coroutine.

    Args:
        response: The response whose trailer fields are parsed at the end.
        file: Optional destination file object.
        raw (bool): If True, also write the raw chunk framing to file.
    '''
    reader = ChunkedTransferReader(self._connection)
    file_is_async = hasattr(file, 'drain')
    while True:
        chunk_size, data = yield from reader.read_chunk_header()
        self._data_event_dispatcher.notify_read(data)
        # BUG FIX: raw writes previously did not guard against
        # file=None (unlike the trailer write below), which raised
        # AttributeError when reading a raw body without a file.
        if raw and file:
            file.write(data)
        if not chunk_size:
            break
        while True:
            content, data = yield from reader.read_chunk_body()
            self._data_event_dispatcher.notify_read(data)
            if not content:
                if raw and file:
                    file.write(data)
                break
            content = self._decompress_data(content)
            if file:
                file.write(content)
                if file_is_async:
                    yield from file.drain()
    # Flush any data the decompressor still buffers.
    content = self._flush_decompressor()
    if file:
        file.write(content)
        if file_is_async:
            yield from file.drain()
    trailer_data = yield from reader.read_trailer()
    self._data_event_dispatcher.notify_read(trailer_data)
    if file and raw:
        file.write(trailer_data)
        if file_is_async:
            yield from file.drain()
    # Trailer fields are merged into the response header fields.
    response.fields.parse(trailer_data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_read_strategy(cls, response):
    '''Return the appropriate algorithm of reading response.

    Returns:
        str: ``chunked``, ``length``, ``close``.
    '''
    transfer_encoding = response.fields.get('Transfer-Encoding', '')
    if re.match(r'chunked($|;)', transfer_encoding):
        return 'chunked'
    if 'Content-Length' in response.fields:
        return 'length'
    return 'close'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _setup_decompressor(self, response):
'''Set up the content encoding decompressor.'''
encoding = response.fields.get('Content-Encoding', '').lower()
if encoding == 'gzip':
self._decompressor = wpull.decompression.GzipDecompressor()
elif encoding == 'deflate':
self._decompressor = wpull.decompression.DeflateDecompressor()
else:
self._decompressor = None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _decompress_data(self, data):
'''Decompress the given data and return the uncompressed data.'''
if self._decompressor:
try:
return self._decompressor.decompress(data)
except zlib.error as error:
raise ProtocolError(
'zlib error: {0}.'.format(error)
) from error
else:
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _flush_decompressor(self):
'''Return any data left in the decompressor.'''
if self._decompressor:
try:
return self._decompressor.flush()
except zlib.error as error:
raise ProtocolError(
'zlib flush error: {0}.'.format(error)
) from error
else:
return b'' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gzip_uncompress(data, truncated=False):
    '''Uncompress gzip data.

    Args:
        data (bytes): The gzip data.
        truncated (bool): If True, the decompressor is not flushed.

    This is a convenience function.

    Returns:
        bytes: The inflated data.

    Raises:
        zlib.error
    '''
    decompressor = SimpleGzipDecompressor()
    inflated = decompressor.decompress(data)
    if truncated:
        return inflated
    return inflated + decompressor.flush()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_status(self, status: Status, increment_try_count: bool=True,
               filename: str=None):
    '''Mark the item with the given status.

    Args:
        status: a value from :class:`Status`.
        increment_try_count: if True, increment the ``try_count``
            value
        filename: local filename associated with this URL, if any.
    '''
    url = self.url_record.url
    # Guard against double check-in of the same item.
    assert not self._try_count_incremented, (url, status)
    if increment_try_count:
        self._try_count_incremented = True
    _logger.debug(__('Marking URL {0} status {1}.', url, status))
    result = URLResult()
    result.filename = filename
    self.app_session.factory['URLTable'].check_in(
        url,
        status,
        increment_try_count=increment_try_count,
        url_result=result,
    )
    self._processed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_child_url(self, url: str, inline: bool=False,
                  link_type: Optional[LinkType]=None,
                  post_data: Optional[str]=None,
                  level: Optional[int]=None,
                  replace: bool=False):
    '''Add links scraped from the document with automatic values.

    Args:
        url: A full URL. (It can't be a relative path.)
        inline: Whether the URL is an embedded object.
        link_type: Expected link type.
        post_data: URL encoded form data. The request will be made using
            POST. (Don't use this to upload files.)
        level: The child depth of this URL.
        replace: Whether to replace the existing entry in the database
            table so it will be redownloaded again.

    This function provides values automatically for:

    * ``inline``
    * ``level``
    * ``parent``: The referrering page.
    * ``root``

    See also :meth:`add_url`.
    '''
    properties = URLProperties()
    if level is None:
        properties.level = self.url_record.level + 1
    else:
        properties.level = level
    if inline:
        properties.inline_level = (self.url_record.inline_level or 0) + 1
    else:
        properties.inline_level = None
    properties.parent_url = self.url_record.url
    properties.root_url = self.url_record.root_url or self.url_record.url
    properties.link_type = link_type
    data = URLData()
    data.post_data = post_data
    if replace:
        # Drop the old row so the URL is fetched again.
        self.app_session.factory['URLTable'].remove_many([url])
    self.add_url(url, properties, data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def child_url_record(self, url: str, inline: bool=False,
                     link_type: Optional[LinkType]=None,
                     post_data: Optional[str]=None,
                     level: Optional[int]=None):
    '''Return a child URLRecord.

    This function is useful for testing filters before adding to table.
    '''
    record = URLRecord()
    record.url = url
    record.status = Status.todo
    record.try_count = 0
    record.level = self.url_record.level + 1 if level is None else level
    record.root_url = self.url_record.root_url or self.url_record.url
    record.parent_url = self.url_record.url
    # NOTE(review): non-inline children get inline_level 0 here, while
    # add_child_url() uses None for the same case — confirm the
    # asymmetry is intended.
    record.inline_level = (self.url_record.inline_level or 0) + 1 if inline else 0
    record.link_type = link_type
    record.post_data = post_data
    return record
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_heading_encoding(response):
    '''Return the document encoding from a HTTP header.

    Args:
        response (Response): An instance of :class:`.http.Response`.

    Returns:
        ``str``, ``None``: The codec name.
    '''
    charset = wpull.protocol.http.util.parse_charset(
        response.fields.get('content-type', ''))
    if not charset:
        return None
    return wpull.string.normalize_codec_name(charset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_response_encoding(response, is_html=False, peek=131072):
    '''Return the likely encoding of the response document.

    Args:
        response (Response): An instance of :class:`.http.Response`.
        is_html (bool): See :func:`.util.detect_encoding`.
        peek (int): The maximum number of bytes of the document to be analyzed.

    Returns:
        ``str``, ``None``: The codec name.
    '''
    header_encoding = get_heading_encoding(response)
    sample = wpull.util.peek_file(response.body, peek)
    detected = wpull.string.detect_encoding(
        sample, encoding=header_encoding, is_html=is_html
    )
    _logger.debug(__('Got encoding: {0}', detected))
    return detected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contains(self, url: str):
    '''Return whether the URL is in the table.'''
    try:
        self.get_one(url)
    except NotFound:
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_one(self, url: str,
            url_properties: Optional[URLProperties]=None,
            url_data: Optional[URLData]=None):
    '''Add a single URL to the table.

    Args:
        url: The URL to be added
        url_properties: Additional values to be saved
        url_data: Additional data to be saved
    '''
    # Delegate to the bulk insert with a single-item batch.
    self.add_many([AddURLInfo(url, url_properties, url_data)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stream(self):
    '''Iterate the file stream.

    Returns:
        iterator: Each item is a tuple:

        1. None, regex match
        2. str
    '''
    # Two-chunk sliding window: ``chunk_a`` is the chunk being scanned
    # and ``chunk_b`` the read-ahead. A match may straddle the chunk
    # boundary, so scanning also covers the first ``_overlap_size``
    # characters of chunk_b.
    chunk_a = None
    chunk_b = None
    chunk_a_index = 0       # absolute offset of chunk_a in the stream
    chunk_b_index = 0       # absolute offset of chunk_b in the stream
    search_start_index = 0  # absolute offset scanning has reached
    while True:
        chunk_a = chunk_b
        chunk_a_index = chunk_b_index
        chunk_b = self._file.read(self._read_size)
        if chunk_a is None:
            # First iteration only primes the read-ahead.
            continue
        chunk_b_index = chunk_a_index + len(chunk_a)
        if not chunk_a:
            break
        # Scan chunk_a plus the overlap region of chunk_b.
        current_chunk = chunk_a + chunk_b[:self._overlap_size]
        offset_end = len(chunk_a) + self._overlap_size
        while True:
            offset_start = search_start_index - chunk_a_index
            match = self._pattern.search(
                current_chunk, offset_start, offset_end)
            if not match:
                # No more matches here: emit the unscanned tail of
                # chunk_a and advance to the next chunk.
                unmatched_part = chunk_a[offset_start:]
                if unmatched_part:
                    yield (None, unmatched_part)
                search_start_index += len(unmatched_part)
                break
            start_index, end_index = match.span(match.lastindex)
            # Emit any text between the scan position and the match,
            # then the match itself.
            unmatched_part = current_chunk[offset_start:start_index]
            if unmatched_part:
                yield (None, unmatched_part)
            yield (match, match.group(match.lastindex))
            search_start_index += len(unmatched_part) + \
                len(match.group(match.lastindex))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notify(self, *args, **kwargs):
    '''Call all the callback handlers with given arguments.'''
    # Snapshot first so handlers may mutate the handler set mid-dispatch.
    snapshot = tuple(self.handlers)
    for callback in snapshot:
        callback(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new(self, name, *args, **kwargs):
    '''Create an instance.

    Args:
        name (str): The name of the class
        args: The arguments to pass to the class.
        kwargs: The keyword arguments to pass to the class.

    Returns:
        instance

    Raises:
        ValueError: The named instance already exists.
    '''
    if name in self._instance_map:
        raise ValueError('Instance {0} is already initialized'
                         .format(name))
    cls = self._class_map[name]
    created = cls(*args, **kwargs)
    self._instance_map[name] = created
    return created
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_all_initialized(self):
    '''Return whether all the instances have been initialized.

    Returns:
        bool
    '''
    return frozenset(self._class_map) == frozenset(self._instance_map)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_name(name, overrides=None):
    '''Normalize the key name to title case.

    For example, ``normalize_name('content-id')`` will become ``Content-Id``

    Args:
        name (str): The name to normalize.
        overrides (set, sequence): A set or sequence containing keys that
            should be cased to themselves. For example, passing
            ``set('WARC-Type')`` will normalize any key named "warc-type" to
            ``WARC-Type`` instead of the default ``Warc-Type``.

    Returns:
        str
    '''
    title_name = name.title()
    if not overrides:
        return title_name
    override_map = {key.title(): key for key in overrides}
    return override_map.get(title_name, title_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_line_ending(string):
    '''Return the most likely line delimiter from the string.'''
    assert isinstance(string, str), 'Expect str. Got {}'.format(type(string))
    # Ties (including the empty string) favor CRLF.
    return '\r\n' if string.count('\r\n') >= string.count('\n') else '\n'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unfold_lines(string):
    '''Join lines that are wrapped.

    Any line that starts with a space or tab is joined to the previous
    line.
    '''
    assert isinstance(string, str), 'Expect str. Got {}'.format(type(string))
    buffer = io.StringIO()
    for index, line in enumerate(string.splitlines()):
        if line and line[0:1] in (' ', '\t'):
            # Continuation line: join with a single space.
            buffer.write(' ')
        elif index != 0:
            buffer.write('\r\n')
        buffer.write(line.strip())
    buffer.write('\r\n')
    return buffer.getvalue()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, string, strict=True):
    '''Parse the string or bytes.

    Args:
        strict (bool): If True, errors will not be ignored

    Raises:
        :class:`ValueError` if the record is malformed.
    '''
    if isinstance(string, bytes):
        decode_errors = 'strict' if strict else 'replace'
        string = string.decode(self.encoding, errors=decode_errors)
    # Accumulate everything ever parsed into this record.
    if not self.raw:
        self.raw = string
    else:
        self.raw += string
    for line in unfold_lines(string).splitlines():
        if not line:
            continue
        if ':' not in line:
            if strict:
                raise ValueError('Field missing colon.')
            continue
        name, _unused, value = line.partition(':')
        self.add(name.strip(), value.strip())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, name, value):
    '''Append the name-value pair to the record.'''
    key = normalize_name(name, self._normalize_overrides)
    self._map[key].append(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, name):
    '''Return all the values for given name.'''
    return self._map[normalize_name(name, self._normalize_overrides)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(self):
    '''Return an iterator of name-value pairs.'''
    return (
        (name, value)
        for name, values in self._map.items()
        for value in values
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_str(self):
    '''Convert to string.'''
    pairs = []
    for name, value in self.get_all():
        if value and self._wrap_width:
            # Fold long values onto indented continuation lines.
            wrapped = '\r\n'.join(textwrap.wrap(
                value, width=self._wrap_width,
                drop_whitespace=False, initial_indent=' ',
                subsequent_indent=' '
            ))
            pairs.append('{0}:{1}'.format(name, wrapped))
        elif value:
            pairs.append('{0}: {1}'.format(name, value))
        else:
            pairs.append('{0}:'.format(name))
    # Trailing empty entry yields the final CRLF.
    pairs.append('')
    return '\r\n'.join(pairs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_bytes(self, errors='strict'):
    '''Serialize the record to bytes using the record's encoding.

    Args:
        errors (str): Error handling scheme passed to ``str.encode``.
    '''
    text = str(self)
    return text.encode(self.encoding, errors=errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recycle(self):
    '''Clean up and return connections back to the pool.

    Connections should be kept alive if supported.
    '''
    # Release without waiting so recycling never blocks the caller.
    for connection in self._connections:
        self._connection_pool.no_wait_release(connection)
    self._connections.clear()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def session(self) -> SessionT:
    '''Return a new session.'''
    # NOTE(review): _session_class() appears to return the session
    # *class*, which the second call then instantiates — confirm
    # against the class's _session_class definition.
    session = self._session_class()(
        connection_pool=self._connection_pool,
    )
    # Let subscribers (e.g. recorders) observe the new session.
    self.event_dispatcher.notify(self.ClientEvent.new_session, session)
    return session
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_cookies(self, domain):
    '''Return the number of cookies stored for the given domain.

    Counts cookies across every path registered under the domain.
    '''
    per_domain = self.cookie_jar._cookies.get(domain)
    if not per_domain:
        return 0
    return sum(len(by_name) for by_name in per_domain.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cookie_length(self, domain):
    '''Return approximate length of all cookie key-values for a domain.

    The length counts the path, the cookie name, and the cookie value
    (missing values count as empty).
    '''
    jar = self.cookie_jar._cookies
    if domain not in jar:
        return 0
    total = 0
    for path, cookies_by_name in jar[domain].items():
        for name, cookie in cookies_by_name.items():
            total += len(path) + len(name) + len(cookie.value or '')
    return total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_listing_type(lines, threshold=100):
    '''Guess the style of directory listing.

    Args:
        lines: An iterable of listing lines to sample.
        threshold (int): Stop scanning once a style's score exceeds
            this value.

    Returns:
        str: ``unix``, ``msdos``, ``nlst``, ``unknown``.
    '''
    unix_score = msdos_score = nlst_score = 0
    for line in lines:
        if not line:
            continue
        # Unix listings show permission triads such as "rwx" or "r--".
        if re.search(r'---|r--|rw-|rwx', line):
            unix_score += 1
        # MS-DOS listings start with a short date and may show <DIR>.
        if '<DIR>' in line or re.search(r'^.{0,4}\d\d', line):
            msdos_score += 1
        # NLST output is a bare name with no spaces.
        if len(line.split(' ', 1)) == 1:
            nlst_score += 1
        if max(unix_score, msdos_score, nlst_score) > threshold:
            break
    # Preserve the original tie-break order: unix, msdos, nlst.
    best_name, best_score = max(
        (('unix', unix_score), ('msdos', msdos_score), ('nlst', nlst_score)),
        key=lambda pair: pair[1]
    )
    return best_name if best_score else 'unknown'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_unix_perm(text):
    '''Parse a Unix permission string (e.g. ``rwxr-xr--``) into an int.

    Returns 0 for strings that are not exactly nine characters.
    '''
    # Based on ftp-ls.c symperms
    if len(text) != 9:
        return 0
    value = 0
    for offset in (0, 3, 6):
        triad = text[offset:offset + 3]
        value <<= 3
        if triad[0] == 'r':
            value |= 0o4
        if triad[1] == 'w':
            value |= 0o2
        # 's' (setuid/setgid) implies the execute bit here.
        if triad[2] in 'xs':
            value |= 0o1
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, lines):
    '''Parse the lines.

    Dispatches to the parser matching ``self.type``.

    Args:
        lines: An iterable of listing lines.

    Raises:
        UnknownListingError: If ``self.type`` is not a supported style.
    '''
    if self.type == 'msdos':
        return self.parse_msdos(lines)
    elif self.type == 'unix':
        return self.parse_unix(lines)
    elif self.type == 'nlst':
        return self.parse_nlst(lines)
    else:
        raise UnknownListingError('Unsupported listing type.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_datetime(self, text):
    '''Parse datetime from line of text.

    Delegates to the module-level ``parse_datetime`` function using
    this parser's configured date format and AM/PM setting.
    '''
    return parse_datetime(text, date_format=self.date_format,
                          is_day_period=self.is_day_period)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_msdos(self, lines):
    '''Parse lines from a MS-DOS format listing.

    Expected line layout: ``date time (<DIR>|size) filename``.

    Yields:
        FileEntry: One entry per listing line.
    '''
    for line in lines:
        # Split into at most 4 fields so a filename containing spaces
        # stays intact in the last field. (The previous maxsplit of 4
        # truncated such filenames at their first space.)
        fields = line.split(None, 3)
        date_str = fields[0]
        time_str = fields[1]
        datetime_str = '{} {}'.format(date_str, time_str)
        file_datetime = self.parse_datetime(datetime_str)[0]
        if fields[2] == '<DIR>':
            # Directories have no size field.
            file_size = None
            file_type = 'dir'
        else:
            file_size = parse_int(fields[2])
            file_type = 'file'
        filename = fields[3]
        yield FileEntry(filename, file_type, file_size, file_datetime)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_unix(self, lines):
    '''Parse listings from a Unix ls command format.

    Args:
        lines: An iterable of listing lines.

    Yields:
        FileEntry: One entry per listing line.

    Raises:
        ListingError: If a file type or date cannot be determined.
    '''
    # This method uses some Filezilla parsing algorithms
    for line in lines:
        original_line = line
        fields = line.split(' ')
        after_perm_index = 0
        # Search for the permissions field by checking the file type
        for field in fields:
            # Track how far into the line the permissions field ends
            # so the remainder can be sliced off below.
            after_perm_index += len(field)
            if not field:
                continue
            # If the filesystem goes corrupt, it may show ? instead
            # but I don't really care in that situation.
            if field[0] in 'bcdlps-':
                if field[0] == 'd':
                    file_type = 'dir'
                elif field[0] == '-':
                    file_type = 'file'
                elif field[0] == 'l':
                    file_type = 'symlink'
                else:
                    file_type = 'other'
                perms = parse_unix_perm(field[1:])
                break
        else:
            raise ListingError('Failed to parse file type.')
        line = line[after_perm_index:]
        # We look for the position of the date and use the integer
        # before it as the file size.
        # We look for the position of the time and use the text
        # after it as the filename
        while line:
            try:
                datetime_obj, start_index, end_index = self.parse_datetime(line)
            except ValueError:
                # No date at this offset; advance and retry.
                line = line[4:]
            else:
                break
        else:
            raise ListingError(
                'Could parse a date from {}'.format(repr(original_line)))
        file_size = int(line[:start_index].rstrip().rpartition(' ')[-1])
        filename = line[end_index:].strip()
        if file_type == 'symlink':
            # "name -> target" form used by ls for symlinks.
            filename, sep, symlink_dest = filename.partition(' -> ')
        else:
            symlink_dest = None
        yield FileEntry(filename, file_type, file_size, datetime_obj,
                        symlink_dest, perm=perms)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_input(self):
    '''Parse the listings.

    Returns:
        iter: A iterable of :class:`.ftp.ls.listing.FileEntry`
    '''
    if self._text:
        lines = iter(self._text.splitlines())
    elif self._file:
        lines = self._file
    else:
        lines = ()
    # Buffer the first ~100 lines so the listing style and the date
    # format can be sniffed, then chain them back so nothing is lost.
    sample_lines = []
    for line in lines:
        if len(sample_lines) > 100:
            break
        sample_lines.append(line)
    lines = itertools.chain(sample_lines, lines)
    self.guess_type(sample_lines)
    datetime_format = wpull.protocol.ftp.ls.date.guess_datetime_format(
        sample_lines)
    self.set_datetime_format(datetime_format)
    return self.parse(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
    '''Open a file object on to the Response Body.

    Args:
        filename: The path where the file is to be saved
        response: Response
        mode: The file mode

    This function will create the directories if not exist.
    '''
    _logger.debug('Saving file to {0}, mode={1}.',
                  filename, mode)
    dir_path = os.path.dirname(filename)
    if dir_path and not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # Attach the on-disk file as the response body so subsequent
    # reads and writes go straight to the destination file.
    response.body = Body(open(filename, mode))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_timestamp(cls, filename: str, response: 'HTTPResponse'):
    '''Set the Last-Modified timestamp onto the given file.

    Args:
        filename: The path of the file
        response: Response

    The access time is set to now; the modification time is set to
    the server-supplied ``Last-Modified`` date. If the header is
    absent or unparseable, the file is left untouched.
    '''
    last_modified = response.fields.get('Last-Modified')
    if not last_modified:
        return
    try:
        last_modified = email.utils.parsedate(last_modified)
    except ValueError:
        _logger.exception('Failed to parse date.')
        return
    if last_modified is None:
        # BUG FIX: parsedate() signals failure by returning None
        # rather than raising; time.mktime(None) would raise
        # TypeError on malformed header values.
        return
    last_modified = time.mktime(last_modified)
    os.utime(filename, (time.time(), last_modified))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_headers(cls, filename: str, response: 'HTTPResponse'):
    '''Prepend the HTTP response header to the file.

    Args:
        filename: The path of the file
        response: Response

    The header and body are written to a sibling ``-new`` file which
    then replaces the original.
    '''
    new_filename = filename + '-new'
    # BUG FIX: open() was called with only the mode string ('wb'),
    # which made Python treat 'wb' as the path to open. Open the
    # temporary file by its intended name.
    with open(new_filename, 'wb') as new_file:
        new_file.write(response.header())
        # Restore the body's offset afterwards so other consumers
        # still see it where they left it.
        with wpull.util.reset_file_offset(response.body):
            response.body.seek(0)
            shutil.copyfileobj(response.body, new_file)
    os.remove(filename)
    os.rename(new_filename, filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_filename(self, request: BaseRequest):
    '''Get the appropriate filename from the request.

    Returns:
        str: A filesystem path derived from the request URL.
    '''
    path = self._path_namer.get_filename(request.url_info)
    if os.path.isdir(path):
        # A directory already occupies this path; append a suffix so
        # the file does not collide with it.
        path += '.f'
    else:
        dir_name, name = os.path.split(path)
        path = os.path.join(anti_clobber_dir_path(dir_name), name)
    return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_file_continue_request(self, request: BaseRequest):
    '''Modify the request to resume downloading file.

    If a partial file already exists on disk, ask the server to
    resume the transfer from the file's current size.
    '''
    if os.path.exists(self._filename):
        size = os.path.getsize(self._filename)
        request.set_continue(size)
        self._file_continue_requested = True
        _logger.debug('Continue file from {0}.', size)
    else:
        _logger.debug('No file to continue.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_file_continue_response(self, response: HTTPResponse):
    '''Process a partial content response.

    Appends to the existing file on 206 Partial Content; any other
    status means the resume attempt failed, so an error is raised.
    '''
    code = response.status_code
    if code == http.client.PARTIAL_CONTENT:
        self.open_file(self._filename, response, mode='ab+')
    else:
        self._raise_cannot_continue_error()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_file_continue_ftp_response(self, response: FTPResponse):
    '''Process a restarted content response.

    The resume only proceeds when both the request asked for a
    restart offset and the server acknowledged one.
    '''
    if response.request.restart_value and response.restart_value:
        self.open_file(self._filename, response, mode='ab+')
    else:
        self._raise_cannot_continue_error()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _rename_with_content_disposition(self, response: HTTPResponse):
    '''Rename using the Content-Disposition header.

    Only applies to HTTP(S) responses carrying a parseable
    ``Content-Disposition`` filename; the directory part of the
    current filename is preserved.
    '''
    if not self._filename:
        return
    if response.request.url_info.scheme not in ('http', 'https'):
        return
    header_value = response.fields.get('Content-Disposition')
    if not header_value:
        return
    filename = parse_content_disposition(header_value)
    if filename:
        dir_path = os.path.dirname(self._filename)
        # Sanitize the server-supplied name before using it on disk.
        new_filename = self._path_namer.safe_filename(filename)
        self._filename = os.path.join(dir_path, new_filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def session(self) -> BaseFileWriterSession:
    '''Return the File Writer Session.

    A fresh session is created per download, configured with this
    writer's path naming and post-processing options.
    '''
    return self.session_class(
        self._path_namer,
        self._file_continuing,
        self._headers_included,
        self._local_timestamping,
        self._adjust_extension,
        self._content_disposition,
        self._trust_server_names,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_charset(header_string):
    '''Parse a "Content-Type" string for the document encoding.

    Returns:
        str, None: The charset token if present, otherwise ``None``.
    '''
    pattern = r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)'''
    found = re.search(pattern, header_string, re.IGNORECASE)
    return found.group(1) if found else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def should_close(http_version, connection_field):
    '''Return whether the connection should be closed.

    Args:
        http_version (str): The HTTP version string like ``HTTP/1.0``.
        connection_field (str): The value for the ``Connection`` header.
    '''
    token = (connection_field or '').lower()
    if http_version == 'HTTP/1.0':
        # HTTP/1.0 closes by default; only an explicit keep-alive
        # (with or without the hyphen) keeps the connection open.
        return token.replace('-', '') != 'keepalive'
    # HTTP/1.1+ keeps alive unless explicitly told to close.
    return token == 'close'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seek_file_end(file):
    '''Advance the file position to the end of the file.'''
    try:
        file.seek(0, 2)
    except ValueError:
        # gzip files don't support seek-from-end; consume the stream
        # instead until nothing is left.
        while file.read(4096):
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_iso8601_str(string):
    '''Parse a fixed ISO8601 datetime string.

    .. Note:: This function only parses dates in the format
        ``%Y-%m-%dT%H:%M:%SZ``. You must use a library like
        ``dateutils`` to properly parse dates and times.

    Returns:
        int: A UNIX timestamp.
    '''
    parsed = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
    # timegm treats the tuple as UTC, matching the trailing 'Z'.
    return int(calendar.timegm(parsed.utctimetuple()))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def python_version():
    '''Return the running Python version as ``major.minor.patch``.'''
    return '.'.join(str(part) for part in sys.version_info[:3])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_pem(data):
    '''Processes the bytes for PEM certificates.

    Returns:
        ``set`` containing each certificate
    '''
    assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data))
    certificates = set()
    pending_lines = []
    inside_block = False
    for line in re.split(br'[\r\n]+', data):
        if line == b'-----BEGIN CERTIFICATE-----':
            assert not inside_block
            inside_block = True
        elif line == b'-----END CERTIFICATE-----':
            assert inside_block
            inside_block = False
            # Re-flow the base64 body to the canonical 70-column width.
            body = rewrap_bytes(b''.join(pending_lines))
            certificates.add(
                b'-----BEGIN CERTIFICATE-----\n' + body +
                b'\n-----END CERTIFICATE-----\n'
            )
            pending_lines = []
        elif inside_block:
            pending_lines.append(line)
    return certificates
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrap_bytes(data):
    '''Rewrap characters to 70 character width.

    Intended to rewrap base64 content.
    '''
    chunks = []
    for start in range(0, len(data), 70):
        chunks.append(data[start:start + 70])
    return b'\n'.join(chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_package_data(filename, mode='rb'):
    '''Return the contents of a real file or a zip file.

    Args:
        filename: Path to the file. The path may cross into a ``.zip``
            archive, e.g. ``bundle.zip/data/file.txt``.
        mode: File mode used when reading from the real filesystem.

    Raises:
        FileNotFoundError: If the path neither exists on disk nor
            crosses a ``.zip`` component.
    '''
    if os.path.exists(filename):
        with open(filename, mode=mode) as in_file:
            return in_file.read()

    parts = os.path.normpath(filename).split(os.sep)

    for index, part in enumerate(parts):
        if part.endswith('.zip'):
            zip_path = os.sep.join(parts[:index + 1])
            member_path = os.sep.join(parts[index + 1:])
            break
    else:
        # BUG FIX: previously fell through with zip_path unbound and
        # raised a confusing NameError; fail with a clear error.
        raise FileNotFoundError(filename)

    if platform.system() == 'Windows':
        # Zip members always use forward slashes.
        member_path = member_path.replace('\\', '/')

    with zipfile.ZipFile(zip_path) as zip_file:
        return zip_file.read(member_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_package_filename(filename, package_dir=None):
    '''Return the filename of the data file.'''
    if getattr(sys, 'frozen', False):
        # Running under a frozen bundle (e.g. PyInstaller): data lives
        # under the unpacked bundle directory.
        base = os.path.join(
            sys._MEIPASS,
            os.path.basename(os.path.dirname(__file__))
        )
    else:
        base = package_dir or os.path.dirname(__file__)
    return os.path.join(base, filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_exception_message(instance):
    '''Return the exception's message, falling back to its class name.'''
    if getattr(instance, 'args', None):
        return str(instance)
    try:
        return type(instance).__name__
    except AttributeError:
        return str(instance)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(self, obj):
    '''Pickle an object into the wrapped file using the configured protocol.'''
    data = pickle.dumps(obj, protocol=self._protocol)
    self._file.write(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quoted_attribute_value(self, value):
    """Make a value into a quoted XML attribute, possibly escaping it.

    Most strings are quoted with double quotes::

        Bob's Bar -> "Bob's Bar"

    If the string contains double quotes, single quotes are used::

        Welcome to "my bar" -> 'Welcome to "my bar"'

    If it contains both, the double quotes are escaped and double
    quotes are used::

        Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;"
    """
    has_double = '"' in value
    has_single = "'" in value
    if has_double and has_single:
        # Escape the double quotes: &quot; is a valid entity name in
        # both HTML and XML, whereas the single-quote entity differs
        # between ' and &squot;.
        return '"{}"'.format(value.replace('"', '&quot;'))
    if has_double:
        # Double quotes but no single quotes: single-quote the value.
        return "'{}'".format(value)
    return '"{}"'.format(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def encodings(self):
    """Yield a number of encodings that might work for this markup.

    Candidates are yielded in decreasing order of confidence: caller
    overrides, the byte-order-mark sniff, any in-document declaration,
    chardet's guess, and finally utf-8/windows-1252. ``_usable``
    filters out candidates already tried.
    """
    tried = set()
    for e in self.override_encodings:
        if self._usable(e, tried):
            yield e
    # Did the document originally start with a byte-order mark
    # that indicated its encoding?
    if self._usable(self.sniffed_encoding, tried):
        yield self.sniffed_encoding
    # Look within the document for an XML or HTML encoding
    # declaration.
    if self.declared_encoding is None:
        self.declared_encoding = self.find_declared_encoding(
            self.markup, self.is_html)
    if self._usable(self.declared_encoding, tried):
        yield self.declared_encoding
    # Use third-party character set detection to guess at the
    # encoding.
    if self.chardet_encoding is None:
        self.chardet_encoding = chardet_dammit(self.markup)
    if self._usable(self.chardet_encoding, tried):
        yield self.chardet_encoding
    # As a last-ditch effort, try utf-8 and windows-1252.
    for e in ('utf-8', 'windows-1252'):
        if self._usable(e, tried):
            yield e
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_byte_order_mark(cls, data):
    """If a byte-order mark is present, strip it and return the encoding it implies.

    Args:
        data (bytes): The raw document bytes.

    Returns:
        tuple: ``(data_without_bom, encoding_or_None)``.
    """
    encoding = None
    # BUG FIX: the UTF-16 checks compared a bytes slice against the
    # *str* '\x00\x00', which is never equal in Python 3, so UTF-32
    # little-endian input was misdetected as UTF-16. Compare against
    # the bytes literal b'\x00\x00' to rule out UTF-32 BOMs.
    if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
            and (data[2:4] != b'\x00\x00'):
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
            and (data[2:4] != b'\x00\x00'):
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == b'\xef\xbb\xbf':
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == b'\x00\x00\xfe\xff':
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == b'\xff\xfe\x00\x00':
        encoding = 'utf-32le'
        data = data[4:]
    return data, encoding
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
    """Given a document, tries to find its declared encoding.

    An XML encoding is declared at the beginning of the document.
    An HTML encoding is declared in a <meta> tag, hopefully near the
    beginning of the document.

    Returns:
        The lowercased encoding name, or None if none was declared.
    """
    if search_entire_document:
        xml_endpos = html_endpos = len(markup)
    else:
        # Only scan the head of the document: 1 kB for the XML
        # declaration and at least 2 kB (or 5% of the document) for
        # an HTML <meta> tag.
        xml_endpos = 1024
        html_endpos = max(2048, int(len(markup) * 0.05))
    declared_encoding = None
    declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
    if not declared_encoding_match and is_html:
        declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
    if declared_encoding_match is not None:
        declared_encoding = declared_encoding_match.groups()[0].decode(
            'ascii', 'replace')
    if declared_encoding:
        return declared_encoding.lower()
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sub_ms_char(self, match):
    """Changes a MS smart quote character to an XML or HTML entity,
    or an ASCII character.

    Used as a ``re.sub`` callback; the replacement is returned as bytes.
    """
    orig = match.group(1)
    if self.smart_quotes_to == 'ascii':
        sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
    else:
        sub = self.MS_CHARS.get(orig)
        # Entries may be (entity_name, codepoint) tuples; pick the
        # form matching the requested entity style.
        if type(sub) == tuple:
            if self.smart_quotes_to == 'xml':
                sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
            else:
                sub = '&'.encode() + sub[0].encode() + ';'.encode()
        else:
            sub = sub.encode()
    return sub
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detwingle(cls, in_bytes, main_encoding="utf8", embedded_encoding="windows-1252"):
    """Fix characters from one encoding embedded in some other encoding.

    Currently the only situation supported is Windows-1252 (or its
    subset ISO-8859-1), embedded in UTF-8. The input must be a
    bytestring. If you've already converted the document to Unicode,
    you're too late.

    The output is a bytestring in which `embedded_encoding` characters
    have been converted to their `main_encoding` equivalents.
    """
    if embedded_encoding.replace('_', '-').lower() not in (
            'windows-1252', 'windows_1252'):
        raise NotImplementedError(
            "Windows-1252 and ISO-8859-1 are the only currently supported "
            "embedded encodings.")
    if main_encoding.lower() not in ('utf8', 'utf-8'):
        raise NotImplementedError(
            "UTF-8 is the only currently supported main encoding.")
    byte_chunks = []
    chunk_start = 0
    pos = 0
    while pos < len(in_bytes):
        byte = in_bytes[pos]
        if not isinstance(byte, int):
            # Python 2.x
            byte = ord(byte)
        if (byte >= cls.FIRST_MULTIBYTE_MARKER
                and byte <= cls.LAST_MULTIBYTE_MARKER):
            # This is the start of a UTF-8 multibyte character. Skip
            # to the end.
            for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
                if byte >= start and byte <= end:
                    pos += size
                    break
        elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
            # We found a Windows-1252 character!
            # Save the string up to this point as a chunk.
            byte_chunks.append(in_bytes[chunk_start:pos])
            # Now translate the Windows-1252 character into UTF-8
            # and add it as another, one-byte chunk.
            byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
            pos += 1
            chunk_start = pos
        else:
            # Go on to the next character.
            pos += 1
    if chunk_start == 0:
        # The string is unchanged.
        return in_bytes
    else:
        # Store the final chunk.
        byte_chunks.append(in_bytes[chunk_start:])
    return b''.join(byte_chunks)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scrape_file(self, file, encoding=None, base_url=None):
    '''Scrape a file for links.

    See :meth:`scrape` for the return value.

    Args:
        file: The document file object.
        encoding (str): Optional character encoding of the document.
        base_url (str): Optional base URL for resolving relative links.
    '''
    elements = self.iter_elements(file, encoding=encoding)
    link_contexts = set()
    link_infos = self._element_walker.iter_links(elements)
    for link_info in link_infos:
        element_base_url = base_url
        # Honor an in-document base link (e.g. <base href>) when seen.
        if link_info.base_link:
            clean_base_url = clean_link_soup(link_info.base_link)
            if element_base_url and base_url:
                # Fall back to the original base URL if joining fails.
                element_base_url = urljoin_safe(
                    base_url, clean_base_url
                ) or base_url
        if element_base_url:
            url = urljoin_safe(
                element_base_url,
                clean_link_soup(link_info.link),
                allow_fragments=False
            )
        else:
            url = clean_link_soup(link_info.link)
        if url:
            link_contexts.add(LinkContext(
                url,
                inline=link_info.inline,
                linked=link_info.linked,
                link_type=link_info.link_type,
                extra=link_info
            ))
    scrape_result = ScrapeResult(link_contexts, encoding)
    scrape_result['base_url'] = base_url
    return scrape_result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _is_accepted(self, element_tag):
'''Return if the link is accepted by the filters.'''
element_tag = element_tag.lower()
if self._ignored_tags is not None \
and element_tag in self._ignored_tags:
return False
if self._followed_tags is not None:
return element_tag in self._followed_tags
else:
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links(self, elements):
    '''Iterate the document root for links.

    Returns:
        iterable: A iterator of :class:`LinkedInfo`.
    '''
    for node in elements:
        # Skip non-element nodes such as comments and doctypes.
        if isinstance(node, Element):
            yield from self.iter_links_element(node)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_element(self, element):
    '''Iterate a HTML element.

    Dispatches to a tag-specific handler, then also scrapes any
    inline ``style`` attribute for CSS links.
    '''
    # reference: lxml.html.HtmlMixin.iterlinks()
    attrib = element.attrib
    tag = element.tag
    if tag == 'link':
        iterable = self.iter_links_link_element(element)
    elif tag == 'meta':
        iterable = self.iter_links_meta_element(element)
    elif tag in ('object', 'applet'):
        iterable = self.iter_links_object_element(element)
    elif tag == 'param':
        iterable = self.iter_links_param_element(element)
    elif tag == 'style':
        iterable = self.iter_links_style_element(element)
    elif tag == 'script':
        iterable = self.iter_links_script_element(element)
    else:
        iterable = self.iter_links_plain_element(element)
    # RSS/Atom
    if tag in ('link', 'url', 'icon'):
        iterable = itertools.chain(
            iterable, self.iter_links_element_text(element)
        )
    for link_info in iterable:
        yield link_info
    if 'style' in attrib and self.css_scraper:
        for link in self.css_scraper.scrape_links(attrib['style']):
            yield LinkInfo(
                element=element, tag=element.tag, attrib='style',
                link=link,
                inline=True, linked=False,
                base_link=None,
                value_type='css',
                link_type=LinkType.media,
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_element_text(cls, element):
    '''Get the element text as a link.

    Used for feed-style documents where the URL is the text content
    of the element (e.g. RSS/Atom ``link`` elements).
    '''
    if element.text:
        link_type = identify_link_type(element.text)
        yield LinkInfo(
            element=element, tag=element.tag, attrib=None,
            link=element.text,
            inline=False, linked=True,
            base_link=None,
            value_type='plain',
            link_type=link_type
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_link_element(self, element):
    '''Iterate a ``link`` for URLs.

    This function handles stylesheets and icons in addition to
    standard scraping rules.
    '''
    rel = element.attrib.get('rel', '')
    stylesheet = 'stylesheet' in rel
    icon = 'icon' in rel
    # Stylesheets and icons are page requisites (inline); anything
    # else is treated as a navigational link.
    inline = stylesheet or icon
    if stylesheet:
        link_type = LinkType.css
    elif icon:
        link_type = LinkType.media
    else:
        link_type = None
    for attrib_name, link in self.iter_links_by_attrib(element):
        yield LinkInfo(
            element=element, tag=element.tag, attrib=attrib_name,
            link=link,
            inline=inline, linked=not inline,
            base_link=None,
            value_type='plain',
            link_type=link_type
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_meta_element(cls, element):
    '''Iterate the ``meta`` element for links.

    This function handles refresh URLs.
    '''
    if element.attrib.get('http-equiv', '').lower() == 'refresh':
        content_value = element.attrib.get('content')
        if content_value:
            link = parse_refresh(content_value)
            if link:
                yield LinkInfo(
                    element=element, tag=element.tag, attrib='http-equiv',
                    link=link,
                    inline=False, linked=True,
                    base_link=None,
                    value_type='refresh',
                    link_type=None  # treat it as a redirect
                )
    else:
        # Not a refresh; check for Open Graph URLs instead.
        for link_info in cls.iter_links_open_graph_meta(element):
            yield link_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_object_element(cls, element):
    '''Iterate ``object`` and ``applet`` elements.

    This function also looks at ``codebase`` and ``archive`` attributes.
    '''
    base_link = element.attrib.get('codebase', None)
    if base_link:
        # lxml returns codebase as inline
        # BUG FIX: the link type was looked up with
        # element.attrib.get(base_link), i.e. an attribute *named* by
        # the URL, which is effectively always None. Classify the URL
        # itself, consistent with the other attributes below.
        link_type = identify_link_type(base_link)
        yield LinkInfo(
            element=element, tag=element.tag, attrib='codebase',
            link=base_link,
            inline=True, linked=False,
            base_link=None,
            value_type='plain',
            link_type=link_type
        )
    for attribute in ('code', 'src', 'classid', 'data'):
        if attribute in element.attrib:
            link_type = identify_link_type(element.attrib.get(attribute))
            yield LinkInfo(
                element=element, tag=element.tag, attrib=attribute,
                link=element.attrib.get(attribute),
                inline=True, linked=False,
                base_link=base_link,
                value_type='plain',
                link_type=link_type
            )
    if 'archive' in element.attrib:
        # The archive attribute is a space-separated list of URLs.
        for match in re.finditer(r'[^ ]+', element.attrib.get('archive')):
            value = match.group(0)
            link_type = identify_link_type(value)
            yield LinkInfo(
                element=element, tag=element.tag, attrib='archive',
                link=value,
                inline=True, linked=False,
                base_link=base_link,
                value_type='list',
                link_type=link_type
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_param_element(cls, element):
    '''Iterate a ``param`` element.

    Only ``valuetype="ref"`` parameters carry URLs in their value.
    '''
    valuetype = element.attrib.get('valuetype', '')
    if valuetype.lower() == 'ref' and 'value' in element.attrib:
        link_type = identify_link_type(element.attrib.get('value'))
        yield LinkInfo(
            element=element, tag=element.tag, attrib='value',
            link=element.attrib.get('value'),
            inline=True, linked=False,
            base_link=None,
            value_type='plain',
            link_type=link_type
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_style_element(self, element):
    '''Iterate a ``style`` element.

    Delegates the CSS text to the CSS scraper; ``@import`` targets are
    classified as stylesheets, everything else as media.
    '''
    if self.css_scraper and element.text:
        link_iter = self.css_scraper.scrape_links(element.text,
                                                  context=True)
        for link, context in link_iter:
            if context == 'import':
                link_type = LinkType.css
            else:
                link_type = LinkType.media
            yield LinkInfo(
                element=element, tag=element.tag, attrib=None,
                link=link,
                inline=True, linked=False,
                base_link=None,
                value_type='css',
                link_type=link_type
            )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_links_script_element(self, element):
    '''Iterate a ``script`` element.

    The script text is scanned by the JavaScript scraper, then the
    element's attributes are scraped with the generic rules.
    '''
    if self.javascript_scraper and element.text:
        link_iter = self.javascript_scraper.scrape_links(element.text,
                                                         context=True)
        for link, context in link_iter:
            inline = is_likely_inline(link)
            # context is True for plain links; otherwise it carries a
            # specific link type.
            if context is True:
                link_type = None
            else:
                link_type = context
            yield LinkInfo(
                element=element, tag=element.tag, attrib=None,
                link=link,
                inline=inline, linked=not inline,
                base_link=None,
                value_type='script',
                link_type=link_type
            )
    for link in self.iter_links_plain_element(element):
        yield link
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def iter_links_plain_element(self, element):
'''Iterate any element for links using generic rules.'''
for attrib_name, link in self.iter_links_by_attrib(element):
if attrib_name in self.LINK_ATTRIBUTES:
inline = self.is_link_inline(element.tag, attrib_name)
linked = self.is_html_link(element.tag, attrib_name)
else:
inline = is_likely_inline(link)
linked = not inline
link_type = identify_link_type(link)
yield LinkInfo(
element=element, tag=element.tag, attrib=attrib_name,
link=link,
inline=inline, linked=linked,
base_link=None,
value_type='plain',
link_type=link_type
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def iter_links_by_attrib(self, element):
'''Iterate an element by looking at its attributes for links.'''
for attrib_name in element.attrib.keys():
attrib_value = element.attrib.get(attrib_name)
if attrib_name in self.LINK_ATTRIBUTES:
if self.javascript_scraper and \
attrib_value.lstrip().startswith('javascript:'):
for link in self.iter_links_by_js_attrib(
attrib_name, percent_decode(attrib_value)):
yield link
else:
yield attrib_name, attrib_value
elif self.javascript_scraper and \
attrib_name[:5] in self.DYNAMIC_ATTRIBUTES:
for link in self.iter_links_by_js_attrib(attrib_name,
attrib_value):
yield link
elif attrib_name.startswith('data-'):
if is_likely_link(attrib_value) \
and not is_unlikely_link(attrib_value):
yield attrib_name, attrib_value
elif attrib_name == 'srcset':
items = self.iter_links_by_srcset_attrib(
attrib_name, attrib_value)
for item in items:
yield item |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def iter_links_by_js_attrib(self, attrib_name, attrib_value):
'''Iterate links of a JavaScript pseudo-link attribute.'''
links = self.javascript_scraper.scrape_links(attrib_value)
for link in links:
yield attrib_name, link |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def is_link_inline(cls, tag, attribute):
'''Return whether the link is likely to be inline object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_INLINE
return attribute != 'href' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def is_html_link(cls, tag, attribute):
'''Return whether the link is likely to be external object.'''
if tag in cls.TAG_ATTRIBUTES \
and attribute in cls.TAG_ATTRIBUTES[tag]:
attr_flags = cls.TAG_ATTRIBUTES[tag][attribute]
return attr_flags & cls.ATTR_HTML
return attribute == 'href' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def robots_cannot_follow(cls, element):
'''Return whether we cannot follow links due to robots.txt directives.
'''
return (
element.tag == 'meta'
and element.attrib.get('name', '').lower() == 'robots'
and 'nofollow' in element.attrib.get('value', '').lower()
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def iter_processed_text(self, file, encoding=None, base_url=None):
'''Return the file text and processed absolute links.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
base_url (str): The URL at which the document is located.
Returns:
iterator: Each item is a tuple:
1. str: The text
2. bool: Whether the text a link
'''
for text, is_link in self.iter_text(file, encoding):
if is_link and base_url:
new_link = urljoin_safe(base_url, text, allow_fragments=False)
if new_link:
yield (new_link, is_link)
else:
yield (new_link, False)
else:
yield (text, is_link) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.