text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_port_default(self):
    '''Return whether the URL is using the default port.

    Returns:
        bool: True when the scheme has a known default port and
        ``self.port`` equals it; False otherwise. (The previous version
        implicitly returned None for unknown schemes — both are falsy,
        so this stays backward-compatible while always returning a bool.)
    '''
    # .get() avoids the double lookup of "in" followed by subscripting.
    default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)

    # Guard against accidentally matching None == None when both the
    # scheme is unknown and the port is unset.
    return default_port is not None and default_port == self.port
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hostname_with_port(self):
    '''Return the host portion but omit default port if needed.'''
    default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)

    if not default_port:
        return ''

    assert '[' not in self.hostname
    assert ']' not in self.hostname

    # Bracket IPv6 literals so a trailing port suffix stays unambiguous.
    hostname = '[{}]'.format(self.hostname) if self.is_ipv6() \
        else self.hostname

    if default_port == self.port:
        return hostname

    return '{}:{}'.format(hostname, self.port)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _new_url_record(cls, request: Request) -> URLRecord:
    '''Return new empty URLRecord.'''
    record = URLRecord()

    # Seed the record from the request; counters start at zero.
    record.url = request.url_info.url
    record.status = Status.in_progress
    record.try_count = 0
    record.level = 0

    return record
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _server_begin_response_callback(self, response: Response):
    '''Pre-response callback handler.

    Stores the response on the item session, extracts cookies, asks the
    result rule what to do, and prepares the file writer session.

    Returns:
        bool: True when the result rule's action is ``Actions.NORMAL``.
    '''
    self._item_session.response = response

    # Cookies must be captured before the result rule inspects the session.
    if self._cookie_jar:
        self._cookie_jar.extract_cookies(response, self._item_session.request)

    action = self._result_rule.handle_pre_response(self._item_session)

    self._file_writer_session.process_response(response)

    return action == Actions.NORMAL
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _server_end_response_callback(self, respoonse: Response):
    '''Response callback handler.

    Logs the fetch result and dispatches to the matching result-rule
    handler based on the response status code.

    NOTE(review): the ``respoonse`` parameter name is a typo and the
    argument is unused — the session's stored response is read instead.
    Renaming could break keyword callers, so it is left unchanged.
    '''
    request = self._item_session.request
    response = self._item_session.response

    _logger.info(__(
        _('Fetched ‘{url}’: {status_code} {reason}. '
          'Length: {content_length} [{content_type}].'),
        url=request.url,
        status_code=response.status_code,
        reason=wpull.string.printable_str(response.reason),
        content_length=wpull.string.printable_str(
            response.fields.get('Content-Length', _('none'))),
        content_type=wpull.string.printable_str(
            response.fields.get('Content-Type', _('none'))),
    ))

    self._result_rule.handle_response(self._item_session)

    if response.status_code in WebProcessor.DOCUMENT_STATUS_CODES:
        # Status code indicates a document worth keeping: save and scrape.
        filename = self._file_writer_session.save_document(response)
        self._processing_rule.scrape_document(self._item_session)
        self._result_rule.handle_document(self._item_session, filename)
    elif response.status_code in WebProcessor.NO_DOCUMENT_STATUS_CODES:
        # Codes classified as "no document": discard without error handling.
        self._file_writer_session.discard_document(response)
        self._result_rule.handle_no_document(self._item_session)
    else:
        # Anything else is treated as a document error.
        self._file_writer_session.discard_document(response)
        self._result_rule.handle_document_error(self._item_session)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_stream(self):
    '''Create streams and commander.

    Acquires the control connection, wraps it in a ControlStream and
    Commander, and wires read/write observers so control-channel traffic
    is re-broadcast on the session's event dispatcher.

    Coroutine.
    '''
    assert not self._control_connection

    self._control_connection = yield from self._acquire_request_connection(self._request)
    self._control_stream = ControlStream(self._control_connection)
    self._commander = Commander(self._control_stream)

    # Mirror control-channel I/O onto the event dispatcher for observers.
    read_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_receive_data)
    self._control_stream.data_event_dispatcher.add_read_listener(read_callback)

    write_callback = functools.partial(self.event_dispatcher.notify, self.Event.control_send_data)
    self._control_stream.data_event_dispatcher.add_write_listener(write_callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _log_in(self):
    '''Connect and login.

    Falls back to ``anonymous``/``-wpull@`` credentials when none are
    supplied. Logins are cached per control connection so a reused
    connection is not re-authenticated.

    Raises:
        AuthenticationError: if the server rejects the login.

    Coroutine.
    '''
    username = self._request.url_info.username or self._request.username or 'anonymous'
    password = self._request.url_info.password or self._request.password or '-wpull@'

    cached_login = self._login_table.get(self._control_connection)

    if cached_login and cached_login == (username, password):
        _logger.debug('Reusing existing login.')
        return

    try:
        yield from self._commander.login(username, password)
    except FTPServerError as error:
        raise AuthenticationError('Login error: {}'.format(error)) \
            from error

    # Only record the login after it succeeded.
    self._login_table[self._control_connection] = (username, password)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self, request: Request) -> Response:
    '''Start a file or directory listing download.

    Args:
        request: Request.

    Returns:
        A Response populated with the initial data connection reply.

    Once the response is received, call :meth:`download`.

    Coroutine.
    '''
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')

    response = Response()

    # Establish the control connection and log in before any commands.
    yield from self._prepare_fetch(request, response)

    response.file_transfer_size = yield from self._fetch_size(request)

    if request.restart_value:
        try:
            yield from self._commander.restart(request.restart_value)
            response.restart_value = request.restart_value
        except FTPServerError:
            # REST not supported: fall back to downloading from the start.
            _logger.debug('Could not restart file.', exc_info=1)

    yield from self._open_data_stream()

    command = Command('RETR', request.file_path)

    yield from self._begin_stream(command)

    self._session_state = SessionState.file_request_sent

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_listing(self, request: Request) -> ListingResponse:
    '''Fetch a file listing.

    Args:
        request: Request.

    Returns:
        A listing response populated with the initial data connection
        reply.

    Once the response is received, call :meth:`download_listing`.

    Coroutine.
    '''
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')

    response = ListingResponse()

    yield from self._prepare_fetch(request, response)
    yield from self._open_data_stream()

    mlsd_command = Command('MLSD', self._request.file_path)
    list_command = Command('LIST', self._request.file_path)

    # Prefer the machine-readable MLSD; fall back to LIST when the
    # server reports the command is unknown or unimplemented.
    try:
        yield from self._begin_stream(mlsd_command)
        self._listing_type = 'mlsd'
    except FTPServerError as error:
        if error.reply_code in (ReplyCodes.syntax_error_command_unrecognized,
                                ReplyCodes.command_not_implemented):
            self._listing_type = None
        else:
            raise

    if not self._listing_type:
        # This code not in exception handler to avoid incorrect
        # exception chaining
        yield from self._begin_stream(list_command)
        self._listing_type = 'list'

    _logger.debug('Listing type is %s', self._listing_type)

    self._session_state = SessionState.directory_request_sent

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prepare_fetch(self, request: Request, response: Response):
    '''Prepare for a fetch.

    Binds the request/response pair to this session, (re)establishes the
    control connection, and logs in when a fresh connection was made.

    Coroutine.
    '''
    self._request = request
    self._response = response

    yield from self._init_stream()

    connection_closed = self._control_connection.closed()

    if connection_closed:
        # Stale connection: forget its cached login and reconnect.
        self._login_table.pop(self._control_connection, None)
        yield from self._control_stream.reconnect()

    request.address = self._control_connection.address

    connection_reused = not connection_closed
    self.event_dispatcher.notify(self.Event.begin_control, request, connection_reused=connection_reused)

    if connection_closed:
        # A new connection requires reading the banner and logging in.
        yield from self._commander.read_welcome_message()
        yield from self._log_in()

    self._response.request = request
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _begin_stream(self, command: Command):
    '''Start data stream transfer.'''
    reply = yield from self._commander.begin_stream(command)

    # Record the server's reply and announce the transfer start.
    self._response.reply = reply
    self.event_dispatcher.notify(self.Event.begin_transfer, self._response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_listing(self, file: Optional[IO],
                     duration_timeout: Optional[float]=None) -> \
        ListingResponse:
    '''Read file listings.

    Args:
        file: A file object or asyncio stream.
        duration_timeout: Maximum time in seconds of which the
            entire file must be read.

    Returns:
        A Response populated the file listings

    Be sure to call :meth:`start_file_listing` first.

    Coroutine.
    '''
    if self._session_state != SessionState.directory_request_sent:
        raise RuntimeError('File request not sent')

    self._session_state = SessionState.file_request_sent

    yield from self.download(file=file, rewind=False,
                             duration_timeout=duration_timeout)

    try:
        if self._response.body.tell() == 0:
            # Empty body: no entries to parse.
            listings = ()
        elif self._listing_type == 'mlsd':
            self._response.body.seek(0)

            machine_listings = wpull.protocol.ftp.util.parse_machine_listing(
                self._response.body.read().decode('utf-8',
                                                  errors='surrogateescape'),
                convert=True, strict=False
            )
            listings = list(
                wpull.protocol.ftp.util.machine_listings_to_file_entries(
                    machine_listings
                ))
        else:
            # LIST output: let the listing parser guess the dialect.
            self._response.body.seek(0)

            file = io.TextIOWrapper(self._response.body, encoding='utf-8',
                                    errors='surrogateescape')

            listing_parser = ListingParser(file=file)

            listings = list(listing_parser.parse_input())

            _logger.debug('Listing detected as %s', listing_parser.type)

            # We don't want the file to be closed when exiting this function
            file.detach()

    except (ListingError, ValueError) as error:
        raise ProtocolError(*error.args) from error

    self._response.files = listings

    self._response.body.seek(0)

    self._session_state = SessionState.response_received

    return self._response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _open_data_stream(self):
    '''Open the data stream connection.

    Coroutine.
    '''
    @asyncio.coroutine
    def connection_factory(address: Tuple[int, int]):
        # The commander supplies the (host, port) to dial; we acquire a
        # connection for it and remember it for the response metadata.
        self._data_connection = yield from self._acquire_connection(address[0], address[1])
        return self._data_connection

    self._data_stream = yield from self._commander.setup_data_stream(
        connection_factory
    )

    self._response.data_address = self._data_connection.address

    # Mirror data-channel I/O onto the event dispatcher for observers.
    read_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_receive_data)
    self._data_stream.data_event_dispatcher.add_read_listener(read_callback)

    write_callback = functools.partial(self.event_dispatcher.notify, self.Event.transfer_send_data)
    self._data_stream.data_event_dispatcher.add_write_listener(write_callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_size(self, request: Request) -> int:
    '''Return size of file.

    Returns None when the server cannot report a size.

    Coroutine.
    '''
    try:
        return (yield from self._commander.size(request.file_path))
    except FTPServerError:
        # SIZE is optional; treat failure as "size unknown".
        return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_refresh(text):
    '''Parses text for HTTP Refresh URL.

    Returns:
        str, None
    '''
    match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)

    if not match:
        return

    url = match.group(1)

    # Drop surrounding quotes of whichever flavor the document used.
    for quote_char in ('"', "'"):
        if url.startswith(quote_char):
            url = url.strip(quote_char)
            break

    return clean_link_soup(url)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_likely_inline(link):
    '''Return whether the link is likely to be inline.'''
    mime_type = mimetypes.guess_type(link, strict=False)[0]

    if not mime_type:
        # Unknown type: implicit None (falsy), matching no decision.
        return

    main_type, sub_type = mime_type.split('/', 1)

    return main_type in ('image', 'video', 'audio') \
        or sub_type == 'javascript'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_likely_link(text):
    '''Return whether the text is likely to be a link.

    This function assumes that leading/trailing whitespace has already been
    removed.

    Returns:
        bool
    '''
    text = text.lower()

    # Check for absolute or relative URLs
    url_prefixes = ('http://', 'https://', 'ftp://', '/', '//', '../')

    if text.startswith(url_prefixes) or text.endswith('/'):
        return True

    # Check if it has a alphanumeric file extension and not a decimal number
    dummy, dot, file_extension = text.rpartition('.')

    if dot and file_extension and len(file_extension) <= 4:
        extension_chars = frozenset(file_extension)

        if extension_chars \
                and extension_chars <= ALPHANUMERIC_CHARS \
                and not extension_chars <= NUMERIC_CHARS:
            # A bare domain ending in a common TLD is not a file link.
            if file_extension in COMMON_TLD:
                return False

            return bool(mimetypes.guess_type(text, strict=False)[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_unlikely_link(text):
    '''Return whether the text is likely to cause false positives.

    This function assumes that leading/trailing whitespace has already been
    removed.

    Returns:
        bool
    '''
    # Check for string concatenation in JavaScript
    if text[:1] in ',;+:' or text[-1:] in '.,;+:':
        return True

    # Check for unusual characters
    if re.search(r'''[\\$()'"[\]{}|<>`]''', text):
        return True

    # A lone dot-prefix that is not an explicit relative path.
    if text[:1] == '.' \
            and not text.startswith('./') \
            and not text.startswith('../'):
        return True

    if text in ('/', '//'):
        return True

    # Double slash that is neither a scheme separator nor protocol-relative.
    if '//' in text and '://' not in text and not text.startswith('//'):
        return True

    # Forbid strings like mimetypes
    if text in MIMETYPES:
        return True

    prefix, dummy, suffix = text.partition('.')

    if prefix in HTML_TAGS and suffix != 'html':
        return True

    # Forbid things where the first part of the path looks like a domain name
    if FIRST_PART_TLD_PATTERN.match(text):
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def identify_link_type(filename):
    '''Return link type guessed by filename extension.

    Returns:
        str: A value from :class:`.item.LinkType`.
    '''
    mime_type = mimetypes.guess_type(filename)[0]

    if not mime_type:
        return

    if mime_type == 'text/css':
        return LinkType.css

    if mime_type == 'application/javascript':
        return LinkType.javascript

    if mime_type == 'text/html' or mime_type.endswith('xml'):
        return LinkType.html

    if mime_type.startswith(('video', 'image', 'audio')) \
            or mime_type.endswith('shockwave-flash'):
        return LinkType.media
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new_encoded_stream(args, stream):
    '''Return a stream writer.'''
    # Wrap only when the user asked for ASCII-safe terminal output.
    if args.ascii_print:
        return wpull.util.ASCIIStreamWriter(stream)

    return stream
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _schedule(self):
    '''Schedule check function.'''
    if not self._running:
        return

    _logger.debug('Schedule check function.')
    self._call_later_handle = self._event_loop.call_later(
        self._timeout, self._check)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check(self):
    '''Check and close connection if needed.

    Closes the connection when no activity has been observed within the
    timeout window; otherwise re-schedules itself.
    '''
    _logger.debug('Check if timeout.')

    # The handle has fired; clear it so close() won't cancel a dead handle.
    self._call_later_handle = None

    if self._touch_time is not None:
        difference = self._event_loop.time() - self._touch_time
        _logger.debug('Time difference %s', difference)

        if difference > self._timeout:
            self._connection.close()
            self._timed_out = True

    # Keep polling while the connection remains open.
    if not self._connection.closed():
        self._schedule()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    '''Stop running timers.'''
    handle = self._call_later_handle

    if handle:
        handle.cancel()

    self._running = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def closed(self) -> bool:
    '''Return whether the connection is closed.'''
    # Missing streams count as closed; otherwise ask the reader for EOF.
    if not self.writer or not self.reader:
        return True

    return self.reader.at_eof()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
    '''Establish a connection.

    Opens the asyncio streams (reusing an existing socket if one was
    supplied), installs the close timer, and marks the connection as
    created.

    Raises:
        Exception: if the connection was closed and not reset first.

    Coroutine.
    '''
    _logger.debug(__('Connecting to {0}.', self._address))

    if self._state != ConnectionState.ready:
        raise Exception('Closed connection must be reset before reusing.')

    if self._sock:
        # An existing socket (e.g. for TLS upgrade) is wrapped directly.
        connection_future = asyncio.open_connection(
            sock=self._sock, **self._connection_kwargs()
        )
    else:
        # TODO: maybe we don't want to ignore flow-info and scope-id?
        host = self._address[0]
        port = self._address[1]
        connection_future = asyncio.open_connection(
            host, port, **self._connection_kwargs()
        )

    self.reader, self.writer = yield from \
        self.run_network_operation(
            connection_future,
            wait_timeout=self._connect_timeout,
            name='Connect')

    # Only install a real idle timer when a timeout is configured.
    if self._timeout is not None:
        self._close_timer = CloseTimer(self._timeout, self)
    else:
        self._close_timer = DummyCloseTimer()

    self._state = ConnectionState.created

    _logger.debug('Connected.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def readline(self) -> bytes:
    '''Read a line of data.

    Coroutine.
    '''
    assert self._state == ConnectionState.created, \
        'Expect conn created. Got {}.'.format(self._state)

    # The close timer aborts the read if the connection idles too long.
    with self._close_timer.with_timeout():
        data = yield from \
            self.run_network_operation(
                self.reader.readline(),
                close_timeout=self._timeout,
                name='Readline')

    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_network_operation(self, task, wait_timeout=None,
                          close_timeout=None,
                          name='Network operation'):
    '''Run the task and raise appropriate exceptions.

    Args:
        task: The coroutine or future to run.
        wait_timeout: Timeout enforced via ``asyncio.wait_for``.
        close_timeout: Timeout enforced via the connection close timer.
            Mutually exclusive with ``wait_timeout``.
        name: Label used in error messages.

    Translates low-level socket/SSL errors into this package's
    NetworkError hierarchy.

    Coroutine.
    '''
    if wait_timeout is not None and close_timeout is not None:
        raise Exception(
            'Cannot use wait_timeout and close_timeout at the same time')

    try:
        if close_timeout is not None:
            with self._close_timer.with_timeout():
                data = yield from task

            if self._close_timer.is_timeout():
                raise NetworkTimedOut(
                    '{name} timed out.'.format(name=name))
            else:
                return data
        elif wait_timeout is not None:
            data = yield from asyncio.wait_for(task, wait_timeout)
            return data
        else:
            return (yield from task)
    except asyncio.TimeoutError as error:
        self.close()
        raise NetworkTimedOut(
            '{name} timed out.'.format(name=name)) from error
    except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
            as error:
        self.close()
        raise SSLVerificationError(
            '{name} certificate error: {error}'
            .format(name=name, error=error)) from error
    except AttributeError as error:
        # NOTE(review): presumably an AttributeError here means the
        # reader/writer was torn down mid-operation — confirm.
        self.close()
        raise NetworkError(
            '{name} network error: connection closed unexpectedly: {error}'
            .format(name=name, error=error)) from error
    except (socket.error, ssl.SSLError, OSError, IOError) as error:
        self.close()

        # NetworkError subclasses of OSError pass through untouched.
        if isinstance(error, NetworkError):
            raise

        if error.errno == errno.ECONNREFUSED:
            raise ConnectionRefused(
                error.errno, os.strerror(error.errno)) from error

        # XXX: This quality case brought to you by OpenSSL and Python.
        # Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
        # routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
        error_string = str(error).lower()

        if 'certificate' in error_string or 'unknown ca' in error_string:
            raise SSLVerificationError(
                '{name} certificate error: {error}'
                .format(name=name, error=error)) from error
        else:
            if error.errno:
                raise NetworkError(
                    error.errno, os.strerror(error.errno)) from error
            else:
                raise NetworkError(
                    '{name} network error: {error}'
                    .format(name=name, error=error)) from error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_tls(self, ssl_context: Union[bool, dict, ssl.SSLContext]=True) \
        -> 'SSLConnection':
    '''Start client TLS on this connection and return SSLConnection.

    Coroutine
    '''
    # Reuse the underlying socket so TLS continues on the same TCP
    # connection instead of dialing a new one.
    sock = self.writer.get_extra_info('socket')

    ssl_conn = SSLConnection(
        self._address,
        ssl_context=ssl_context,
        hostname=self._hostname, timeout=self._timeout,
        connect_timeout=self._connect_timeout, bind_host=self._bind_host,
        bandwidth_limiter=self._bandwidth_limiter, sock=sock
    )
    yield from ssl_conn.connect()

    return ssl_conn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _verify_cert(self, sock: ssl.SSLSocket):
    '''Check if certificate matches hostname.

    Raises:
        SSLVerificationError: if a required certificate is missing or
            does not match the hostname.
    '''
    # Based on tornado.iostream.SSLIOStream
    # Needed for older OpenSSL (<0.9.8f) versions

    verify_mode = self._ssl_context.verify_mode

    assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED,
                           ssl.CERT_OPTIONAL), \
        'Unknown verify mode {}'.format(verify_mode)

    if verify_mode == ssl.CERT_NONE:
        return

    cert = sock.getpeercert()

    # Optional mode tolerates a missing certificate.
    if not cert and verify_mode == ssl.CERT_OPTIONAL:
        return

    if not cert:
        raise SSLVerificationError('No SSL certificate given')

    try:
        # NOTE(review): ssl.match_hostname is deprecated since Python 3.7
        # and removed in 3.12; consider SSLContext.check_hostname instead.
        ssl.match_hostname(cert, self._hostname)
    except ssl.CertificateError as error:
        raise SSLVerificationError('Invalid SSL certificate') from error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trim(self):
    '''Remove items that are expired or exceed the max size.'''
    now_time = time.time()

    # Drop expired entries from the front (oldest first).
    while self._seq and self._seq[0].expire_time < now_time:
        expired = self._seq.popleft()
        del self._map[expired.key]

    # Then enforce the size cap, evicting the oldest entries.
    if self._max_items:
        while self._seq and len(self._seq) > self._max_items:
            evicted = self._seq.popleft()
            del self._map[evicted.key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_path_session_id(path):
    '''Strip session ID from URL path.'''
    for pattern in SESSION_ID_PATH_PATTERNS:
        match = pattern.match(path)

        if match:
            # Keep the parts before and after the session-ID segment.
            path = match.group(1) + match.group(3)

    return path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrite(self, url_info: URLInfo) -> URLInfo:
    '''Rewrite the given URL.

    Optionally strips session IDs from the path/query and converts
    ``#!`` hash fragments into ``_escaped_fragment_`` query parameters.
    Non-HTTP(S) URLs are returned unchanged.
    '''
    if url_info.scheme not in ('http', 'https'):
        return url_info

    if self._session_id_enabled:
        url = '{scheme}://{authority}{path}?{query}#{fragment}'.format(
            scheme=url_info.scheme,
            authority=url_info.authority,
            path=strip_path_session_id(url_info.path),
            query=strip_query_session_id(url_info.query),
            fragment=url_info.fragment,
        )
        # Fall back to the original URL if the rewritten one won't parse.
        url_info = parse_url_or_log(url) or url_info

    if self._hash_fragment_enabled and url_info.fragment.startswith('!'):
        if url_info.query:
            url = '{}&_escaped_fragment_={}'.format(url_info.url,
                                                    url_info.fragment[1:])
        else:
            url = '{}?_escaped_fragment_={}'.format(url_info.url,
                                                    url_info.fragment[1:])

        url_info = parse_url_or_log(url) or url_info

    return url_info
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def parse_address(text: str) -> Tuple[str, int]:
'''Parse PASV address.'''
match = re.search(
r'\('
r'(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*,'
r'\s*(\d{1,3})\s*'
r'\)',
text)
if match:
return (
'{0}.{1}.{2}.{3}'.format(int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
int(match.group(4))
),
int(match.group(5)) << 8 | int(match.group(6))
)
else:
raise ValueError('No address found') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reply_code_tuple(code: int) -> Tuple[int, int, int]:
    '''Return the reply code as a tuple.

    Args:
        code: The reply code.

    Returns:
        Each item in the tuple is the digit.
    '''
    hundreds, remainder = divmod(code, 100)
    tens, ones = divmod(remainder, 10)

    return hundreds, tens, ones
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> \
        List[dict]:
    '''Parse machine listing.

    Args:
        text: The listing.
        convert: Convert sizes and dates.
        strict: Method of handling errors. ``True`` will raise
            ``ValueError``. ``False`` will ignore rows with errors.

    Returns:
        list: A list of dict of the facts defined in RFC 3659.
        The key names must be lowercase. The filename uses the key
        ``name``.
    '''
    # TODO: this function should be moved into the 'ls' package
    listing = []

    for line in text.splitlines(False):
        # Each line is a ';'-separated list of "fact=value" entries
        # followed by the filename (which has no '=' and a leading space).
        facts = line.split(';')
        row = {}
        filename = None

        for fact in facts:
            name, sep, value = fact.partition('=')

            if sep:
                name = name.strip().lower()
                value = value.strip().lower()

                if convert:
                    try:
                        value = convert_machine_list_value(name, value)
                    except ValueError:
                        if strict:
                            raise
                        # Non-strict: keep the raw string value.

                row[name] = value
            else:
                if name[0:1] == ' ':
                    # Is a filename
                    filename = name[1:]
                else:
                    # A fact without '=' is recorded with an empty value.
                    name = name.strip().lower()
                    row[name] = ''

        if filename:
            row['name'] = filename
            listing.append(row)
        elif strict:
            raise ValueError('Missing filename.')

    return listing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_machine_list_value(name: str, value: str) -> \
        Union[datetime.datetime, str, int]:
    '''Convert sizes and time values.

    Size will be ``int`` while time value will be :class:`datetime.datetime`.
    '''
    if name == 'size':
        return int(value)

    if name == 'modify':
        return convert_machine_list_time_val(value)

    # Any other fact stays a plain string.
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_machine_list_time_val(text: str) -> datetime.datetime:
    '''Convert RFC 3659 time-val to datetime objects.'''
    # TODO: implement fractional seconds
    text = text[:14]

    if len(text) != 14:
        raise ValueError('Time value not 14 chars')

    # Fixed-width YYYYMMDDHHMMSS; RFC 3659 time-vals are in UTC.
    fields = [int(text[start:start + width])
              for start, width in
              ((0, 4), (4, 2), (6, 2), (8, 2), (10, 2), (12, 2))]

    return datetime.datetime(*fields, tzinfo=datetime.timezone.utc)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def machine_listings_to_file_entries(listings: Iterable[dict]) -> \
        Iterable[FileEntry]:
    '''Convert results from parsing machine listings to FileEntry list.'''
    for row in listings:
        # 'name' is mandatory; the remaining facts are optional.
        yield FileEntry(
            row['name'],
            type=row.get('type'),
            size=row.get('size'),
            date=row.get('modify')
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reply_code(self):
    '''Return reply code.

    Returns None when the exception args do not carry an integer code.
    '''
    args = self.args

    if len(args) >= 2 and isinstance(args[1], int):
        return args[1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_producer_wrapper(self):
    '''Run the producer, if exception, stop engine.'''
    try:
        yield from self._producer.process()
    except Exception as error:
        if not isinstance(error, StopIteration):
            # Stop the workers so the producer exception will be handled
            # when we finally yield from this coroutine
            _logger.debug('Producer died.', exc_info=True)
            self.stop()
            raise
        else:
            # StopIteration is treated as a normal end-of-work signal.
            self.stop()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_cdx(file, encoding='utf8'):
    '''Iterate CDX file.

    Args:
        file (str): A file object.
        encoding (str): The encoding of the file.

    Returns:
        iterator: Each item is a dict that maps from field key to value.
    '''
    with codecs.getreader(encoding)(file) as stream:
        header_line = stream.readline()

        # The very first character doubles as the field separator.
        separator = header_line[0]

        first_key, *field_keys = header_line.strip().split(separator)

        if first_key != 'CDX':
            raise ValueError('CDX header not found.')

        for line in stream:
            yield dict(zip(field_keys, line.strip().split(separator)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_common_fields(self, warc_type: str, content_type: str):
    '''Set the required fields for the record.'''
    fields = self.fields

    fields[self.WARC_TYPE] = warc_type
    fields[self.CONTENT_TYPE] = content_type
    fields[self.WARC_DATE] = wpull.util.datetime_str()

    # Record IDs are angle-bracketed URN UUIDs.
    fields[self.WARC_RECORD_ID] = '<{0}>'.format(uuid.uuid4().urn)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_content_length(self):
    '''Find and set the content length.

    Sets ``Content-Length`` to the size of ``block_file`` ('0' when there
    is no block file), restoring the file offset afterwards.

    .. seealso:: :meth:`compute_checksum`.
    '''
    if not self.block_file:
        self.fields['Content-Length'] = '0'
        return

    # Seek to the end to measure; reset_file_offset restores the position.
    with wpull.util.reset_file_offset(self.block_file):
        wpull.util.seek_file_end(self.block_file)
        self.fields['Content-Length'] = str(self.block_file.tell())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_checksum(self, payload_offset: Optional[int]=None):
    '''Compute and add the checksum data to the record fields.

    This function also sets the content length.

    Args:
        payload_offset: When given, the payload digest covers only the
            bytes after this offset while the block digest covers all.
    '''
    if not self.block_file:
        self.fields['Content-Length'] = '0'
        return

    block_hasher = hashlib.sha1()
    payload_hasher = hashlib.sha1()

    with wpull.util.reset_file_offset(self.block_file):
        if payload_offset is not None:
            # Header bytes count toward the block digest only.
            data = self.block_file.read(payload_offset)
            block_hasher.update(data)

        while True:
            data = self.block_file.read(4096)

            if data == b'':
                break

            block_hasher.update(data)
            payload_hasher.update(data)

        # Measured before reset_file_offset restores the position.
        content_length = self.block_file.tell()

    content_hash = block_hasher.digest()

    self.fields['WARC-Block-Digest'] = 'sha1:{0}'.format(
        base64.b32encode(content_hash).decode()
    )

    if payload_offset is not None:
        payload_hash = payload_hasher.digest()
        self.fields['WARC-Payload-Digest'] = 'sha1:{0}'.format(
            base64.b32encode(payload_hash).decode()
        )

    self.fields['Content-Length'] = str(content_length)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_http_header(self) -> Response:
    '''Return the HTTP header.

    It only attempts to read the first 4 KiB of the payload.

    Returns:
        Response, None: Returns an instance of
        :class:`.http.request.Response` or None.
    '''
    with wpull.util.reset_file_offset(self.block_file):
        data = self.block_file.read(4096)

    # The header block ends at the first blank line.
    match = re.match(br'(.*?\r?\n\r?\n)', data)

    if not match:
        return

    status_line, dummy, field_str = match.group(1).partition(b'\n')

    try:
        version, code, reason = Response.parse_status_line(status_line)
    except ValueError:
        return

    response = Response(status_code=code, reason=reason, version=version)

    try:
        # Be lenient: malformed fields shouldn't abort record processing.
        response.fields.parse(field_str, strict=False)
    except ValueError:
        return

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_config(self):
    '''Write the parameters to a file for PhantomJS to read.'''
    # Flat JSON object mirroring self._params; tuple-valued sizes are
    # split into explicit width/height keys.
    param_dict = {
        'url': self._params.url,
        'snapshot_paths': self._params.snapshot_paths,
        'wait_time': self._params.wait_time,
        'num_scrolls': self._params.num_scrolls,
        'smart_scroll': self._params.smart_scroll,
        'snapshot': self._params.snapshot,
        'viewport_width': self._params.viewport_size[0],
        'viewport_height': self._params.viewport_size[1],
        'paper_width': self._params.paper_size[0],
        'paper_height': self._params.paper_size[1],
        'custom_headers': self._params.custom_headers,
        'page_settings': self._params.page_settings,
    }

    # Log filenames are optional; absolute paths because the PhantomJS
    # process may run with a different working directory.
    if self._params.event_log_filename:
        param_dict['event_log_filename'] = \
            os.path.abspath(self._params.event_log_filename)

    if self._params.action_log_filename:
        param_dict['action_log_filename'] = \
            os.path.abspath(self._params.action_log_filename)

    config_text = json.dumps(param_dict)

    self._config_file.write(config_text.encode('utf-8'))

    # Close it so the phantomjs process can read it on Windows
    self._config_file.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self, force: bool=False):
    '''Clean closed connections.

    Args:
        force: Clean connected and idle connections too.

    Coroutine.
    '''
    with (yield from self._lock):
        # Iterate over a snapshot because the set is mutated inside
        # the loop.
        for connection in tuple(self.ready):
            if force or connection.closed():
                connection.close()
                self.ready.remove(connection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    '''Forcibly close all connections.

    This instance will not be usable after calling this method.
    '''
    # Close everything, whether idle (ready) or checked out (busy).
    for connection in list(self.ready) + list(self.busy):
        connection.close()

    self._closed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def acquire(self) -> Connection:
    '''Register and return a connection.

    Blocks (cooperatively) until a connection is available or a new
    one may be created under ``max_connections``.

    Coroutine.
    '''
    assert not self._closed

    yield from self._condition.acquire()

    while True:
        if self.ready:
            # Reuse an idle connection first.
            connection = self.ready.pop()
            break
        elif len(self.busy) < self.max_connections:
            connection = self._connection_factory()
            break
        else:
            # At capacity: wait for release() to notify.
            yield from self._condition.wait()

    self.busy.add(connection)
    self._condition.release()

    return connection
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release(self, connection: Connection, reuse: bool=True):
    '''Unregister a connection.

    Args:
        connection: Connection instance returned from :meth:`acquire`.
        reuse: If True, the connection is made available for reuse.

    Coroutine.
    '''
    yield from self._condition.acquire()
    self.busy.remove(connection)

    if reuse:
        self.ready.add(connection)

    # Wake one waiter blocked in acquire().
    self._condition.notify()
    self._condition.release()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, host: str, port: int, use_ssl: bool=False,
            host_key: Optional[Any]=None) \
        -> Union[Connection, SSLConnection]:
    '''Return an available connection.

    Args:
        host: A hostname or IP address.
        port: Port number.
        use_ssl: Whether to return a SSL connection.
        host_key: If provided, it overrides the key used for per-host
            connection pooling. This is useful for proxies for example.

    Coroutine.
    '''
    assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))
    assert not self._closed

    yield from self._process_no_wait_releases()

    if use_ssl:
        connection_factory = functools.partial(
            self._ssl_connection_factory, hostname=host)
    else:
        connection_factory = functools.partial(
            self._connection_factory, hostname=host)

    # Wrap the raw factory so each connection does happy-eyeballs
    # dual-stack connecting.
    connection_factory = functools.partial(
        HappyEyeballsConnection, (host, port), connection_factory,
        self._resolver, self._happy_eyeballs_table,
        is_ssl=use_ssl
    )

    key = host_key or (host, port, use_ssl)

    # Track waiters per key so clean() does not delete a pool that a
    # coroutine is about to acquire from.
    with (yield from self._host_pools_lock):
        if key not in self._host_pools:
            host_pool = self._host_pools[key] = HostPool(
                connection_factory,
                max_connections=self._max_host_count
            )
            self._host_pool_waiters[key] = 1
        else:
            host_pool = self._host_pools[key]
            self._host_pool_waiters[key] += 1

    _logger.debug('Check out %s', key)

    connection = yield from host_pool.acquire()
    connection.key = key

    # TODO: Verify this assert is always true
    # assert host_pool.count() <= host_pool.max_connections
    # assert key in self._host_pools
    # assert self._host_pools[key] == host_pool

    with (yield from self._host_pools_lock):
        self._host_pool_waiters[key] -= 1

    return connection
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release(self, connection: Connection):
    '''Put a connection back in the pool.

    Coroutine.
    '''
    assert not self._closed

    key = connection.key
    host_pool = self._host_pools[key]

    _logger.debug('Check in %s', key)

    yield from host_pool.release(connection)

    # If the pool has grown beyond its global budget, evict idle (or,
    # when forced, all) connections.
    force = self.count() > self._max_count
    yield from self.clean(force=force)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def session(self, host: str, port: int, use_ssl: bool=False):
    '''Return a context manager that returns a connection.

    Usage::

        session = yield from connection_pool.session('example.com', 80)
        with session as connection:
            connection.write(b'blah')
            connection.close()

    Coroutine.
    '''
    connection = yield from self.acquire(host, port, use_ssl)

    @contextlib.contextmanager
    def context_wrapper():
        try:
            yield connection
        finally:
            # Return the connection without blocking the caller.
            self.no_wait_release(connection)

    return context_wrapper()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self, force: bool=False):
    '''Clean all closed connections.

    Args:
        force: Clean connected and idle connections too.

    Coroutine.
    '''
    assert not self._closed

    with (yield from self._host_pools_lock):
        # Snapshot because entries may be deleted while iterating.
        for key, pool in tuple(self._host_pools.items()):
            yield from pool.clean(force=force)

            # Only drop a host pool that is empty and has no pending
            # acquirers (tracked by _host_pool_waiters).
            if not self._host_pool_waiters[key] and pool.empty():
                del self._host_pools[key]
                del self._host_pool_waiters[key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    '''Close all the connections and clean up.

    This instance will not be usable after calling this method.
    '''
    # Drain the pool table, closing each per-host pool and dropping its
    # waiter counter alongside it.
    while self._host_pools:
        key, pool = self._host_pools.popitem()
        pool.close()
        del self._host_pool_waiters[key]

    self._closed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count(self) -> int:
    '''Return the total number of connections across all host pools.'''
    return sum(pool.count() for pool in self._host_pools.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_preferred(self, preferred_addr, addr_1, addr_2):
    '''Set the preferred address.

    The cache key is the address pair in sorted order so lookups are
    symmetric in the two addresses.
    '''
    key = (addr_1, addr_2) if addr_1 <= addr_2 else (addr_2, addr_1)
    self._cache[key] = preferred_addr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_preferred(self, addr_1, addr_2):
    '''Return the preferred address, or None if the pair is unknown.

    Lookup is symmetric: the key is the pair in sorted order.
    '''
    key = (addr_1, addr_2) if addr_1 <= addr_2 else (addr_2, addr_1)
    return self._cache.get(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _connect_dual_stack(self, primary_address, secondary_address):
    '''Connect using happy eyeballs.'''
    self._primary_connection = self._connection_factory(primary_address)
    self._secondary_connection = self._connection_factory(secondary_address)

    @asyncio.coroutine
    def connect_primary():
        yield from self._primary_connection.connect()
        return self._primary_connection

    @asyncio.coroutine
    def connect_secondary():
        yield from self._secondary_connection.connect()
        return self._secondary_connection

    primary_fut = connect_primary()
    secondary_fut = connect_secondary()

    failed = False

    # Take whichever connection finishes first; the loser is closed in
    # the background.
    for fut in asyncio.as_completed((primary_fut, secondary_fut)):
        if not self._active_connection:
            try:
                self._active_connection = yield from fut
            except NetworkError:
                # Tolerate one failure; a second failure is fatal.
                if not failed:
                    _logger.debug('Original dual stack exception', exc_info=True)
                    failed = True
                else:
                    raise
            else:
                _logger.debug('Got first of dual stack.')
        else:
            # A winner exists: consume the remaining future and close
            # its connection off the critical path.
            @asyncio.coroutine
            def cleanup():
                try:
                    conn = yield from fut
                except NetworkError:
                    pass
                else:
                    conn.close()
                    _logger.debug('Closed abandoned connection.')

            asyncio.get_event_loop().create_task(cleanup())

    preferred_host = self._active_connection.host

    # Remember the winner so the next attempt tries it first.
    self._happy_eyeballs_table.set_preferred(
        preferred_host, primary_address[0], secondary_address[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]:
    '''Return (primary, secondary) hosts chosen from DNS results.

    When only one address family resolved, the secondary slot is None.
    When a previously-recorded preference exists for the pair, only the
    preferred address is returned.
    '''
    ipv4_host = result.first_ipv4.ip_address if result.first_ipv4 else None
    ipv6_host = result.first_ipv6.ip_address if result.first_ipv6 else None

    if not ipv6_host:
        return ipv4_host, None
    if not ipv4_host:
        return ipv6_host, None

    preferred = self._happy_eyeballs_table.get_preferred(
        ipv4_host, ipv6_host)

    if preferred:
        return preferred, None

    return ipv4_host, ipv6_host
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _check_journals_and_maybe_raise(self):
'''Check if any journal files exist and raise an error.'''
files = list(glob.glob(self._prefix_filename + '*-wpullinc'))
if files:
raise OSError('WARC file {} is incomplete.'.format(files[0])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_new_warc_file(self, meta=False):
    '''Create and set as current WARC file.'''
    if self._params.max_size and not meta and self._params.appending:
        # When appending a sequence of size-capped files, skip over
        # sequence numbers whose files already exist on disk.
        while True:
            self._warc_filename = self._generate_warc_filename()

            if os.path.exists(self._warc_filename):
                _logger.debug('Skip {0}', self._warc_filename)
                self._sequence_num += 1
            else:
                break
    else:
        self._warc_filename = self._generate_warc_filename(meta=meta)

    _logger.debug('WARC file at {0}', self._warc_filename)

    if not self._params.appending:
        wpull.util.truncate_file(self._warc_filename)

    # Every new WARC file starts with a fresh warcinfo record.
    self._warcinfo_record = WARCRecord()
    self._populate_warcinfo(self._params.extra_fields)
    self.write_record(self._warcinfo_record)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _generate_warc_filename(self, meta=False):
'''Return a suitable WARC filename.'''
if self._params.max_size is None:
sequence_name = ''
elif meta:
sequence_name = '-meta'
else:
sequence_name = '-{0:05d}'.format(self._sequence_num)
if self._params.compress:
extension = 'warc.gz'
else:
extension = 'warc'
return '{0}{1}.{2}'.format(
self._prefix_filename, sequence_name, extension
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_new_cdx_file(self):
    '''Create and set current CDX file.'''
    self._cdx_filename = '{0}.cdx'.format(self._prefix_filename)

    if not self._params.appending:
        # Fresh run: empty the file and write the header.
        wpull.util.truncate_file(self._cdx_filename)
        self._write_cdx_header()
    elif not os.path.exists(self._cdx_filename):
        # Appending, but no existing CDX file: still needs a header.
        self._write_cdx_header()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _populate_warcinfo(self, extra_fields=None):
    '''Add the metadata to the Warcinfo record.

    Args:
        extra_fields: Optional iterable of (name, value) pairs appended
            to the warcinfo fields.
    '''
    self._warcinfo_record.set_common_fields(
        WARCRecord.WARCINFO, WARCRecord.WARC_FIELDS)

    info_fields = NameValueRecord(wrap_width=1024)
    info_fields['Software'] = self._params.software_string \
        or self.DEFAULT_SOFTWARE_STRING
    info_fields['format'] = 'WARC File Format 1.0'
    info_fields['conformsTo'] = \
        'http://bibnum.bnf.fr/WARC/WARC_ISO_28500_version1_latestdraft.pdf'

    if extra_fields:
        for name, value in extra_fields:
            info_fields.add(name, value)

    # The record block is the serialized field list plus a blank line.
    self._warcinfo_record.block_file = io.BytesIO(
        bytes(info_fields) + b'\r\n')
    self._warcinfo_record.compute_checksum()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_log(self):
    '''Set up the logging file.

    Attaches a gzip-backed handler to the root logger so the crawl log
    can later be embedded in the WARC as a resource record.
    '''
    logger = logging.getLogger()

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self._log_temp_file = NamedTemporaryFile(
        prefix='tmp-wpull-warc-',
        dir=self._params.temp_dir,
        suffix='.log.gz',
        delete=False,
    )
    self._log_temp_file.close()  # For Windows

    self._log_handler = handler = logging.StreamHandler(
        io.TextIOWrapper(
            gzip.GzipFile(
                filename=self._log_temp_file.name, mode='wb'
            ),
            encoding='utf-8'
        )
    )

    # The root logger must pass DEBUG records down so the handler can
    # do its own filtering (set to INFO below).
    logger.setLevel(logging.DEBUG)
    logger.debug('Wpull needs the root logger level set to DEBUG.')

    handler.setFormatter(formatter)
    logger.addHandler(handler)
    handler.setLevel(logging.INFO)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _move_file_to_dest_dir(self, filename):
    '''Move the file into the ``move_to`` directory.

    Args:
        filename: Path of the file to move (WARC or CDX).
    '''
    assert self._params.move_to

    if os.path.isdir(self._params.move_to):
        # Log the file actually being moved; the original logged
        # self._warc_filename even when moving other files (e.g. CDX).
        _logger.debug('Moved {} to {}.', filename,
                      self._params.move_to)
        shutil.move(filename, self._params.move_to)
    else:
        _logger.error('{} is not a directory; not moving {}.',
                      self._params.move_to, filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_length_and_maybe_checksums(self, record, payload_offset=None):
    '''Set the record's content length and, when digests are enabled,
    its checksums as well.'''
    if not self._params.digests:
        record.set_content_length()
    else:
        record.compute_checksum(payload_offset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_record(self, record):
    '''Append the record to the WARC file.

    A journal file is written beside the WARC while appending so an
    interrupted write can be detected on the next run; on write error
    the WARC file is rolled back to its previous length.
    '''
    # FIXME: probably not a good idea to modify arguments passed to us
    # TODO: add extra gzip headers that wget uses
    record.fields['WARC-Warcinfo-ID'] = self._warcinfo_record.fields[
        WARCRecord.WARC_RECORD_ID]

    _logger.debug('Writing WARC record {0}.',
                  record.fields['WARC-Type'])

    if self._params.compress:
        open_func = gzip.GzipFile
    else:
        open_func = open

    # Use getsize to get actual file size. Avoid tell() because it may
    # not be the raw file position.
    if os.path.exists(self._warc_filename):
        before_offset = os.path.getsize(self._warc_filename)
    else:
        before_offset = 0

    journal_filename = self._warc_filename + '-wpullinc'

    with open(journal_filename, 'w') as file:
        file.write('wpull-journal-version:1\n')
        file.write('offset:{}\n'.format(before_offset))

    try:
        with open_func(self._warc_filename, mode='ab') as out_file:
            for data in record:
                out_file.write(data)
    except (OSError, IOError) as error:
        # Template now actually uses the filename kwarg that is passed.
        _logger.info(
            _('Rolling back file {filename} to length {length}.'),
            filename=self._warc_filename, length=before_offset
        )
        # Open with 'r+b': 'wb' would truncate the file to zero first,
        # so the subsequent truncate() would zero-fill instead of
        # restoring the previous contents.
        if os.path.exists(self._warc_filename):
            with open(self._warc_filename, mode='r+b') as out_file:
                out_file.truncate(before_offset)

        raise error
    finally:
        os.remove(journal_filename)

    after_offset = os.path.getsize(self._warc_filename)

    if self._cdx_filename:
        raw_file_offset = before_offset
        raw_file_record_size = after_offset - before_offset

        self._write_cdx_field(
            record, raw_file_record_size, raw_file_offset
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    '''Close the WARC file and clean up any logging handlers.'''
    if self._log_temp_file:
        self._log_handler.flush()

        # Detach the gzip log handler from the root logger before
        # embedding the captured log as a WARC resource record.
        logger = logging.getLogger()
        logger.removeHandler(self._log_handler)
        self._log_handler.stream.close()

        log_record = WARCRecord()
        log_record.block_file = gzip.GzipFile(
            filename=self._log_temp_file.name
        )
        log_record.set_common_fields('resource', 'text/plain')
        log_record.fields['WARC-Target-URI'] = \
            'urn:X-wpull:log'

        if self._params.max_size is not None:
            if self._params.move_to is not None:
                self._move_file_to_dest_dir(self._warc_filename)

            # Sequenced output: the log goes into its own meta file.
            self._start_new_warc_file(meta=True)

        self.set_length_and_maybe_checksums(log_record)
        self.write_record(log_record)

        log_record.block_file.close()

        try:
            os.remove(self._log_temp_file.name)
        except OSError:
            _logger.exception('Could not close log temp file.')

        self._log_temp_file = None

        self._log_handler.close()
        self._log_handler = None

    if self._params.move_to is not None:
        self._move_file_to_dest_dir(self._warc_filename)

    if self._cdx_filename and self._params.move_to is not None:
        self._move_file_to_dest_dir(self._cdx_filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _write_cdx_header(self):
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR)
out_file.write(self.CDX_DELIMINATOR.join((
'CDX',
'a', 'b', 'm', 's',
'k', 'S', 'V', 'g',
'u'
)))
out_file.write('\n') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset):
    '''Write the CDX field if needed.

    Only HTTP response records are indexed; anything else returns
    without writing.
    '''
    if record.fields[WARCRecord.WARC_TYPE] != WARCRecord.RESPONSE \
            or not re.match(r'application/http; *msgtype *= *response',
                            record.fields[WARCRecord.CONTENT_TYPE]):
        return

    url = record.fields['WARC-Target-URI']

    _logger.debug('Writing CDX record {0}.', url)

    http_header = record.get_http_header()

    # '-' is the CDX placeholder for unavailable values.
    if http_header:
        mime_type = self.parse_mimetype(
            http_header.fields.get('Content-Type', '')
        ) or '-'
        response_code = str(http_header.status_code)
    else:
        mime_type = '-'
        response_code = '-'

    timestamp = str(int(
        wpull.util.parse_iso8601_str(record.fields[WARCRecord.WARC_DATE])
    ))

    checksum = record.fields.get('WARC-Payload-Digest', '')

    # Strip the algorithm prefix; only SHA-1 digests are indexed.
    if checksum.startswith('sha1:'):
        checksum = checksum.replace('sha1:', '', 1)
    else:
        checksum = '-'

    raw_file_record_size_str = str(raw_file_record_size)
    raw_file_offset_str = str(raw_file_offset)
    filename = os.path.basename(self._warc_filename)
    record_id = record.fields[WARCRecord.WARC_RECORD_ID]
    # Order must match the header written by _write_cdx_header().
    fields_strs = (
        url,
        timestamp,
        mime_type,
        response_code,
        checksum,
        raw_file_record_size_str,
        raw_file_offset_str,
        filename,
        record_id
    )

    with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
        out_file.write(self.CDX_DELIMINATOR.join(fields_strs))
        out_file.write('\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_mimetype(cls, value):
    '''Return the MIME type from a Content-Type string.

    Returns:
        str, None: A string in the form ``type/subtype`` or None.
    '''
    match = re.match(r'([a-zA-Z0-9-]+/[a-zA-Z0-9-]+)', value)
    return match.group(1) if match else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _new_temp_file(self, hint='warcrecsess'):
    '''Return new temp file.

    Args:
        hint: Filename hint passed through to the temp-file factory.
    '''
    return wpull.body.new_temp_file(
        directory=self._temp_dir, hint=hint
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _record_revisit(self, payload_offset: int):
    '''Record the revisit if possible.

    If the URL table knows a previous record with the same payload
    digest, the response record is truncated to its headers and turned
    into a WARC ``revisit`` record referencing that earlier record.
    '''
    fields = self._response_record.fields

    ref_record_id = self._url_table.get_revisit_id(
        fields['WARC-Target-URI'],
        fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
    )

    if ref_record_id:
        try:
            self._response_record.block_file.truncate(payload_offset)
        except TypeError:
            # Fallback for file objects whose truncate() does not accept
            # a size argument: rewrite the prefix manually.
            self._response_record.block_file.seek(0)
            data = self._response_record.block_file.read(payload_offset)
            self._response_record.block_file.truncate()
            self._response_record.block_file.seek(0)
            self._response_record.block_file.write(data)

        # Lengths/digests must be recomputed after the truncation.
        self._recorder.set_length_and_maybe_checksums(
            self._response_record
        )

        fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
        fields['WARC-Refers-To'] = ref_record_id
        fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
        fields['WARC-Truncated'] = 'length'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def increment(self, size: int):
    '''Record one downloaded file of ``size`` bytes.

    Args:
        size: The size of the file; must be non-negative.
    '''
    assert size >= 0, size

    self.files = self.files + 1
    self.size = self.size + size
    self.bandwidth_meter.feed(size)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_quota_exceeded(self) -> bool:
    '''Return whether the download quota is exceeded.

    Returns None (falsy) when no quota is configured or no URL table
    is attached.
    '''
    if not self.quota or self._url_table is None:
        return None

    return (self.size >= self.quota
            and self._url_table.get_root_url_todo_count() == 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def increment_error(self, error: Exception):
    '''Increment the error counter preferring base exceptions.

    The first matching class in ERROR_PRIORITIES wins; otherwise the
    error's own type is counted.
    '''
    _logger.debug('Increment error %s', error)

    counter_key = next(
        (error_class for error_class in ERROR_PRIORITIES
         if isinstance(error, error_class)),
        type(error)
    )
    self.errors[counter_key] += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def link_head(self, node):
    '''Add a node to the head.

    ``node`` must not already have a tail link; any existing head is
    re-linked so the chain stays consistent.
    '''
    assert not node.tail

    previous_head = self.head

    if previous_head:
        assert previous_head.tail == self
        previous_head.tail = node
        node.head = previous_head

    node.tail = self
    self.head = node
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def link_tail(self, node):
    '''Add a node to the tail.

    ``node`` must not already have a head link; any existing tail is
    re-linked so the chain stays consistent.
    '''
    assert not node.head

    previous_tail = self.tail

    if previous_tail:
        assert previous_tail.head == self
        previous_tail.head = node
        node.tail = previous_tail

    node.head = self
    self.tail = node
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unlink(self):
    '''Remove this node, splicing its neighbors together.'''
    previous_head, previous_tail = self.head, self.tail
    self.head = self.tail = None

    if previous_head:
        previous_head.tail = previous_tail

    if previous_tail:
        previous_tail.head = previous_head
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_for_send(self, full_url=False):
    '''Modify the request to be suitable for HTTP server.

    Args:
        full_url (bool): Use full URL as the URI. By default, only
            the path of the URL is given to the server.
    '''
    assert self.url
    assert self.method
    assert self.version

    url_info = self.url_info

    if 'Host' not in self.fields:
        self.fields['Host'] = url_info.hostname_with_port

    if full_url:
        self.resource_path = url_info.url
    elif url_info.query:
        self.resource_path = '{0}?{1}'.format(url_info.path, url_info.query)
    else:
        self.resource_path = url_info.path
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_response(cls, response):
    '''Return whether the document is likely to be CSS.'''
    content_type = response.fields.get('content-type', '').lower()

    if 'css' in content_type:
        return True

    # Stylesheet mistakenly served as HTML: sniff the body content.
    if response.body and 'html' in content_type:
        return cls.is_file(response.body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_file(cls, file):
    '''Return whether the file is likely CSS.'''
    peeked_data = wpull.string.printable_bytes(
        wpull.util.peek_file(file)).lower()

    if b'<html' in peeked_data:
        # VeryFalse is a falsy sentinel, distinguishable from a plain
        # False by callers that care about the strength of the verdict.
        return VeryFalse

    # Common CSS tokens: an @import or typical property declarations.
    if re.search(br'@import |color:|background[a-z-]*:|font[a-z-]*:',
                 peeked_data):
        return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    '''Begin fetching the next request.

    Coroutine.
    '''
    self._current_session = session = self._http_client.session()

    request = self.next_request()
    assert request

    # Pre-emptively authenticate if the URL carries credentials or the
    # host previously demanded them.
    if request.url_info.password or \
            request.url_info.hostname_with_port in self._hostnames_with_auth:
        self._add_basic_auth_header(request)

    response = yield from session.start(request)

    self._process_response(response)

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(self, file: Optional[IO[bytes]]=None,
             duration_timeout: Optional[float]=None):
    '''Download content.

    Args:
        file: An optional file object for the document contents.
        duration_timeout: Maximum time in seconds of which the
            entire file must be read.

    Returns:
        Response: An instance of :class:`.http.request.Response`.

    See :meth:`WebClient.session` for proper usage of this function.

    Coroutine.
    '''
    # Return the session's response as the docstring promises; the
    # original discarded the value of the yield-from expression.
    return (yield from self._current_session.download(
        file, duration_timeout=duration_timeout))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_response(self, response: Response):
    '''Handle the response and update the internal state.'''
    _logger.debug('Handling response')

    self._redirect_tracker.load(response)

    if self._redirect_tracker.is_redirect():
        self._process_redirect()
        self._loop_type = LoopType.redirect
    elif response.status_code == http.client.UNAUTHORIZED and self._next_request.password:
        # 401 with credentials available: retry with authentication.
        self._process_authentication(response)
    else:
        # Terminal response: no follow-up request needed.
        self._next_request = None
        self._loop_type = LoopType.normal

    if self._cookie_jar:
        self._extract_cookies(response)

        if self._next_request:
            self._add_cookies(self._next_request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_redirect(self):
    '''Update the Redirect Tracker.

    Raises:
        ProtocolError: Too many redirects, missing location, or an
            invalid redirect location.
    '''
    _logger.debug('Handling redirect.')

    if self._redirect_tracker.exceeded():
        raise ProtocolError('Too many redirects.')

    try:
        url = self._redirect_tracker.next_location()

        if not url:
            raise ProtocolError('Redirect location missing.')

        if self._redirect_tracker.is_repeat():
            # Repeat redirects (e.g. 307/308) re-send the same request
            # with only the URL changed.
            _logger.debug('Got redirect is repeat.')

            request = self._original_request.copy()
            request.url = url
        else:
            request = self._request_factory(url)

        request.prepare_for_send()
    except ValueError as error:
        raise ProtocolError('Invalid redirect location.') from error

    self._next_request = request

    _logger.debug('Updated next redirect request to {0}.'.format(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cookie_referrer_host(self):
    '''Return the referrer hostname, or None without a Referer header.'''
    referer = self._original_request.fields.get('Referer')
    return URLInfo.parse(referer).hostname if referer else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_cookies(self, request: Request):
    '''Add the cookie headers to the Request.'''
    self._cookie_jar.add_cookie_header(
        request, self._get_cookie_referrer_host()
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_cookies(self, response: Response):
    '''Load the cookie headers from the Response into the cookie jar.'''
    self._cookie_jar.extract_cookies(
        response, response.request, self._get_cookie_referrer_host()
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def session(self, request: Request) -> WebSession:
    '''Return a fetch session.

    Args:
        request: The request to be fetched.

    Example usage::

        client = WebClient()
        session = client.session(Request('http://www.example.com'))

        with session:
            while not session.done():
                request = session.next_request()
                print(request)

                response = yield from session.start()
                print(response)

                if session.done():
                    with open('myfile.html') as file:
                        yield from session.download(file)
                else:
                    yield from session.download()

    Returns:
        WebSession
    '''
    # Each session gets its own redirect tracker; the HTTP client,
    # request factory, and cookie jar are shared with this client.
    return WebSession(
        request,
        http_client=self._http_client,
        redirect_tracker=self._redirect_tracker_factory(),
        request_factory=self._request_factory,
        cookie_jar=self._cookie_jar,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_dir_path_url(url_info: URLInfo) -> str:
    '''Return URL string with the path replaced with directory only.

    The returned URL always ends with a slash.
    '''
    directory = posixpath.dirname(url_info.path)

    if directory.endswith('/'):
        # dirname already ends with '/' only at the root path.
        return 'ftp://{}{}'.format(url_info.hostname_with_port, directory)

    return 'ftp://{}{}/'.format(url_info.hostname_with_port, directory)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _prepare_request_file_vs_dir(self, request: Request) -> bool:
    '''Check if file, modify request, and return whether is a file.

    Coroutine.
    '''
    # Three-valued: True/False when the link type or a trailing slash
    # decides it, 'unknown' when the parent listing must be consulted.
    if self._item_session.url_record.link_type:
        is_file = self._item_session.url_record.link_type == LinkType.file
    elif request.url_info.path.endswith('/'):
        is_file = False
    else:
        is_file = 'unknown'

    if is_file == 'unknown':
        files = yield from self._fetch_parent_path(request)

        if not files:
            # Listing unavailable: assume file.
            return True

        filename = posixpath.basename(request.file_path)

        for file_entry in files:
            if file_entry.name == filename:
                _logger.debug('Found entry in parent. Type {}',
                              file_entry.type)
                is_file = file_entry.type != 'dir'
                break
        else:
            _logger.debug('Did not find entry. Assume file.')
            return True

    if not is_file:
        # Directories are fetched with a trailing slash.
        request.url = append_slash_to_path_url(request.url_info)
        _logger.debug('Request URL changed to {}. Path={}.',
                      request.url, request.file_path)

    return is_file
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_parent_path(self, request: Request, use_cache: bool=True):
    '''Fetch parent directory and return list FileEntry.

    Coroutine.

    Args:
        request: request whose parent directory should be listed.
        use_cache: when True, consult and populate the processor-wide
            listing cache keyed by directory URL.

    Returns:
        list: ``FileEntry`` items, or None when the listing failed.
    '''
    directory_url = to_dir_path_url(request.url_info)

    if use_cache:
        if directory_url in self._processor.listing_cache:
            # Cached value may be None (a previously failed listing);
            # return it as-is so failures are not retried.
            return self._processor.listing_cache[directory_url]

    # Clone the original request so auth/headers carry over, then point
    # it at the parent directory.
    directory_request = copy.deepcopy(request)
    directory_request.url = directory_url

    _logger.debug('Check if URL {} is file with {}.', request.url,
                  directory_url)

    with self._processor.ftp_client.session() as session:
        try:
            yield from session.start_listing(directory_request)
        except FTPServerError:
            _logger.debug('Got an error. Assume is file.')

            if use_cache:
                # Record the failure so subsequent calls short-circuit.
                self._processor.listing_cache[directory_url] = None

            return

        # The raw listing body goes into a throwaway temp file under the
        # app root; only the parsed entries are kept.
        temp_file = tempfile.NamedTemporaryFile(
            dir=self._item_session.app_session.root_path,
            prefix='tmp-wpull-list'
        )

        with temp_file as file:
            directory_response = yield from session.download_listing(
                file, duration_timeout=self._fetch_rule.duration_timeout)

    if use_cache:
        self._processor.listing_cache[directory_url] = \
            directory_response.files

    return directory_response.files
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_listing_links(self, response: ListingResponse):
    '''Add links from file listing response.

    Directory entries are enqueued with a trailing slash, regular files
    without one. Symlink entries are recreated on disk instead of
    followed when ``retr_symlinks`` is off.

    Args:
        response: the parsed directory listing to extract links from.
    '''
    base_url = response.request.url_info.url

    if self._glob_pattern:
        # Propagate the current record's level to globbed children —
        # presumably so glob matches don't count as deeper recursion;
        # TODO(review): confirm against LevelFilter behavior.
        level = self._item_session.url_record.level
    else:
        level = None

    for file_entry in response.files:
        # Skip entries not matching the requested glob (if any).
        if self._glob_pattern and \
                not fnmatch.fnmatchcase(file_entry.name, self._glob_pattern):
            continue

        if file_entry.type == 'dir':
            linked_url = urljoin_safe(base_url, file_entry.name + '/')
        elif file_entry.type in ('file', 'symlink', None):
            if not self._processor.fetch_params.retr_symlinks and \
                    file_entry.type == 'symlink':
                # Materialize the symlink locally rather than
                # downloading its target.
                self._make_symlink(file_entry.name, file_entry.dest)
                linked_url = None
            else:
                linked_url = urljoin_safe(base_url, file_entry.name)
        else:
            # Unknown entry type: nothing to follow.
            linked_url = None

        if linked_url:
            linked_url_info = parse_url_or_log(linked_url)

            if linked_url_info:
                # Apply the fetch rules before enqueueing the child.
                verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]

                if verdict:
                    if linked_url_info.path.endswith('/'):
                        self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.directory)
                    else:
                        self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.file, level=level)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_symlink(self, link_name: str, link_target: str):
    '''Make a symlink on the system.

    Args:
        link_name: filename of the symlink to create.
        link_target: path the symlink should point at.
    '''
    # Ask the writer session where a sibling resource would go; the
    # 'dummy' name is only used to discover the destination directory.
    path = self._file_writer_session.extra_resource_path('dummy')

    if not path:
        return

    symlink_path = os.path.join(os.path.dirname(path), link_name)

    _logger.debug('symlink {} -> {}', symlink_path, link_target)

    os.symlink(link_target, symlink_path)

    _logger.info(
        _('Created symbolic link {symlink_path} to target {symlink_target}.'),
        symlink_path=symlink_path,
        symlink_target=link_target
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_unix_permissions(self, request: Request, response: Response):
    '''Fetch and apply Unix permissions.

    Looks the downloaded file up in its parent directory listing and
    mirrors any permission bits found there onto the local file.

    Coroutine.

    Args:
        request: request that produced the download.
        response: response whose body file receives the permissions.
    '''
    entries = yield from self._fetch_parent_path(request)

    if not entries:
        return

    target_name = posixpath.basename(request.file_path)

    # Matching entries that actually carry permission info.
    matches = (
        entry for entry in entries
        if entry.name == target_name and entry.perm
    )

    for entry in matches:
        _logger.debug(
            'Set chmod {} o{:o}.',
            response.body.name, entry.perm
        )
        os.chmod(response.body.name, entry.perm)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_url_rewriter(cls, session: AppSession):
    '''Build URL rewriter if needed.

    Returns:
        A URLRewriter instance, or None when neither rewriting option
        is enabled.
    '''
    args = session.args

    if not (args.escaped_fragment or args.strip_session_id):
        # Nothing to rewrite; fall through to an implicit None.
        return

    return session.factory.new(
        'URLRewriter',
        hash_fragment=args.escaped_fragment,
        session_id=args.strip_session_id
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_url_filters(cls, session: AppSession):
    '''Create the URL filter instances.

    Each filter is constructed only when the corresponding command-line
    option is set; the list order is significant and preserved.

    Returns:
        A list of URL filter instances
    '''
    args = session.args

    url_filters = []

    if args.https_only:
        url_filters.append(HTTPSOnlyFilter())
    else:
        url_filters.append(SchemeFilter())

    url_filters.append(RecursiveFilter(
        enabled=args.recursive, page_requisites=args.page_requisites
    ))
    url_filters.append(FollowFTPFilter(follow=args.follow_ftp))

    if args.no_parent:
        url_filters.append(ParentFilter())

    if args.domains or args.exclude_domains:
        url_filters.append(
            BackwardDomainFilter(args.domains, args.exclude_domains)
        )

    if args.hostnames or args.exclude_hostnames:
        url_filters.append(
            HostnameFilter(args.hostnames, args.exclude_hostnames)
        )

    if args.tries:
        url_filters.append(TriesFilter(args.tries))

    # Same precedence as the original condition:
    # (level and recursive) or page_requisites_level.
    if args.level and args.recursive or args.page_requisites_level:
        url_filters.append(
            LevelFilter(args.level,
                        inline_max_depth=args.page_requisites_level)
        )

    if args.accept_regex or args.reject_regex:
        url_filters.append(RegexFilter(args.accept_regex, args.reject_regex))

    if args.include_directories or args.exclude_directories:
        url_filters.append(
            DirectoryFilter(
                args.include_directories, args.exclude_directories
            )
        )

    if args.accept or args.reject:
        url_filters.append(BackwardFilenameFilter(args.accept, args.reject))

    return url_filters
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.