text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_document_converter(cls, session: AppSession):
    '''Create and return the batch document converter.

    Returns ``None`` when link conversion (``--convert-links``) was not
    requested on the command line.
    '''
    if not session.args.convert_links:
        return

    # The converter rewrites links in downloaded documents via the URL
    # table so they point at the local copies.
    return session.factory.new(
        'BatchDocumentConverter',
        session.factory['HTMLParser'],
        session.factory['ElementWalker'],
        session.factory['URLTable'],
        backup=session.args.backup_converted
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _setup_logging(cls, args):
    '''Set up the root logger if needed.

    The root logger is set the appropriate level so the file and
    WARC logs work correctly.
    '''
    # Sanity-check that both the stdlib level constants and wpull's
    # own verbosity constants are ordered as the mapping below assumes.
    assert (
        logging.CRITICAL >
        logging.ERROR >
        logging.WARNING >
        logging.INFO >
        logging.DEBUG >
        logging.NOTSET
    )
    assert (
        LOG_VERY_QUIET >
        LOG_QUIET >
        LOG_NO_VERBOSE >
        LOG_VERBOSE >
        LOG_DEBUG
    )
    assert args.verbosity

    root_logger = logging.getLogger()
    current_level = root_logger.getEffectiveLevel()
    # Least severe level any configured output requires.  The checks
    # below are sequential (not elif) on purpose: a later, more
    # verbose requirement overrides an earlier one.
    min_level = LOG_VERY_QUIET

    if args.verbosity == LOG_QUIET:
        min_level = logging.ERROR

    # File and WARC logs need INFO records even in normal verbosity.
    if args.verbosity in (LOG_NO_VERBOSE, LOG_VERBOSE) \
            or args.warc_file \
            or args.output_file or args.append_output:
        min_level = logging.INFO

    if args.verbosity == LOG_DEBUG:
        min_level = logging.DEBUG

    # Only lower (never raise) the root logger level.
    if current_level > min_level:
        root_logger.setLevel(min_level)
        root_logger.debug(
            'Wpull needs the root logger level set to {0}.'
            .format(min_level)
        )

    if current_level <= logging.INFO:
        logging.captureWarnings(True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _setup_console_logger(cls, session: AppSession, args, stderr):
    '''Attach a console log handler to the root logger.

    The handler writes to an encoded wrapper around *stderr* and only
    passes records from the ``wpull`` logger hierarchy.
    '''
    stream = new_encoded_stream(args, stderr)

    handler = logging.StreamHandler(stream)
    session.console_log_handler = handler

    handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    # Default to INFO when no explicit verbosity was given.
    handler.setLevel(args.verbosity or logging.INFO)
    handler.addFilter(logging.Filter('wpull'))

    logging.getLogger().addHandler(handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _setup_file_logger(cls, session: AppSession, args): '''Set up the file message logger. A file log handler and with a formatter is added to the root logger. ''' if not (args.output_file or args.append_output): return logger = logging.getLogger() formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') if args.output_file: filename = args.output_file mode = 'w' else: filename = args.append_output mode = 'a' session.file_log_handler = handler = logging.FileHandler( filename, mode, encoding='utf-8') handler.setFormatter(formatter) logger.addHandler(handler) if args.verbosity == logging.DEBUG: handler.setLevel(logging.DEBUG) else: handler.setLevel(logging.INFO)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _run_driver(self, item_session: ItemSession, request, response):
    '''Run PhantomJS over the fetched item.  Coroutine.

    Builds a coprocessor session, runs it, and guarantees it is
    closed afterwards.
    '''
    _logger.debug('Started PhantomJS processing.')

    driver_session = PhantomJSCoprocessorSession(
        self._phantomjs_driver_factory, self._root_path,
        self._processing_rule, self._file_writer_session,
        request, response, item_session, self._phantomjs_params,
        self._warc_recorder
    )

    # contextlib.closing guarantees cleanup even if run() raises.
    with contextlib.closing(driver_session):
        yield from driver_session.run()

    _logger.debug('Ended PhantomJS processing.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _add_warc_action_log(self, path, url):
    '''Record the action log file as a metadata WARC record.'''
    _logger.debug('Adding action log record.')

    # The log file contains one JSON document per line.
    with open(path, 'r', encoding='utf-8', errors='replace') as file:
        actions = [json.loads(line) for line in file]

    log_data = json.dumps(
        {'actions': actions},
        indent=4,
    ).encode('utf-8')

    record = WARCRecord()
    self._action_warc_record = record
    record.set_common_fields('metadata', 'application/json')
    record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
        .format(wpull.url.percent_encode_query_value(url))
    record.block_file = io.BytesIO(log_data)

    self._warc_recorder.set_length_and_maybe_checksums(record)
    self._warc_recorder.write_record(record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _add_warc_snapshot(self, filename, url):
    '''Record a page snapshot file as a resource WARC record.'''
    _logger.debug('Adding snapshot record.')

    # Map the snapshot's extension to its MIME type; an unknown
    # extension is a programming error and raises KeyError.
    content_types = {
        '.pdf': 'application/pdf',
        '.html': 'text/html',
        '.png': 'image/png',
        '.gif': 'image/gif',
    }
    content_type = content_types[os.path.splitext(filename)[1]]

    record = WARCRecord()
    record.set_common_fields('resource', content_type)
    record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \
        .format(wpull.url.percent_encode_query_value(url))

    # Cross-reference the action log record, when one was written.
    if self._action_warc_record:
        record.fields['WARC-Concurrent-To'] = \
            self._action_warc_record.fields[WARCRecord.WARC_RECORD_ID]

    with open(filename, 'rb') as in_file:
        record.block_file = in_file
        self._warc_recorder.set_length_and_maybe_checksums(record)
        self._warc_recorder.write_record(record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _scrape_document(self):
    '''Extract links from the rendered DOM.

    Builds a mock response backed by the PhantomJS HTML snapshot and
    feeds it through the processing rule's document scraper.
    '''
    mock_response = self._new_mock_response(
        self._response, self._get_temp_path('phantom', '.html')
    )

    self._item_session.request = self._request
    self._item_session.response = mock_response

    # BUG FIX: the original passed an undefined local name
    # ``item_session`` (NameError at runtime); the item session is
    # held on ``self``.
    self._processing_rule.scrape_document(self._item_session)

    if mock_response.body:
        mock_response.body.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _new_mock_response(self, response, file_path):
    '''Return a copy of *response* whose body reads from *file_path*.

    Headers are copied and the Content-Type is forced to HTML in
    UTF-8 since the snapshot file is always written that way.
    '''
    mock_response = copy.copy(response)
    mock_response.body = Body(open(file_path, 'rb'))

    headers = NameValueRecord()

    for name, value in response.fields.get_all():
        headers.add(name, value)

    headers['Content-Type'] = 'text/html; charset="utf-8"'
    mock_response.fields = headers

    return mock_response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_ssl_context(cls, session: AppSession) -> ssl.SSLContext:
    '''Create the SSL options.

    The options must be accepted by the `ssl` module.
    '''
    args = session.args

    # Logic is based on tornado.netutil.ssl_options_to_context
    ssl_context = ssl.SSLContext(args.secure_protocol)

    if args.check_certificate:
        # Verify the peer against the CA bundle assembled by
        # _load_ca_certs().
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        cls._load_ca_certs(session)
        ssl_context.load_verify_locations(session.ca_certs_filename)
    else:
        ssl_context.verify_mode = ssl.CERT_NONE

    if args.strong_crypto:
        # Disable protocols/features with known attacks.
        ssl_context.options |= ssl.OP_NO_SSLv2
        ssl_context.options |= ssl.OP_NO_SSLv3  # POODLE

        if hasattr(ssl, 'OP_NO_COMPRESSION'):
            ssl_context.options |= ssl.OP_NO_COMPRESSION  # CRIME
        else:
            _logger.warning(_('Unable to disable TLS compression.'))

    if args.certificate:
        # Client certificate for mutual TLS.
        ssl_context.load_cert_chain(args.certificate, args.private_key)

    if args.edg_file:
        # Seed OpenSSL's RNG from an EGD socket.
        ssl.RAND_egd(args.edg_file)

    if args.random_file:
        with open(args.random_file, 'rb') as in_file:
            # Use 16KB because Wget
            ssl.RAND_add(in_file.read(15360), 0.0)

    return ssl_context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _load_ca_certs(cls, session: AppSession, clean: bool=True):
    '''Load the Certificate Authority certificates.

    Certificates are gathered from the bundled CA file, the
    ``--ca-directory`` entries and the ``--ca-certificate`` file, then
    written to a temporary PEM file whose path is stored on the
    session.

    Args:
        session: The application session.
        clean: If True, delete the temporary file at interpreter exit.
    '''
    args = session.args

    if session.ca_certs_filename:
        # Already assembled for this session.
        return session.ca_certs_filename

    certs = set()

    if args.use_internal_ca_certs:
        pem_filename = os.path.join(
            os.path.dirname(__file__), '..', '..', 'cert', 'ca-bundle.pem'
        )
        certs.update(cls._read_pem_file(pem_filename, from_package=True))

    if args.ca_directory:
        if os.path.isdir(args.ca_directory):
            for filename in os.listdir(args.ca_directory):
                # BUG FIX: os.listdir() yields bare names; they must be
                # joined with the directory before testing/reading,
                # otherwise every certificate was silently skipped.
                path = os.path.join(args.ca_directory, filename)

                if os.path.isfile(path):
                    certs.update(cls._read_pem_file(path))
        else:
            _logger.warning(__(
                _('Certificate directory {path} does not exist.'),
                path=args.ca_directory
            ))

    if args.ca_certificate:
        if os.path.isfile(args.ca_certificate):
            certs.update(cls._read_pem_file(args.ca_certificate))
        else:
            _logger.warning(__(
                _('Certificate file {path} does not exist.'),
                path=args.ca_certificate
            ))

    # BUG FIX: close the file descriptor returned by mkstemp() so it
    # is not leaked; only the path is needed afterwards.
    certs_fd, certs_filename = tempfile.mkstemp(
        suffix='.pem', prefix='tmp-wpull-')
    os.close(certs_fd)
    session.ca_certs_filename = certs_filename

    def clean_certs_file():
        os.remove(certs_filename)

    if clean:
        atexit.register(clean_certs_file)

    with open(certs_filename, 'w+b') as certs_file:
        for cert in certs:
            certs_file.write(cert)

    _logger.debug('CA certs loaded.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_pem_file(cls, filename, from_package=False):
    '''Read certificates from a PEM file.

    Returns:
        iterable: An iterable of certificates. The certificate data
        is :class:`byte`.
    '''
    _logger.debug('Reading PEM {0}.'.format(filename))

    if from_package:
        # Bundled files are read through the package data loader.
        return wpull.util.filter_pem(wpull.util.get_package_data(filename))

    with open(filename, 'rb') as in_file:
        data = in_file.read()

    return wpull.util.filter_pem(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def start(self, request: Request) -> Response:
    '''Begin a HTTP request

    Args:
        request: Request information.

    Returns:
        A response populated with the HTTP headers.

    Once the headers are received, call :meth:`download`.

    Coroutine.
    '''
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session already started')

    assert not self._request
    self._request = request
    _logger.debug(__('Client fetch request {0}.', request))

    # Reuse or open a connection for this request's host.
    connection = yield from self._acquire_request_connection(request)
    # A non-tunneled proxy needs the absolute URL in the request line.
    full_url = connection.proxied and not connection.tunneled

    self._stream = stream = self._stream_factory(connection)

    yield from self._stream.reconnect()

    request.address = connection.address

    # Notify listeners and mirror the outgoing bytes to them while
    # the request (and optional body) is written.
    self.event_dispatcher.notify(self.Event.begin_request, request)
    write_callback = functools.partial(
        self.event_dispatcher.notify, self.Event.request_data)
    stream.data_event_dispatcher.add_write_listener(write_callback)

    yield from stream.write_request(request, full_url=full_url)

    if request.body:
        # Content-Length must already be set by the caller.
        assert 'Content-Length' in request.fields
        length = int(request.fields['Content-Length'])
        yield from stream.write_body(request.body, length=length)

    stream.data_event_dispatcher.remove_write_listener(write_callback)
    self.event_dispatcher.notify(self.Event.end_request, request)

    # Mirror incoming bytes to listeners while reading the headers.
    read_callback = functools.partial(
        self.event_dispatcher.notify, self.Event.response_data)
    stream.data_event_dispatcher.add_read_listener(read_callback)

    self._response = response = yield from stream.read_response()
    response.request = request

    self.event_dispatcher.notify(self.Event.begin_response, response)

    self._session_state = SessionState.request_sent

    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_bytes(instance, encoding='utf-8', error='strict'):
    '''Convert an instance recursively to bytes.

    Strings (anything with ``encode``) are encoded; lists, tuples and
    dicts are rebuilt with converted members; everything else is
    returned unchanged.
    '''
    if isinstance(instance, bytes):
        return instance
    elif hasattr(instance, 'encode'):
        return instance.encode(encoding, error)
    elif isinstance(instance, list):
        return [to_bytes(item, encoding, error) for item in instance]
    elif isinstance(instance, tuple):
        return tuple(to_bytes(item, encoding, error) for item in instance)
    elif isinstance(instance, dict):
        return {
            to_bytes(key, encoding, error): to_bytes(value, encoding, error)
            for key, value in instance.items()
        }

    return instance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_str(instance, encoding='utf-8'):
    '''Convert an instance recursively to string.

    Bytes (anything with ``decode``) are decoded; lists, tuples and
    dicts are rebuilt with converted members; everything else is
    returned unchanged.
    '''
    if isinstance(instance, str):
        return instance
    elif hasattr(instance, 'decode'):
        return instance.decode(encoding)
    elif isinstance(instance, list):
        return [to_str(item, encoding) for item in instance]
    elif isinstance(instance, tuple):
        return tuple(to_str(item, encoding) for item in instance)
    elif isinstance(instance, dict):
        return {
            to_str(key, encoding): to_str(value, encoding)
            for key, value in instance.items()
        }

    return instance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def detect_encoding(data, encoding=None, fallback='latin1', is_html=False):
    '''Detect the character encoding of the data.

    Returns:
        str: The name of the codec

    Raises:
        ValueError: The codec could not be detected. This error can
        only occur if fallback is not a "lossless" codec.
    '''
    if encoding:
        encoding = normalize_codec_name(encoding)

    detector = EncodingDetector(
        data,
        override_encodings=(encoding,) if encoding else (),
        is_html=is_html
    )

    # Try the detector's guesses first, then the fallback codec.
    for candidate in itertools.chain(detector.encodings, (fallback,)):
        if not candidate:
            continue

        candidate = normalize_codec_name(candidate)

        if not candidate:
            continue

        if candidate == 'ascii' and fallback != 'ascii':
            # it's never ascii :)
            # Falling back on UTF-8/CP-1252/Latin-1 reduces chance of
            # failure
            continue

        if try_decoding(data, candidate):
            return candidate

    raise ValueError('Unable to detect encoding.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def try_decoding(data, encoding):
    '''Return whether the Python codec could decode the data.'''
    try:
        data.decode(encoding, 'strict')
    except UnicodeError:
        # Data under 16 bytes is very unlikely to be truncated
        if len(data) > 16:
            # The data may simply end in a truncated multi-byte
            # sequence; retry with up to 3 trailing bytes removed.
            for trim in (1, 2, 3):
                trimmed = data[:-trim]

                if not trimmed:
                    continue

                try:
                    trimmed.decode(encoding, 'strict')
                except UnicodeError:
                    continue

                return True

        return False

    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def format_size(num, format_str='{num:.1f} {unit}'):
    '''Format the file size into a human readable text.

    http://stackoverflow.com/a/1094933/1524507
    '''
    # Scale down through binary units until the value fits.
    for unit in ('B', 'KiB', 'MiB', 'GiB'):
        if -1024 < num < 1024:
            return format_str.format(num=num, unit=unit)

        num /= 1024.0

    return format_str.format(num=num, unit='TiB')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def printable_str(text, keep_newlines=False):
    '''Escape any control or non-ASCII characters from string.

    This function is intended for use with strings from an untrusted
    source such as writing to a console or writing to logs. It is
    designed to prevent things like ANSI escape sequences from
    showing.

    Use :func:`repr` or :func:`ascii` instead for things such as
    Exception messages.
    '''
    escaped = ascii(text)

    # ascii() wraps str results in quotes; strip them off.
    if isinstance(text, str):
        escaped = escaped[1:-1]

    if keep_newlines:
        escaped = escaped.replace('\\r', '\r').replace('\\n', '\n')

    return escaped
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_status(self):
    '''Print an entire status line including bar and stats.'''
    self._clear_line()
    self._print(' ')

    # Known total size: percentage plus bar.  Unknown: throbber.
    if self.max_value:
        self._print_percent()
        self._print(' ')
        self._print_bar()
    else:
        self._print_throbber()

    self._print(' ')

    # Byte transfers get a formatted size; other measurements print
    # the raw counter.
    if self.measurement == Measurement.bytes:
        self._print_size_downloaded()
    else:
        self._print(self.current_value)

    self._print(' ')
    self._print_duration()
    self._print(' ')

    # Speed only makes sense for byte transfers.
    if self.measurement == Measurement.bytes:
        self._print_speed()

    self._flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_throbber(self): '''Print an indefinite progress bar.''' self._print('[') for position in range(self._bar_width): self._print('O' if position == self._throbber_index else ' ') self._print(']') self._throbber_index = next(self._throbber_iter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_bar(self): '''Print a progress bar.''' self._print('[') for position in range(self._bar_width): position_fraction = position / (self._bar_width - 1) position_bytes = position_fraction * self.max_value if position_bytes < (self.continue_value or 0): self._print('+') elif position_bytes <= (self.continue_value or 0) + self.current_value: self._print('=') else: self._print(' ') self._print(']')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_duration(self): '''Print the elapsed download time.''' duration = int(time.time() - self._start_time) self._print(datetime.timedelta(seconds=duration))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_speed(self):
    '''Print the current speed.'''
    if not self._bandwidth_meter.num_samples:
        # Nothing transferred yet.
        self._print(_('-- B/s'))
        return

    speed = self._bandwidth_meter.speed()

    if self._human_format:
        file_size_str = wpull.string.format_size(speed)
    else:
        # Raw bits per second.
        file_size_str = '{:.1f} b'.format(speed * 8)

    self._print(_('{preformatted_file_size}/s').format(
        preformatted_file_size=file_size_str
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _print_percent(self): '''Print how much is done in percentage.''' fraction_done = ((self.continue_value or 0 + self.current_value) / self.max_value) self._print('{fraction_done:.1%}'.format(fraction_done=fraction_done))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def register(self, name: str):
    '''Register a named hook so a callback may later be connected.

    Raises:
        ValueError: The hook name is already registered.
    '''
    if name in self._callbacks:
        raise ValueError('Hook already registered')

    # Registered but not yet connected.
    self._callbacks[name] = None

    dispatcher = self._event_dispatcher

    if dispatcher is not None:
        dispatcher.register(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def unregister(self, name: str):
    '''Remove a previously registered hook.'''
    del self._callbacks[name]

    dispatcher = self._event_dispatcher

    if dispatcher is not None:
        dispatcher.unregister(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def connect(self, name, callback):
    '''Attach *callback* to the hook *name*.

    Raises:
        HookAlreadyConnectedError: A callback is already attached.
    '''
    if self._callbacks[name]:
        raise HookAlreadyConnectedError('Callback hook already connected.')

    self._callbacks[name] = callback
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def call(self, name: str, *args, **kwargs):
    '''Invoke the callback attached to the hook *name*.

    Also notifies the event dispatcher, when one is attached.

    Raises:
        HookDisconnected: No callback has been connected.
    '''
    if self._event_dispatcher is not None:
        self._event_dispatcher.notify(name, *args, **kwargs)

    callback = self._callbacks[name]

    if not callback:
        raise HookDisconnected('No callback is connected.')

    return callback(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_crawl_delay(self, user_agent): """Returns a float representing the crawl delay specified for this user agent, or None if the crawl delay was unspecified or not a float. """
# See is_allowed() comment about the explicit unicode conversion. if (PY_MAJOR_VERSION < 3) and (not isinstance(user_agent, unicode)): user_agent = user_agent.decode() for ruleset in self.__rulesets: if ruleset.does_user_agent_match(user_agent): return ruleset.crawl_delay return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, s): """Parses the passed string as a set of robots.txt rules."""
self._sitemaps = [ ]
self.__rulesets = [ ]

# Accept bytes/bytearray (Python 3) or non-unicode str (Python 2);
# robots.txt is decoded as ISO-8859-1 per the de facto standard.
if (PY_MAJOR_VERSION > 2) and (isinstance(s, bytes) or isinstance(s, bytearray)) or \
   (PY_MAJOR_VERSION == 2) and (not isinstance(s, unicode)):
    s = s.decode("iso-8859-1")

# Normalize newlines.
s = _end_of_line_regex.sub("\n", s)

lines = s.split("\n")

previous_line_was_a_user_agent = False
current_ruleset = None

for line in lines:
    line = line.strip()

    if line and line[0] == '#':
        # "Lines containing only a comment are discarded completely,
        # and therefore do not indicate a record boundary." (MK1994)
        pass
    else:
        # Remove comments
        i = line.find("#")
        if i != -1:
            line = line[:i]

        line = line.strip()

        if not line:
            # An empty line indicates the end of a ruleset.
            if current_ruleset and current_ruleset.is_not_empty():
                self.__rulesets.append(current_ruleset)

            current_ruleset = None
            previous_line_was_a_user_agent = False
        else:
            # Each non-empty line falls into one of six categories:
            # 1) User-agent: blah blah blah
            # 2) Disallow: blah blah blah
            # 3) Allow: blah blah blah
            # 4) Crawl-delay: blah blah blah
            # 5) Sitemap: blah blah blah
            # 6) Everything else
            # 1 - 5 are interesting and I find them with the regex
            # below. Category 6 I discard as directed by the MK1994
            # ("Unrecognised headers are ignored.")
            # Note that 4 & 5 are specific to GYM2008 syntax, but
            # respecting them here is not a problem. They're just
            # additional information the the caller is free to ignore.
            matches = _directive_regex.findall(line)

            # Categories 1 - 5 produce two matches, #6 produces none.
            if matches:
                field, data = matches[0]
                field = field.lower()
                data = _scrub_data(data)

                # Matching "useragent" is a deviation from the
                # MK1994/96 which permits only "user-agent".
                if field in ("useragent", "user-agent"):
                    if previous_line_was_a_user_agent:
                        # Add this UA to the current ruleset
                        if current_ruleset and data:
                            current_ruleset.add_robot_name(data)
                    else:
                        # Save the current ruleset and start a new one.
                        if current_ruleset and current_ruleset.is_not_empty():
                            self.__rulesets.append(current_ruleset)
                        #else:
                            # (is_not_empty() == False) ==> malformed
                            # robots.txt listed a UA line but provided
                            # no name or didn't provide any rules
                            # for a named UA.
                        current_ruleset = _Ruleset()
                        if data:
                            current_ruleset.add_robot_name(data)

                    previous_line_was_a_user_agent = True
                elif field == "allow":
                    previous_line_was_a_user_agent = False
                    if current_ruleset:
                        current_ruleset.add_allow_rule(data)
                elif field == "sitemap":
                    previous_line_was_a_user_agent = False
                    self._sitemaps.append(data)
                elif field == "crawl-delay":
                    # Only Yahoo documents the syntax for Crawl-delay.
                    # ref: http://help.yahoo.com/l/us/yahoo/search/webcrawler/slurp-03.html
                    previous_line_was_a_user_agent = False
                    if current_ruleset:
                        try:
                            current_ruleset.crawl_delay = float(data)
                        except ValueError:
                            # Invalid crawl-delay -- ignore.
                            pass
                else:
                    # This is a disallow line
                    previous_line_was_a_user_agent = False
                    if current_ruleset:
                        current_ruleset.add_disallow_rule(data)

# Flush the last ruleset being built, if any.
if current_ruleset and current_ruleset.is_not_empty():
    self.__rulesets.append(current_ruleset)

# Now that I have all the rulesets, I want to order them in a way
# that makes comparisons easier later. Specifically, any ruleset that
# contains the default user agent '*' should go at the end of the list
# so that I only apply the default as a last resort. According to
# MK1994/96, there should only be one ruleset that specifies * as the
# user-agent, but you know how these things go.
not_defaults = [r for r in self.__rulesets if not r.is_default()]
defaults = [r for r in self.__rulesets if r.is_default()]
self.__rulesets = not_defaults + defaults
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close_stream_on_error(func):
    '''Decorator to close stream on error.

    Wraps a coroutine method so that ``self.close`` is invoked when
    the wrapped coroutine raises.
    '''
    @asyncio.coroutine
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # close_on_error calls self.close if the body raises.
        with wpull.util.close_on_error(self.close):
            return (yield from func(self, *args, **kwargs))

    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def url_to_filename(url, index='index.html', alt_char=False):
    '''Return a filename from a URL.

    Args:
        url (str): The URL.
        index (str): If a filename could not be derived from the URL
            path, use index instead. For example, ``/images/`` will
            return ``index.html``.
        alt_char (bool): If True, the character for the query
            deliminator will be ``@`` intead of ``?``.

    This function does not include the directories and does not
    sanitize the filename.

    Returns:
        str
    '''
    assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))

    split_result = urllib.parse.urlsplit(url)
    # Last path component, falling back to the index name.
    filename = split_result.path.rsplit('/', 1)[-1] or index

    if split_result.query:
        delimiter = '@' if alt_char else '?'
        filename = '{0}{1}{2}'.format(
            filename, delimiter, split_result.query
        )

    return filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def url_to_dir_parts(url, include_protocol=False, include_hostname=False,
                     alt_char=False):
    '''Return a list of directory parts from a URL.

    Args:
        url (str): The URL.
        include_protocol (bool): If True, the scheme from the URL will
            be included.
        include_hostname (bool): If True, the hostname from the URL
            will be included.
        alt_char (bool): If True, the character for the port
            deliminator will be ``+`` intead of ``:``.

    This function does not include the filename and the paths are not
    sanitized.

    Returns:
        list
    '''
    assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))

    split_result = urllib.parse.urlsplit(url)
    parts = []

    if include_protocol:
        parts.append(split_result.scheme)

    if include_hostname:
        hostname = split_result.hostname

        if split_result.port:
            delimiter = '+' if alt_char else ':'
            hostname = '{0}{1}{2}'.format(
                hostname, delimiter, split_result.port
            )

        parts.append(hostname)

    parts.extend(part for part in split_result.path.split('/') if part)

    if not url.endswith('/') and parts:
        # The last component is the filename, not a directory.
        parts.pop()

    return parts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def safe_filename(filename, os_type='unix', no_control=True, ascii_only=True,
                  case=None, encoding='utf8', max_length=None):
    '''Return a safe filename or path part.

    Args:
        filename (str): The filename or path component.
        os_type (str): If ``unix``, escape the slash. If ``windows``,
            escape extra Windows characters.
        no_control (bool): If True, escape control characters.
        ascii_only (bool): If True, escape non-ASCII characters.
        case (str): If ``lower``, lowercase the string. If ``upper``,
            uppercase the string.
        encoding (str): The character encoding.
        max_length (int): The maximum length of the filename.

    This function assumes that `filename` has not already been
    percent-encoded.

    Returns:
        str
    '''
    assert isinstance(filename, str), \
        'Expect str. Got {}.'.format(type(filename))

    if filename in ('.', os.curdir):
        new_filename = '%2E'
    # BUG FIX: this branch previously repeated '.' instead of '..',
    # so the parent-directory name was never escaped.
    elif filename in ('..', os.pardir):
        new_filename = '%2E%2E'
    else:
        unix = os_type == 'unix'
        windows = os_type == 'windows'
        encoder_args = (unix, no_control, windows, ascii_only)

        # Encoders are cached since building one is comparatively
        # expensive and the same options repeat for every file.
        if encoder_args not in _encoder_cache:
            _encoder_cache[encoder_args] = PercentEncoder(
                unix=unix, control=no_control, windows=windows,
                ascii_=ascii_only
            )

        encoder = _encoder_cache[encoder_args]
        encoded_filename = filename.encode(encoding)
        new_filename = encoder.quote(encoded_filename).decode(encoding)

    if os_type == 'windows':
        # Windows forbids trailing spaces and dots.
        if new_filename[-1] in ' .':
            # BUG FIX: ``{:02X}`` cannot format a str and raised
            # ValueError; encode the character's code point instead.
            new_filename = '{0}{1:02X}'.format(
                new_filename[:-1], ord(new_filename[-1])
            )

    if max_length and len(new_filename) > max_length:
        # Truncate and append a digest so distinct names stay distinct.
        hash_obj = hashlib.sha1(new_filename.encode(encoding))
        new_length = max(0, max_length - 8)
        new_filename = '{0}{1}'.format(
            new_filename[:new_length], hash_obj.hexdigest()[:8]
        )

    if case == 'lower':
        new_filename = new_filename.lower()
    elif case == 'upper':
        new_filename = new_filename.upper()

    return new_filename
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def anti_clobber_dir_path(dir_path, suffix='.d'):
    '''Return a directory path free of filenames.

    Args:
        dir_path (str): A directory path.
        suffix (str): The suffix to append to the part of the path
            that is a file.

    Returns:
        str
    '''
    dir_path = os.path.normpath(dir_path)
    parts = dir_path.split(os.sep)

    # Walk prefixes from the root down; the first prefix that is an
    # existing *file* gets renamed with the suffix.
    for index in range(len(parts)):
        test_path = os.sep.join(parts[:index + 1])

        if os.path.isfile(test_path):
            parts[index] += suffix
            return os.sep.join(parts)

    return dir_path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_content_disposition(text):
    '''Parse a Content-Disposition header value.

    Returns the filename parameter value, or ``None`` when no usable
    filename is present.
    '''
    match = re.search(r'filename\s*=\s*(.+)', text, re.IGNORECASE)

    if not match:
        return

    filename = match.group(1)

    if filename[0] in '"\'':
        # Quoted filename: pull out the text between the quotes and
        # unescape embedded quote characters.
        match = re.match(r'(.)(.+)(?!\\)\1', filename)

        if match:
            return match.group(2).replace('\\"', '"')
    else:
        # Bare filename: it ends at the next parameter separator.
        return filename.partition(';')[0].strip()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def safe_filename(self, part):
    '''Return a safe filename or file part.

    Delegates to the module-level ``safe_filename`` helper using this
    writer's configured platform and naming options.
    '''
    return safe_filename(
        part,
        os_type=self._os_type,
        no_control=self._no_control,
        ascii_only=self._ascii_only,
        case=self._case,
        max_length=self._max_filename_length,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _warn_unsafe_options(cls, args):
    '''Print warnings about any enabled hazardous options.

    This function will print messages complaining about:

    * ``--save-headers``
    * ``--no-iri``
    * ``--output-document``
    * ``--ignore-fatal-errors``
    '''
    enabled_options = [
        option_name for option_name in cls.UNSAFE_OPTIONS
        if getattr(args, option_name)
    ]

    if enabled_options:
        _logger.warning(__(
            _('The following unsafe options are enabled: {list}.'),
            list=enabled_options
        ))
        _logger.warning(
            _('The use of unsafe options may lead to unexpected behavior '
              'or file corruption.'))

    if not args.retr_symlinks:
        _logger.warning(
            _('The --retr-symlinks=off option is a security risk.')
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _warn_silly_options(cls, args):
    '''Print warnings about any options that may be silly.

    Flags likely-mistaken combinations: span-hosts rules without the
    matching fetch option, proxies combined with WARC output, and
    passwords that would be recorded into a WARC file.
    '''
    if 'page-requisites' in args.span_hosts_allow \
            and not args.page_requisites:
        _logger.warning(
            _('Spanning hosts is allowed for page requisites, '
              'but the page requisites option is not on.')
        )

    if 'linked-pages' in args.span_hosts_allow \
            and not args.recursive:
        _logger.warning(
            _('Spanning hosts is allowed for linked pages, '
              'but the recursive option is not on.')
        )

    # WARC files record the raw exchange; proxies alter it.
    if args.warc_file and \
            (args.http_proxy or args.https_proxy):
        _logger.warning(_('WARC specifications do not handle proxies.'))

    if (args.password or args.ftp_password or
            args.http_password or args.proxy_password) and \
            args.warc_file:
        _logger.warning(
            _('Your password is recorded in the WARC file.'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_month(text: str) -> int:
    '''Parse a month name string into its month number.

    Tries the full lowercased name first, then its first three
    characters, against the module-level ``MONTH_MAP``.

    Raises:
        ValueError: The text is not a known month name.
    '''
    lowered = text.lower()

    for candidate in (lowered, lowered[:3]):
        try:
            return MONTH_MAP[candidate]
        except KeyError:
            pass

    raise ValueError('Month {} not found.'.format(repr(text)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def y2k(year: int) -> int:
    '''Convert a two digit year to a four digit year.

    Follows the POSIX two-digit-year convention: years 69-99 map to
    1969-1999 and years 0-68 map to 2000-2068.

    Args:
        year: A year in the range 0-99.

    Returns:
        The four digit year.
    '''
    assert 0 <= year <= 99, 'Not a two digit year {}'.format(year)
    # BUG FIX: years >= 69 belong to the 1900s (1969-1999); the previous
    # code added 1000, producing nonsense years such as 1099.
    return year + 1900 if year >= 69 else year + 2000
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_cldr_json(directory, language_codes=DEFAULT_LANGUAGE_CODES, massage=True):
    '''Parse CLDR JSON datasets for date and time strings.

    Reads each language's ``ca-gregorian.json`` dataset, gathers the
    abbreviated month names and AM/PM markers, and prints the collected
    sets using ``pprint``.

    Args:
        directory: Root of the extracted CLDR JSON data.
        language_codes: Language codes to read.
        massage: If True, apply NFKD normalization, lowercase, and strip
            trailing dots from the collected strings.
    '''
    am_strings = set()
    pm_strings = set()
    month_to_int = {}

    for lang in language_codes:
        path = os.path.join(directory, 'main', lang, 'ca-gregorian.json')

        with open(path) as in_file:
            doc = json.load(in_file)

        # Month names and day periods live deep in the gregorian
        # calendar section of the dataset.
        months_dict = doc['main'][lang]['dates']['calendars']['gregorian']['months']['format']['abbreviated']
        day_periods_dict = doc['main'][lang]['dates']['calendars']['gregorian']['dayPeriods']['format']['abbreviated']

        for month, month_str in months_dict.items():
            if massage:
                month_str = unicodedata.normalize('NFKD', month_str).lower().strip('.')
            month_to_int[month_str] = int(month)

        am_str = day_periods_dict['am']
        pm_str = day_periods_dict['pm']

        if massage:
            am_str = unicodedata.normalize('NFKD', am_str).lower().strip('.')
            pm_str = unicodedata.normalize('NFKD', pm_str).lower().strip('.')

        am_strings.add(am_str)
        pm_strings.add(pm_str)

    print(pprint.pformat(am_strings))
    print(pprint.pformat(pm_strings))
    print(pprint.pformat(month_to_int))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def acquire_proxy(self, host, port, use_ssl=False, host_key=None,
                  tunnel=True):
    '''Check out a connection.

    This function is the same as acquire but with extra arguments
    concerning proxies.

    Coroutine.
    '''
    # Hosts excluded by the filter bypass the proxy entirely.
    if self._host_filter and not self._host_filter.test(host):
        connection = yield from \
            super().acquire(host, port, use_ssl, host_key)
        return connection

    host_key = host_key or (host, port, use_ssl)
    proxy_host, proxy_port = self._proxy_address

    # The underlying pool connects to the proxy, keyed on the
    # destination so connections to different targets are not mixed.
    connection = yield from super().acquire(
        proxy_host, proxy_port, self._proxy_ssl, host_key=host_key
    )
    connection.proxied = True

    _logger.debug('Request for proxy connection.')

    if connection.closed():
        _logger.debug('Connecting to proxy.')
        yield from connection.connect()

        if tunnel:
            yield from self._establish_tunnel(connection, (host, port))

        if use_ssl:
            # Wrap the tunneled connection in TLS and remember the
            # mapping so release/teardown can find the raw connection.
            ssl_connection = yield from connection.start_tls(self._ssl_context)
            ssl_connection.proxied = True
            ssl_connection.tunneled = True

            self._connection_map[ssl_connection] = connection
            connection.wrapped_connection = ssl_connection

            return ssl_connection

    if connection.wrapped_connection:
        # Reuse the TLS wrapper created on a previous checkout.
        ssl_connection = connection.wrapped_connection
        self._connection_map[ssl_connection] = connection
        return ssl_connection
    else:
        return connection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _establish_tunnel(self, connection, address):
    '''Establish a TCP tunnel through the proxy with HTTP CONNECT.

    Args:
        connection: An open connection to the proxy.
        address: ``(host, port)`` of the final destination.

    Raises:
        NetworkError: The proxy refused the CONNECT request.

    Coroutine.
    '''
    # IPv6 literals must be bracketed in the CONNECT target.
    host = '[{}]'.format(address[0]) if ':' in address[0] else address[0]
    port = address[1]
    request = RawRequest('CONNECT', '{0}:{1}'.format(host, port))

    self.add_auth_header(request)

    stream = Stream(connection, keep_alive=True)

    _logger.debug('Sending Connect.')
    yield from stream.write_request(request)

    _logger.debug('Read proxy response.')
    response = yield from stream.read_response()

    if response.status_code != 200:
        # Drain and log the error body for debugging before failing.
        debug_file = io.BytesIO()
        _logger.debug('Read proxy response body.')
        yield from stream.read_body(request, response, file=debug_file)

        debug_file.seek(0)
        _logger.debug(ascii(debug_file.read()))

    if response.status_code == 200:
        connection.tunneled = True
    else:
        raise NetworkError(
            'Proxy does not support CONNECT: {} {}'
            .format(response.status_code,
                    wpull.string.printable_str(response.reason))
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_file(cls, file):
    '''Return whether the file is likely to be HTML.

    Peeks at the start of the file (without consuming it) and looks for
    common HTML markers in a case-insensitive manner. Returns True if
    any marker is found, otherwise None.
    '''
    peeked_data = wpull.string.printable_bytes(
        wpull.util.peek_file(file)).lower()

    if b'<!doctype html' in peeked_data \
            or b'<head' in peeked_data \
            or b'<title' in peeked_data \
            or b'<html' in peeked_data \
            or b'<script' in peeked_data \
            or b'<table' in peeked_data \
            or b'<a href' in peeked_data:
        return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def convert_http_request(request, referrer_host=None):
    '''Convert a HTTP request.

    Args:
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): The referrering hostname or IP address.

    Returns:
        Request: An instance of :class:`urllib.request.Request`
    '''
    converted = urllib.request.Request(
        request.url_info.url,
        origin_req_host=referrer_host,
    )

    # Copy every header field onto the urllib request.
    for header_name, header_value in request.fields.get_all():
        converted.add_header(header_name, header_value)

    return converted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_cookie_header(self, request, referrer_host=None):
    '''Wrapped ``add_cookie_header``.

    Converts the wpull request to a urllib request, lets the underlying
    cookie jar add its Cookie header, then copies all resulting headers
    back onto the wpull request.

    Args:
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): An hostname or IP address of the referrer
            URL.
    '''
    new_request = convert_http_request(request, referrer_host)
    self._cookie_jar.add_cookie_header(new_request)

    # Rebuild the field set from the urllib request so the cookie
    # header (and any normalization) is reflected.
    request.fields.clear()

    for name, value in new_request.header_items():
        request.fields.add(name, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def extract_cookies(self, response, request, referrer_host=None):
    '''Wrapped ``extract_cookies``.

    Wraps the wpull response and request in urllib-compatible adapters
    and lets the underlying cookie jar record any Set-Cookie headers.

    Args:
        response: An instance of :class:`.http.request.Response`.
        request: An instance of :class:`.http.request.Request`.
        referrer_host (str): An hostname or IP address of the referrer
            URL.
    '''
    new_response = HTTPResponseInfoWrapper(response)
    new_request = convert_http_request(request, referrer_host)

    self._cookie_jar.extract_cookies(new_response, new_request)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    '''Save the cookie jar to disk if a save filename was configured.'''
    if not self._save_filename:
        return

    self._cookie_jar.save(
        self._save_filename,
        ignore_discard=self._keep_session_cookies
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def start(self, use_atexit=True):
    '''Start the executable.

    Spawns the subprocess with piped stdin/stdout/stderr and begins the
    background reader tasks for stdout and stderr.

    Args:
        use_atexit (bool): If True, the process will automatically be
            terminated at exit.

    Coroutine.
    '''
    assert not self._process

    _logger.debug('Starting process %s', self._proc_args)

    process_future = asyncio.create_subprocess_exec(
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield from process_future

    # Start background tasks that forward the child's output to the
    # registered callbacks.
    self._stderr_reader = asyncio.async(self._read_stderr())
    self._stdout_reader = asyncio.async(self._read_stdout())

    if use_atexit:
        atexit.register(self.close)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    '''Terminate or kill the subprocess.

    Sends SIGTERM first, waits up to roughly half a second for the
    process to exit, then falls back to SIGKILL.

    This function is blocking.
    '''
    if not self._process:
        return

    if self._process.returncode is not None:
        return

    _logger.debug('Terminate process.')

    try:
        self._process.terminate()
    except OSError as error:
        # ESRCH: the process already exited; anything else is a real
        # error.
        if error.errno != errno.ESRCH:
            raise

    # Poll briefly for a graceful exit before escalating.
    for dummy in range(10):
        if self._process.returncode is not None:
            return

        time.sleep(0.05)

    _logger.debug('Failed to terminate. Killing.')

    try:
        self._process.kill()
    except OSError as error:
        if error.errno != errno.ESRCH:
            raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_stdout(self):
    '''Continuously read the stdout for messages.

    Forwards each line to the stdout callback, if any, until the
    process exits or the stream reaches EOF.

    Coroutine.
    '''
    try:
        while self._process.returncode is None:
            line = yield from self._process.stdout.readline()

            _logger.debug('Read stdout line %s', repr(line))

            if not line:
                break

            if self._stdout_callback:
                yield from self._stdout_callback(line)

    except Exception:
        _logger.exception('Unhandled read stdout exception.')
        raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_stderr(self):
    '''Continuously read stderr for error messages.

    Forwards each line to the stderr callback, if any, until the
    process exits or the stream reaches EOF.

    Coroutine.
    '''
    try:
        while self._process.returncode is None:
            line = yield from self._process.stderr.readline()

            if not line:
                break

            if self._stderr_callback:
                yield from self._stderr_callback(line)

    except Exception:
        _logger.exception('Unhandled read stderr exception.')
        raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_file(self, file: Union[IO, asyncio.StreamWriter]=None):
    '''Read from connection to file.

    Reads the data connection in 4 KiB chunks until EOF, optionally
    writing each chunk to *file* and notifying read observers.

    Args:
        file: A file object or a writer stream.

    Coroutine.
    '''
    if file:
        # Async stream writers expose drain(); plain files do not.
        file_is_async = hasattr(file, 'drain')

    while True:
        data = yield from self._connection.read(4096)

        if not data:
            break

        if file:
            file.write(data)

            if file_is_async:
                yield from file.drain()

        self._data_event_dispatcher.notify_read(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def reconnect(self):
    '''Reconnect the underlying connection if it is closed.

    Coroutine.
    '''
    if self._connection.closed():
        # Reset clears stale state before the new connect.
        self._connection.reset()

        yield from self._connection.connect()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_command(self, command: Command):
    '''Write a command to the control stream.

    Args:
        command: The command to serialize and send.

    Coroutine.
    '''
    _logger.debug('Write command.')
    data = command.to_bytes()
    yield from self._connection.write(data)
    # Observers (e.g. WARC recorders) see exactly what was sent.
    self._data_event_dispatcher.notify_write(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_reply(self) -> Reply:
    '''Read a reply from the control stream.

    Reads lines until a complete (possibly multi-line) FTP reply has
    been parsed.

    Returns:
        .ftp.request.Reply: The reply

    Raises:
        NetworkError: The connection closed mid-reply.

    Coroutine.
    '''
    _logger.debug('Read reply')
    reply = Reply()

    while True:
        line = yield from self._connection.readline()

        # A line without a trailing newline means the peer hung up.
        if line[-1:] != b'\n':
            raise NetworkError('Connection closed.')

        self._data_event_dispatcher.notify_read(line)

        reply.parse(line)

        # The parser sets the code once the final reply line arrives.
        if reply.code is not None:
            break

    return reply
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def can_fetch_pool(self, request: Request):
    '''Return whether the request can be fetched based on the pool.

    Raises:
        NotInPoolError: No robots.txt parser has been loaded yet for
            this URL's site; the caller must fetch robots.txt first.
    '''
    url_info = request.url_info
    user_agent = request.fields.get('User-agent', '')

    if self._robots_txt_pool.has_parser(url_info):
        return self._robots_txt_pool.can_fetch(url_info, user_agent)
    else:
        raise NotInPoolError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def fetch_robots_txt(self, request: Request, file=None):
    '''Fetch the robots.txt file for the request.

    Downloads ``/robots.txt`` from the request's host and loads it into
    the pool. Protocol errors and non-200 responses are treated as a
    blank (permit-all) robots.txt; 5xx responses raise.

    Raises:
        ServerError: The server returned a 5xx status for robots.txt.

    Coroutine.
    '''
    url_info = request.url_info
    url = URLInfo.parse('{0}://{1}/robots.txt'.format(
        url_info.scheme, url_info.hostname_with_port)).url

    if not file:
        file = wpull.body.new_temp_file(os.getcwd(), hint='robots')

    with contextlib.closing(file):
        request = Request(url)
        session = self._web_client.session(request)
        while not session.done():
            # Each redirect hop restarts the download into the same
            # file, so truncate leftovers first.
            wpull.util.truncate_file(file.name)

            try:
                response = yield from session.start()
                yield from session.download(file=file)
            except ProtocolError:
                self._accept_as_blank(url_info)

                return

        status_code = response.status_code

        if 500 <= status_code <= 599:
            raise ServerError('Server returned error for robots.txt.')

        if status_code == 200:
            self._read_content(response, url_info)
        else:
            self._accept_as_blank(url_info)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def can_fetch(self, request: Request, file=None) -> bool:
    '''Return whether the request can fetched.

    Consults the parser pool first; on a miss, fetches robots.txt for
    the site and then consults the pool again.

    Args:
        request: Request.
        file: A file object to where the robots.txt contents are
            written.

    Coroutine.
    '''
    try:
        return self.can_fetch_pool(request)
    except NotInPoolError:
        pass

    yield from self.fetch_robots_txt(request, file=file)

    return self.can_fetch_pool(request)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_content(self, response: Response, original_url_info: URLInfo):
    '''Read the response body and parse the contents into the pool.

    On a parse failure the site is accepted as blank (permit-all).
    '''
    # NOTE: only the first 4096 bytes of the robots.txt body are read
    # and parsed here.
    data = response.body.read(4096)
    url_info = original_url_info

    try:
        self._robots_txt_pool.load_robots_txt(url_info, data)
    except ValueError:
        _logger.warning(__(
            _('Failed to parse {url} for robots exclusion rules. '
              'Ignoring.'), url_info.url))
        self._accept_as_blank(url_info)
    else:
        _logger.debug(__('Got a good robots.txt for {0}.',
                         url_info.url))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _accept_as_blank(self, url_info: URLInfo):
    '''Mark the URL's site as permit-all in the pool.

    Loads an empty robots.txt so subsequent lookups succeed and allow
    all fetches.
    '''
    _logger.debug(__('Got empty robots.txt for {0}.', url_info.url))
    self._robots_txt_pool.load_robots_txt(url_info, '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_text_format(self):
    '''Format as detached DNS information as text.

    The first line is the fetch timestamp (``YYYYMMDDHHMMSS``) followed
    by one line per resource record.
    '''
    lines = [self.fetch_date.strftime('%Y%m%d%H%M%S')]
    lines.extend(record.to_text() for record in self.resource_records)
    return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def first_ipv4(self) -> Optional[AddressInfo]:
    '''Return the first IPv4 address, or None when there is none.'''
    return next(
        (candidate for candidate in self._address_infos
         if candidate.family == socket.AF_INET),
        None
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def first_ipv6(self) -> Optional[AddressInfo]:
    '''Return the first IPv6 address, or None when there is none.'''
    return next(
        (candidate for candidate in self._address_infos
         if candidate.family == socket.AF_INET6),
        None
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def rotate(self):
    '''Move the first address to the last position.'''
    self._address_infos.append(self._address_infos.pop(0))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _query_dns(self, host: str, family: int=socket.AF_INET) \
        -> dns.resolver.Answer:
    '''Query DNS using the dnspython resolver in an executor thread.

    Raises:
        DNSNotFound: The name does not exist or has no records.
        NetworkError: Any other DNS resolution error.

    Coroutine.
    '''
    record_type = {socket.AF_INET: 'A', socket.AF_INET6: 'AAAA'}[family]

    event_loop = asyncio.get_event_loop()
    # dnspython is blocking; run the query in the default executor.
    query = functools.partial(
        self._dns_resolver.query, host, record_type,
        source=self._bind_address)

    try:
        answer = yield from event_loop.run_in_executor(None, query)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as error:
        # dnspython doesn't raise an instance with a message, so use the
        # class name instead.
        raise DNSNotFound(
            'DNS resolution failed: {error}'
            .format(error=wpull.util.get_exception_message(error))
        ) from error
    except dns.exception.DNSException as error:
        raise NetworkError(
            'DNS resolution error: {error}'
            .format(error=wpull.util.get_exception_message(error))
        ) from error
    else:
        return answer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _getaddrinfo(self, host: str, family: int=socket.AF_UNSPEC) \
        -> List[tuple]:
    '''Query DNS using the system resolver via ``getaddrinfo``.

    Raises:
        DNSNotFound: The name could not be resolved.
        NetworkError: Any other resolution error or a timeout.

    Coroutine.
    '''
    event_loop = asyncio.get_event_loop()

    query = event_loop.getaddrinfo(host, 0, family=family,
                                   proto=socket.IPPROTO_TCP)

    if self._timeout:
        query = asyncio.wait_for(query, self._timeout)

    try:
        results = yield from query
    except socket.error as error:
        # These errno values indicate "name not found" conditions.
        if error.errno in (
                socket.EAI_FAIL,
                socket.EAI_NODATA,
                socket.EAI_NONAME):
            raise DNSNotFound(
                'DNS resolution failed: {error}'.format(error=error)
            ) from error
        else:
            raise NetworkError(
                'DNS resolution error: {error}'.format(error=error)
            ) from error
    except asyncio.TimeoutError as error:
        raise NetworkError('DNS resolve timed out.') from error
    else:
        return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _convert_dns_answer(cls, answer: dns.resolver.Answer) \
        -> Iterable[AddressInfo]:
    '''Convert a dnspython answer to ``AddressInfo`` objects.

    Yields one ``AddressInfo`` per A or AAAA record. IPv6 addresses get
    their flow info and scope id resolved via ``getaddrinfo``.
    '''
    assert answer.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)

    if answer.rdtype == dns.rdatatype.A:
        family = socket.AF_INET
    else:
        family = socket.AF_INET6

    for record in answer:
        ip_address = record.to_text()

        if family == socket.AF_INET6:
            flow_info, control_id = cls._get_ipv6_info(ip_address)
        else:
            flow_info = control_id = None

        yield AddressInfo(ip_address, family, flow_info, control_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _convert_addrinfo(cls, results: List[tuple]) -> Iterable[AddressInfo]:
    '''Convert ``getaddrinfo`` result tuples to ``AddressInfo`` objects.

    IPv4 entries carry no flow info or scope id; IPv6 entries take them
    from the socket address tuple.
    '''
    for entry in results:
        family = entry[0]
        sockaddr = entry[4]

        if family == socket.AF_INET6:
            yield AddressInfo(sockaddr[0], family, sockaddr[2], sockaddr[3])
        else:
            yield AddressInfo(sockaddr[0], family, None, None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_ipv6_info(cls, ip_address: str) -> tuple: '''Extract the flow info and control id.''' results = socket.getaddrinfo( ip_address, 0, proto=socket.IPPROTO_TCP, flags=socket.AI_NUMERICHOST) flow_info = results[0][4][2] control_id = results[0][4][3] return flow_info, control_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def raise_if_not_match(cls, action: str,
                       expected_code: Union[int, Sequence[int]],
                       reply: Reply):
    '''Raise FTPServerError if the reply code is not an expected one.

    Args:
        action: Label to use in the exception message.
        expected_code: Expected 3 digit code or a sequence of
            acceptable codes.
        reply: Reply from the server.
    '''
    # Normalize a single code into a one-element tuple.
    expected_codes = (expected_code,) if isinstance(expected_code, int) \
        else expected_code

    if reply.code in expected_codes:
        return

    raise FTPServerError(
        'Failed action {action}: {reply_code} {reply_text}'
        .format(action=action, reply_code=reply.code,
                reply_text=ascii(reply.text)
                ),
        reply.code
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_welcome_message(self):
    '''Read and validate the server's welcome message.

    Raises:
        FTPServerError: The server did not reply with "service ready".

    Coroutine.
    '''
    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match(
        'Server ready', ReplyCodes.service_ready_for_new_user, reply)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def passive_mode(self) -> Tuple[str, int]:
    '''Enable passive mode.

    Sends PASV and parses the data connection address from the reply.

    Returns:
        The address (IP address, port) of the passive port.

    Raises:
        ProtocolError: The reply address could not be parsed.

    Coroutine.
    '''
    yield from self._control_stream.write_command(Command('PASV'))

    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match(
        'Passive mode', ReplyCodes.entering_passive_mode, reply)

    try:
        return wpull.protocol.ftp.util.parse_address(reply.text)
    except ValueError as error:
        raise ProtocolError(str(error)) from error
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def setup_data_stream(
        self,
        connection_factory: Callable[[tuple], Connection],
        data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
        DataStream:
    '''Create and setup a data stream.

    This function will set up passive and binary mode and handle
    connecting to the data connection.

    Args:
        connection_factory: A coroutine callback that returns a
            connection
        data_stream_factory: A callback that returns a data stream

    Coroutine.

    Returns:
        DataStream
    '''
    # Binary (image) transfer type avoids newline translation.
    yield from self._control_stream.write_command(Command('TYPE', 'I'))
    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)

    address = yield from self.passive_mode()

    connection = yield from connection_factory(address)

    # TODO: unit test for following line for connections that have
    # the same port over time but within pool cleaning intervals
    connection.reset()

    yield from connection.connect()

    data_stream = data_stream_factory(connection)

    return data_stream
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def begin_stream(self, command: Command) -> Reply:
    '''Start sending content on the data stream.

    Args:
        command: A command that tells the server to send data over the
            data connection.

    Raises:
        FTPServerError: The server did not accept the transfer command.

    Coroutine.

    Returns:
        The begin reply.
    '''
    yield from self._control_stream.write_command(command)
    reply = yield from self._control_stream.read_reply()

    # Either "about to open" or "already open" means the transfer is
    # starting.
    self.raise_if_not_match(
        'Begin stream',
        (
            ReplyCodes.file_status_okay_about_to_open_data_connection,
            ReplyCodes.data_connection_already_open_transfer_starting,
        ),
        reply
    )

    return reply
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
    '''Read from the data stream until EOF and confirm completion.

    Args:
        file: A destination file object or a stream writer.
        data_stream: The stream of which to read from.

    Raises:
        FTPServerError: The server did not confirm the transfer with
            "closing data connection".

    Coroutine.

    Returns:
        Reply: The final reply.
    '''
    yield from data_stream.read_file(file=file)

    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match(
        'End stream',
        ReplyCodes.closing_data_connection,
        reply
    )

    data_stream.close()

    return reply
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def restart(self, offset: int):
    '''Send the REST command to resume a transfer at *offset* bytes.

    Raises:
        FTPServerError: The server did not accept the restart marker.

    Coroutine.
    '''
    yield from self._control_stream.write_command(Command('REST', str(offset)))

    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match('Restart', ReplyCodes.requested_file_action_pending_further_information, reply)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_version(exe_path='youtube-dl'):
    '''Return the version string reported by youtube-dl.

    Runs ``<exe_path> --version`` and returns the stripped stdout.
    '''
    process = subprocess.Popen(
        [exe_path, '--version'],
        stdout=subprocess.PIPE
    )
    stdout_data = process.communicate()[0]
    version_string = stdout_data.decode().strip()

    # A version string is a single token such as "2015.01.25".
    assert ' ' not in version_string, version_string

    return version_string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_output_template(self):
    '''Return the path prefix and youtube-dl output template.

    Prefers a path derived from the file writer session; otherwise a
    temporary directory is created (and held on the instance so it is
    cleaned up later).

    Returns:
        tuple: ``(path_prefix, output_template)``
    '''
    path = self._file_writer_session.extra_resource_path('.youtube-dl')

    if not path:
        self._temp_dir = tempfile.TemporaryDirectory(
            dir=self._root_path, prefix='tmp-wpull-youtubedl'
        )
        path = '{}/tmp'.format(self._temp_dir.name)

    # Template expands to e.g. "<path>.<id>.<format_id>.<ext>".
    return path, '{}.%(id)s.%(format_id)s.%(ext)s'.format(path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _write_warc_metadata(self):
    '''Write the youtube-dl JSON metadata to the WARC file.

    Uses the pywb spec: one ``metadata://`` record per ``.info.json``
    file that youtube-dl produced under the path prefix.
    '''
    uri = 'metadata://{}{}'.format(self._item_session.url_record.url_info.authority,
                                   self._item_session.url_record.url_info.resource)
    glob_pattern = self._path_prefix + '*.info.json'
    filenames = list(glob.glob(glob_pattern))

    if not filenames:
        _logger.warning(__(
            # BUG FIX: the message previously contained a literal
            # "(unknown)" and ignored the filename keyword argument.
            _('Could not find external process metadata file: {filename}'),
            filename=glob_pattern
        ))
        return

    for filename in filenames:
        record = WARCRecord()
        record.set_common_fields('metadata', 'application/vnd.youtube-dl_formats+json')
        record.fields['WARC-Target-URI'] = uri
        record.block_file = open(filename, 'rb')

        self._warc_recorder.set_length_and_maybe_checksums(record)
        self._warc_recorder.write_record(record)

        record.block_file.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def process(self, session: AppSession):
    '''Populate the visits from the CDX into the URL table.

    Streams records from the CDX file named by ``--warc-dedup`` into
    the URL table as prior visits, validating once that the required
    CDX fields (URL "a", record id "u", checksum "k") are present.

    Raises:
        ValueError: A required CDX field is missing.
    '''
    if not session.args.warc_dedup:
        return

    iterable = wpull.warc.format.read_cdx(
        session.args.warc_dedup,
        encoding=session.args.local_encoding or 'utf-8'
    )

    missing_url_msg = _('The URL ("a") is missing from the CDX file.')
    missing_id_msg = _('The record ID ("u") is missing from the CDX file.')
    missing_checksum_msg = \
        _('The SHA1 checksum ("k") is missing from the CDX file.')

    counter = 0

    def visits():
        nonlocal counter
        # Validate the field set on the first record only; subsequent
        # records are assumed to share the same schema.
        checked_fields = False

        for record in iterable:
            if not checked_fields:
                if 'a' not in record:
                    raise ValueError(missing_url_msg)
                if 'u' not in record:
                    raise ValueError(missing_id_msg)
                if 'k' not in record:
                    raise ValueError(missing_checksum_msg)

                checked_fields = True

            yield record['a'], record['u'], record['k']
            counter += 1

    url_table = session.factory['URLTable']
    url_table.add_visits(visits())

    _logger.info(__(
        gettext.ngettext(
            'Loaded {num} record from CDX file.',
            'Loaded {num} records from CDX file.',
            counter
        ),
        num=counter
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_lxml_encoding(encoding):
    '''Check if lxml supports the specified encoding.

    Tries the encoding as given, then without dashes, then without
    underscores (lxml dislikes some spellings such as ``utf-16-le``).

    Returns:
        str, None
    '''
    # Candidate spellings, each derived from the previous one.
    candidates = [encoding]
    candidates.append(candidates[-1].replace('-', ''))
    candidates.append(candidates[-1].replace('_', ''))

    for candidate in candidates:
        try:
            lxml.html.HTMLParser(encoding=candidate)
        except LookupError:
            continue
        return candidate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget,
               parser_type='html'):
    '''Return an iterator of elements found in the document.

    Args:
        file: A file object containing the document.
        encoding (str): The encoding of the document.
        target_class: A class to be used for target parsing.
        parser_type (str): The type of parser to use. Accepted values:
            ``html``, ``xhtml``, ``xml``.

    Returns:
        iterator: Each item is an element from
        :mod:`.document.htmlparse.element`
    '''
    if encoding:
        # Fall back to latin1 if lxml rejects every spelling of the
        # encoding name.
        lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
    else:
        lxml_encoding = encoding

    elements = []
    callback_func = elements.append

    target = target_class(callback_func)

    if parser_type == 'html':
        parser = lxml.html.HTMLParser(
            encoding=lxml_encoding, target=target
        )
    elif parser_type == 'xhtml':
        parser = lxml.html.XHTMLParser(
            encoding=lxml_encoding, target=target, recover=True
        )
    else:
        parser = lxml.etree.XMLParser(
            encoding=lxml_encoding, target=target, recover=True
        )

    if parser_type == 'html':
        # XXX: Force libxml2 to do full read in case of early "</html>"
        # See https://github.com/chfoo/wpull/issues/104
        # See https://bugzilla.gnome.org/show_bug.cgi?id=727935
        for dummy in range(3):
            parser.feed('<html>'.encode(encoding))

    while True:
        data = file.read(self.BUFFER_SIZE)

        if not data:
            break

        parser.feed(data)

        # Drain elements collected so far to keep memory bounded.
        for element in elements:
            yield element

        del elements[:]

    parser.close()

    for element in elements:
        yield element
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_doctype(cls, file, encoding=None):
    '''Get the doctype from the document.

    Peeks at the start of the file (without consuming it) and parses it
    leniently as XML to extract the doctype declaration.

    Returns:
        str, None
    '''
    if encoding:
        lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
    else:
        lxml_encoding = encoding

    try:
        parser = lxml.etree.XMLParser(encoding=lxml_encoding, recover=True)
        tree = lxml.etree.parse(
            io.BytesIO(wpull.util.peek_file(file)), parser=parser
        )
        if tree.getroot() is not None:
            return tree.docinfo.doctype
    except lxml.etree.LxmlError:
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def detect_parser_type(cls, file, encoding=None):
    '''Get the suitable parser type for the document.

    Uses the XML sniffer and the document's doctype: plain XML with no
    doctype parses as ``xml``, an XHTML doctype as ``xhtml``, and
    everything else as ``html``.

    Returns:
        str
    '''
    is_xml = XMLDetector.is_file(file)
    doctype = cls.parse_doctype(file, encoding=encoding) or ''

    if not doctype and is_xml:
        return 'xml'

    if 'XHTML' in doctype:
        return 'xhtml'

    return 'html'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def new_temp_file(directory=None, hint=''):
    '''Return a new named temporary file.

    Args:
        directory: Directory to create the file in (default: system
            temp directory).
        hint: Short label embedded in the filename for debugging.
    '''
    name_prefix = 'tmp-wpull-{0}-'.format(hint)
    return tempfile.NamedTemporaryFile(
        prefix=name_prefix, suffix='.tmp', dir=directory)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def content(self):
    '''Return the content of the file.

    If this function is invoked, the contents of the entire file is
    read and cached.

    Returns:
        ``bytes``: The entire content of the file.
    '''
    if not self._content_data:
        if is_seekable(self.file):
            # Preserve the caller's file position while reading.
            with wpull.util.reset_file_offset(self.file):
                self._content_data = self.file.read()
        else:
            self._content_data = self.file.read()

    return self._content_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def size(self):
    '''Return the size of the file in bytes.

    Raises:
        OSError: If the size cannot be determined.
    '''
    # Fast path: ask the OS when a real file descriptor is available.
    try:
        return os.fstat(self.file.fileno()).st_size
    except io.UnsupportedOperation:
        # In-memory streams (e.g. BytesIO) have no file descriptor.
        pass

    # Fallback: seek to the end to measure; reset_file_offset restores
    # the caller's position on exit, so tell() must happen inside.
    if is_seekable(self.file):
        with wpull.util.reset_file_offset(self.file):
            self.file.seek(0, os.SEEK_END)
            return self.file.tell()

    raise OSError('Unsupported operation.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _apply_pragmas_callback(cls, connection, record):
    '''Set SQLite pragmas on a newly opened connection.

    Enables write-ahead logging and relaxes fsync behaviour to
    ``synchronous=NORMAL``.
    '''
    _logger.debug('Setting pragmas.')

    for pragma_statement in (
            'PRAGMA journal_mode=WAL',
            'PRAGMA synchronous=NORMAL'):
        connection.execute(pragma_statement)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def has_parser(self, url_info: URLInfo):
    '''Return whether a parser has been created for the URL.'''
    return self.url_info_key(url_info) in self._parsers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def can_fetch(self, url_info: URLInfo, user_agent: str):
    '''Return whether the URL can be fetched.

    Raises:
        KeyError: If no parser has been loaded for the URL's site;
        check with :meth:`has_parser` first.
    '''
    site_parser = self._parsers[self.url_info_key(url_info)]

    return site_parser.is_allowed(user_agent, url_info.url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load_robots_txt(self, url_info: URLInfo, text: str):
    '''Parse robots.txt text and register a parser for the URL's site.'''
    robots_parser = robotexclusionrulesparser.RobotExclusionRulesParser()
    robots_parser.parse(text)

    self._parsers[self.url_info_key(url_info)] = robots_parser
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_demux_document_scraper(cls, session: AppSession):
    '''Create the demux document scraper.

    The instance is created through the factory (which retains it),
    so no return value is needed.
    '''
    document_scrapers = cls._build_document_scrapers(session)
    session.factory.new('DemuxDocumentScraper', document_scrapers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_document_scrapers(cls, session: AppSession):
    '''Create the document scrapers.

    Returns:
        A list of document scrapers
    '''
    args = session.args
    html_parser = session.factory['HTMLParser']
    element_walker = session.factory.new('ElementWalker')

    # The HTML scraper is always present.
    html_scraper = session.factory.new(
        'HTMLScraper',
        html_parser,
        element_walker,
        followed_tags=args.follow_tags,
        ignored_tags=args.ignore_tags,
        only_relative=args.relative,
        robots=args.robots,
        encoding_override=args.remote_encoding,
    )
    scrapers = [html_scraper]

    # Optional CSS scraper; the element walker needs a reference to it
    # as well.
    if 'css' in args.link_extractors:
        css_scraper = session.factory.new(
            'CSSScraper',
            encoding_override=args.remote_encoding,
        )
        scrapers.append(css_scraper)
        element_walker.css_scraper = css_scraper

    # Optional JavaScript scraper, wired up the same way.
    if 'javascript' in args.link_extractors:
        javascript_scraper = session.factory.new(
            'JavaScriptScraper',
            encoding_override=args.remote_encoding,
        )
        scrapers.append(javascript_scraper)
        element_walker.javascript_scraper = javascript_scraper

    if args.sitemaps:
        scrapers.append(session.factory.new(
            'SitemapScraper',
            html_parser,
            encoding_override=args.remote_encoding,
        ))

    return scrapers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_request_factory(cls, session: AppSession):
    '''Create the request factory.

    A request factory is any callable object that returns a
    :class:`.http.Request`. The callable must accept the same
    arguments to Request.

    Returns:
        A callable object
    '''
    def request_factory(*args, **kwargs):
        # Look up the Request class through the factory's class map so
        # a substituted class is used if one was registered.
        request = session.factory.class_map['Request'](*args, **kwargs)

        # --user-agent overrides the built-in default.
        user_agent = session.args.user_agent or session.default_user_agent

        request.fields['User-Agent'] = user_agent

        if session.args.referer:
            request.fields['Referer'] = session.args.referer

        # Each raw --header string is parsed into a header field.
        for header_string in session.args.header:
            request.fields.parse(header_string)

        if session.args.http_compression:
            request.fields['Accept-Encoding'] = 'gzip, deflate'

        if session.args.no_cache:
            # Ask servers/proxies not to serve cached responses.
            request.fields['Cache-Control'] = 'no-cache, must-revalidate'
            request.fields['Pragma'] = 'no-cache'

        return request

    return request_factory
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_http_client(cls, session: AppSession):
    '''Create the HTTP client.

    Returns:
        Client: An instance of :class:`.http.Client`.
    '''
    # TODO:
    # recorder = self._build_recorder()

    # Pre-bind the stream options so the client can create streams
    # with a single call.
    make_stream = functools.partial(
        HTTPStream,
        ignore_length=session.args.ignore_length,
        keep_alive=session.args.http_keep_alive,
    )

    return session.factory.new(
        'HTTPClient',
        connection_pool=session.factory['ConnectionPool'],
        stream_factory=make_stream,
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_web_client(cls, session: AppSession):
    '''Build the Web Client.'''
    cookie_jar_wrapper = cls._build_cookie_jar(session)
    http_client = cls._build_http_client(session)

    # Pre-bind the redirect limit so the client can create trackers
    # without knowing the configuration.
    make_redirect_tracker = functools.partial(
        session.factory.class_map['RedirectTracker'],
        max_redirects=session.args.max_redirect,
    )

    return session.factory.new(
        'WebClient',
        http_client,
        redirect_tracker_factory=make_redirect_tracker,
        cookie_jar=cookie_jar_wrapper,
        request_factory=cls._build_request_factory(session),
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _build_cookie_jar(cls, session: AppSession):
    '''Build the cookie jar and its wrapper.

    Returns ``None`` when cookie handling is disabled.
    '''
    args = session.args

    if not args.cookies:
        return

    if args.load_cookies or args.save_cookies:
        # File persistence requires the Mozilla-format jar class.
        session.factory.set('CookieJar', BetterMozillaCookieJar)

    cookie_jar = session.factory.new('CookieJar')

    if args.load_cookies:
        cookie_jar.load(args.load_cookies, ignore_discard=True)

    cookie_jar.set_policy(
        session.factory.new('CookiePolicy', cookie_jar=cookie_jar))

    _logger.debug(__('Loaded cookies: {0}', list(cookie_jar)))

    return session.factory.new(
        'CookieJarWrapper',
        cookie_jar,
        save_filename=args.save_cookies,
        keep_session_cookies=args.keep_session_cookies,
    )