Cache a refresh token, ignoring any failure. Args: refresh_token (str): Refresh token to cache.
def set(self, refresh_token):
    logger.info('Saving refresh_token to %s', repr(self._filename))
    try:
        with open(self._filename, 'w') as f:
            f.write(refresh_token)
    except IOError as e:
        logger.warning('Failed to save refresh_token: %s', e)
360,547
Create a new channel. Args: session (http_utils.Session): Request session. max_retries (int): Number of retries for long-polling request. retry_backoff_base (int): The base term for the long-polling exponential backoff.
def __init__(self, session, max_retries, retry_backoff_base):
    # Event fired when channel connects with arguments ():
    self.on_connect = event.Event('Channel.on_connect')
    # Event fired when channel reconnects with arguments ():
    self.on_reconnect = event.Event('Channel.on_reconnect')
    # Event fired when channel disconnects with arguments ():
    self.on_disconnect = event.Event('Channel.on_disconnect')
    # Event fired when an array is received with arguments (array):
    self.on_receive_array = event.Event('Channel.on_receive_array')

    self._max_retries = max_retries
    self._retry_backoff_base = retry_backoff_base
    # True if the channel is currently connected:
    self._is_connected = False
    # True if the on_connect event has been called at least once:
    self._on_connect_called = False
    # Parser for assembling messages:
    self._chunk_parser = None
    # Session for HTTP requests:
    self._session = session
    # Discovered parameters:
    self._sid_param = None
    self._gsessionid_param = None
360,552
Construct :class:`ChatMessageSegment` list parsed from a string. Args: text (str): Text to parse. May contain line breaks, URLs and formatting markup (simplified Markdown and HTML) to be converted into equivalent segments. Returns: List of :class:`ChatMessageSegment` objects.
def from_str(text):
    segment_list = chat_message_parser.parse(text)
    return [ChatMessageSegment(segment.text, **segment.params)
            for segment in segment_list]
360,560
Construct :class:`ChatMessageSegment` from ``Segment`` message. Args: segment: ``Segment`` message to parse. Returns: :class:`ChatMessageSegment` object.
def deserialize(segment):
    link_target = segment.link_data.link_target
    return ChatMessageSegment(
        segment.text,
        segment_type=segment.type,
        is_bold=segment.formatting.bold,
        is_italic=segment.formatting.italic,
        is_strikethrough=segment.formatting.strikethrough,
        is_underline=segment.formatting.underline,
        link_target=None if link_target == '' else link_target
    )
360,561
Return whether a path is a subpath of another. Args: base_path: The base path. test_path: The path which we are testing. trailing_slash: If True, the trailing slash is treated with importance. For example, ``/images/`` is a directory while ``/images`` is a file. wildcards: If True, globbing wildcards are matched against paths.
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    if trailing_slash:
        base_path = base_path.rsplit('/', 1)[0] + '/'
        test_path = test_path.rsplit('/', 1)[0] + '/'
    else:
        if not base_path.endswith('/'):
            base_path += '/'
        if not test_path.endswith('/'):
            test_path += '/'

    if wildcards:
        return fnmatch.fnmatchcase(test_path, base_path)
    else:
        return test_path.startswith(base_path)
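A quick usage sketch, assuming the function above and ``fnmatch`` are in scope (the paths are illustrative):

# '/images' is normalized to the directory prefix '/images/'.
print(is_subdir('/images', '/images/cat.jpg'))                   # True
# With wildcards=True, the base path is treated as a glob pattern.
print(is_subdir('/images/*', '/images/thumbs', wildcards=True))  # True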
360,621
Flatten an absolute URL path by removing the dot segments. :func:`urllib.parse.urljoin` has some support for removing dot segments, but it is conservative and only removes them as needed. Arguments: path (str): The URL path. flatten_slashes (bool): If True, consecutive slashes are removed. The path returned will always have a leading slash.
def flatten_path(path, flatten_slashes=False):
    # Based on posixpath.normpath

    # Fast path
    if not path or path == '/':
        return '/'

    # Take off leading slash
    if path[0] == '/':
        path = path[1:]

    parts = path.split('/')
    new_parts = collections.deque()

    for part in parts:
        if part == '.' or (flatten_slashes and not part):
            continue
        elif part != '..':
            new_parts.append(part)
        elif new_parts:
            new_parts.pop()

    # If the filename is empty string
    if flatten_slashes and path.endswith('/') or not len(new_parts):
        new_parts.append('')

    # Put back leading slash
    new_parts.appendleft('')

    return '/'.join(new_parts)
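Two illustrative calls, assuming the function above and ``collections`` are imported:

print(flatten_path('/a/b/../c/./d'))                # '/a/c/d'
print(flatten_path('/a//b', flatten_slashes=True))  # '/a/b'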
360,626
Start a file or directory listing download. Args: request: Request. Returns: A Response populated with the initial data connection reply. Once the response is received, call :meth:`download`. Coroutine.
def start(self, request: Request) -> Response:
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')

    response = Response()

    yield from self._prepare_fetch(request, response)

    response.file_transfer_size = yield from self._fetch_size(request)

    if request.restart_value:
        try:
            yield from self._commander.restart(request.restart_value)
            response.restart_value = request.restart_value
        except FTPServerError:
            _logger.debug('Could not restart file.', exc_info=1)

    yield from self._open_data_stream()

    command = Command('RETR', request.file_path)
    yield from self._begin_stream(command)

    self._session_state = SessionState.file_request_sent

    return response
360,651
Fetch a file listing. Args: request: Request. Returns: A listing response populated with the initial data connection reply. Once the response is received, call :meth:`download_listing`. Coroutine.
def start_listing(self, request: Request) -> ListingResponse:
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')

    response = ListingResponse()

    yield from self._prepare_fetch(request, response)
    yield from self._open_data_stream()

    mlsd_command = Command('MLSD', self._request.file_path)
    list_command = Command('LIST', self._request.file_path)

    try:
        yield from self._begin_stream(mlsd_command)
        self._listing_type = 'mlsd'
    except FTPServerError as error:
        if error.reply_code in (
                ReplyCodes.syntax_error_command_unrecognized,
                ReplyCodes.command_not_implemented):
            self._listing_type = None
        else:
            raise

    if not self._listing_type:
        # This code not in exception handler to avoid incorrect
        # exception chaining
        yield from self._begin_stream(list_command)
        self._listing_type = 'list'

    _logger.debug('Listing type is %s', self._listing_type)

    self._session_state = SessionState.directory_request_sent

    return response
360,652
Read the response content into file. Args: file: A file object or asyncio stream. rewind: Seek the given file back to its original offset after reading is finished. duration_timeout: Maximum time in seconds within which the entire file must be read. Returns: A Response populated with the final data connection reply. Be sure to call :meth:`start` first. Coroutine.
def download(self, file: Optional[IO]=None, rewind: bool=True,
             duration_timeout: Optional[float]=None) -> Response:
    if self._session_state != SessionState.file_request_sent:
        raise RuntimeError('File request not sent')

    if rewind and file and hasattr(file, 'seek'):
        original_offset = file.tell()
    else:
        original_offset = None

    if not hasattr(file, 'drain'):
        self._response.body = file

        if not isinstance(file, Body):
            self._response.body = Body(file)

    read_future = self._commander.read_stream(file, self._data_stream)

    try:
        reply = yield from \
            asyncio.wait_for(read_future, timeout=duration_timeout)
    except asyncio.TimeoutError as error:
        raise DurationTimeout(
            'Did not finish reading after {} seconds.'
            .format(duration_timeout)
        ) from error

    self._response.reply = reply

    if original_offset is not None:
        file.seek(original_offset)

    self.event_dispatcher.notify(self.Event.end_transfer, self._response)

    self._session_state = SessionState.response_received

    return self._response
360,655
Read file listings. Args: file: A file object or asyncio stream. duration_timeout: Maximum time in seconds within which the entire file must be read. Returns: A Response populated with the file listings. Be sure to call :meth:`start_listing` first. Coroutine.
def download_listing(self, file: Optional[IO],
                     duration_timeout: Optional[float]=None) -> \
        ListingResponse:
    if self._session_state != SessionState.directory_request_sent:
        raise RuntimeError('File request not sent')

    self._session_state = SessionState.file_request_sent

    yield from self.download(file=file, rewind=False,
                             duration_timeout=duration_timeout)

    try:
        if self._response.body.tell() == 0:
            listings = ()
        elif self._listing_type == 'mlsd':
            self._response.body.seek(0)

            machine_listings = \
                wpull.protocol.ftp.util.parse_machine_listing(
                    self._response.body.read().decode(
                        'utf-8', errors='surrogateescape'),
                    convert=True, strict=False
                )

            listings = list(
                wpull.protocol.ftp.util.machine_listings_to_file_entries(
                    machine_listings
                ))
        else:
            self._response.body.seek(0)

            file = io.TextIOWrapper(self._response.body, encoding='utf-8',
                                    errors='surrogateescape')

            listing_parser = ListingParser(file=file)
            listings = list(listing_parser.parse_input())

            _logger.debug('Listing detected as %s', listing_parser.type)

            # We don't want the file to be closed when exiting this function
            file.detach()
    except (ListingError, ValueError) as error:
        raise ProtocolError(*error.args) from error

    self._response.files = listings
    self._response.body.seek(0)

    self._session_state = SessionState.response_received

    return self._response
360,656
Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is the digit.
def reply_code_tuple(code: int) -> Tuple[int, int, int]:
    return code // 100, code // 10 % 10, code % 10
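A worked example of the digit split:

print(reply_code_tuple(226))  # (2, 2, 6)
print(reply_code_tuple(150))  # (1, 5, 0)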
360,713
Parse machine listing. Args: text: The listing. convert: Convert sizes and dates. strict: Method of handling errors. ``True`` will raise ``ValueError``. ``False`` will ignore rows with errors. Returns: list: A list of dict of the facts defined in RFC 3659. The key names must be lowercase. The filename uses the key ``name``.
def parse_machine_listing(text: str, convert: bool=True,
                          strict: bool=True) -> List[dict]:
    # TODO: this function should be moved into the 'ls' package
    listing = []

    for line in text.splitlines(False):
        facts = line.split(';')
        row = {}
        filename = None

        for fact in facts:
            name, sep, value = fact.partition('=')

            if sep:
                name = name.strip().lower()
                value = value.strip().lower()

                if convert:
                    try:
                        value = convert_machine_list_value(name, value)
                    except ValueError:
                        if strict:
                            raise

                row[name] = value
            else:
                if name[0:1] == ' ':
                    # Is a filename
                    filename = name[1:]
                else:
                    name = name.strip().lower()
                    row[name] = ''

        if filename:
            row['name'] = filename
            listing.append(row)
        elif strict:
            raise ValueError('Missing filename.')

    return listing
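A worked example on a single RFC 3659 style line, assuming the function above is in scope; the facts shown are illustrative:

sample = 'type=file;size=1024;modify=20170101000000; example.txt'
print(parse_machine_listing(sample, convert=False))
# [{'type': 'file', 'size': '1024', 'modify': '20170101000000',
#   'name': 'example.txt'}]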
360,714
Iterate a CDX file. Args: file: A file object. encoding (str): The encoding of the file. Returns: iterator: Each item is a dict that maps from field key to value.
def read_cdx(file, encoding='utf8'):
    with codecs.getreader(encoding)(file) as stream:
        header_line = stream.readline()
        separator = header_line[0]
        field_keys = header_line.strip().split(separator)

        if field_keys.pop(0) != 'CDX':
            raise ValueError('CDX header not found.')

        for line in stream:
            yield dict(zip(field_keys, line.strip().split(separator)))
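A usage sketch, assuming the function above is in scope; ``records.cdx`` is a hypothetical input file, and the field letters come from the file's own header line:

with open('records.cdx', 'rb') as cdx_file:
    for record in read_cdx(cdx_file):
        # e.g. 'a' (original URL) and 'b' (date) in common CDX headers
        print(record.get('a'), record.get('b'))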
360,745
Clean closed connections. Args: force: Clean connected and idle connections too. Coroutine.
def clean(self, force: bool=False):
    with (yield from self._lock):
        for connection in tuple(self.ready):
            if force or connection.closed():
                connection.close()
                self.ready.remove(connection)
360,757
Unregister a connection. Args: connection: Connection instance returned from :meth:`acquire`. reuse: If True, the connection is made available for reuse. Coroutine.
def release(self, connection: Connection, reuse: bool=True):
    yield from self._condition.acquire()
    self.busy.remove(connection)

    if reuse:
        self.ready.add(connection)

    self._condition.notify()
    self._condition.release()
360,760
Return an available connection. Args: host: A hostname or IP address. port: Port number. use_ssl: Whether to return an SSL connection. host_key: If provided, it overrides the key used for per-host connection pooling. This is useful for proxies, for example. Coroutine.
def acquire(self, host: str, port: int, use_ssl: bool=False,
            host_key: Optional[Any]=None) \
        -> Union[Connection, SSLConnection]:
    assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))
    assert not self._closed

    yield from self._process_no_wait_releases()

    if use_ssl:
        connection_factory = functools.partial(
            self._ssl_connection_factory, hostname=host)
    else:
        connection_factory = functools.partial(
            self._connection_factory, hostname=host)

    connection_factory = functools.partial(
        HappyEyeballsConnection, (host, port), connection_factory,
        self._resolver, self._happy_eyeballs_table,
        is_ssl=use_ssl
    )

    key = host_key or (host, port, use_ssl)

    with (yield from self._host_pools_lock):
        if key not in self._host_pools:
            host_pool = self._host_pools[key] = HostPool(
                connection_factory,
                max_connections=self._max_host_count
            )
            self._host_pool_waiters[key] = 1
        else:
            host_pool = self._host_pools[key]
            self._host_pool_waiters[key] += 1

    _logger.debug('Check out %s', key)

    connection = yield from host_pool.acquire()
    connection.key = key

    # TODO: Verify this assert is always true
    # assert host_pool.count() <= host_pool.max_connections
    # assert key in self._host_pools
    # assert self._host_pools[key] == host_pool

    with (yield from self._host_pools_lock):
        self._host_pool_waiters[key] -= 1

    return connection
360,762
Clean all closed connections. Args: force: Clean connected and idle connections too. Coroutine.
def clean(self, force: bool=False):
    assert not self._closed

    with (yield from self._host_pools_lock):
        for key, pool in tuple(self._host_pools.items()):
            yield from pool.clean(force=force)

            if not self._host_pool_waiters[key] and pool.empty():
                del self._host_pools[key]
                del self._host_pool_waiters[key]
360,766
Increment the number of files downloaded. Args: size: The size of the file
def increment(self, size: int):
    assert size >= 0, size

    self.files += 1
    self.size += size
    self.bandwidth_meter.feed(size)
360,816
Modify the request to be suitable for an HTTP server. Args: full_url (bool): Use the full URL as the URI. By default, only the path of the URL is given to the server.
def prepare_for_send(self, full_url=False):
    assert self.url
    assert self.method
    assert self.version

    url_info = self.url_info

    if 'Host' not in self.fields:
        self.fields['Host'] = url_info.hostname_with_port

    if not full_url:
        if url_info.query:
            self.resource_path = '{0}?{1}'.format(url_info.path,
                                                  url_info.query)
        else:
            self.resource_path = url_info.path
    else:
        self.resource_path = url_info.url
360,841
Download content. Args: file: An optional file object for the document contents. duration_timeout: Maximum time in seconds within which the entire file must be read. Returns: Response: An instance of :class:`.http.request.Response`. See :meth:`WebClient.session` for proper usage of this function. Coroutine.
def download(self, file: Optional[IO[bytes]]=None,
             duration_timeout: Optional[float]=None):
    yield from self._current_session.download(
        file, duration_timeout=duration_timeout)
360,856
Begin an HTTP request. Args: request: Request information. Returns: A response populated with the HTTP headers. Once the headers are received, call :meth:`download`. Coroutine.
def start(self, request: Request) -> Response:
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session already started')

    assert not self._request
    self._request = request
    _logger.debug(__('Client fetch request {0}.', request))

    connection = yield from self._acquire_request_connection(request)
    full_url = connection.proxied and not connection.tunneled

    self._stream = stream = self._stream_factory(connection)

    yield from self._stream.reconnect()

    request.address = connection.address

    self.event_dispatcher.notify(self.Event.begin_request, request)
    write_callback = functools.partial(
        self.event_dispatcher.notify, self.Event.request_data)
    stream.data_event_dispatcher.add_write_listener(write_callback)

    yield from stream.write_request(request, full_url=full_url)

    if request.body:
        assert 'Content-Length' in request.fields
        length = int(request.fields['Content-Length'])
        yield from stream.write_body(request.body, length=length)

    stream.data_event_dispatcher.remove_write_listener(write_callback)
    self.event_dispatcher.notify(self.Event.end_request, request)

    read_callback = functools.partial(
        self.event_dispatcher.notify, self.Event.response_data)
    stream.data_event_dispatcher.add_read_listener(read_callback)

    self._response = response = yield from stream.read_response()
    response.request = request

    self.event_dispatcher.notify(self.Event.begin_response, response)

    self._session_state = SessionState.request_sent

    return response
360,914
Read the response content into file. Args: file: A file object or asyncio stream. raw: Whether chunked transfer encoding should be included. rewind: Seek the given file back to its original offset after reading is finished. duration_timeout: Maximum time in seconds within which the entire file must be read. Be sure to call :meth:`start` first. Coroutine.
def download(
        self,
        file: Union[IO[bytes], asyncio.StreamWriter, None]=None,
        raw: bool=False, rewind: bool=True,
        duration_timeout: Optional[float]=None):
    if self._session_state != SessionState.request_sent:
        raise RuntimeError('Request not sent')

    if rewind and file and hasattr(file, 'seek'):
        original_offset = file.tell()
    else:
        original_offset = None

    if not hasattr(file, 'drain'):
        self._response.body = file

        if not isinstance(file, Body):
            self._response.body = Body(file)

    read_future = self._stream.read_body(self._request, self._response,
                                         file=file, raw=raw)

    try:
        yield from asyncio.wait_for(read_future, timeout=duration_timeout)
    except asyncio.TimeoutError as error:
        raise DurationTimeout(
            'Did not finish reading after {} seconds.'
            .format(duration_timeout)
        ) from error

    self._session_state = SessionState.response_received

    if original_offset is not None:
        file.seek(original_offset)

    self.event_dispatcher.notify(self.Event.end_response, self._response)
    self.recycle()
360,915
Return a filename from a URL. Args: url (str): The URL. index (str): If a filename could not be derived from the URL path, use index instead. For example, ``/images/`` will return ``index.html``. alt_char (bool): If True, the character for the query delimiter will be ``@`` instead of ``?``. This function does not include the directories and does not sanitize the filename. Returns: str
def url_to_filename(url, index='index.html', alt_char=False):
    assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))
    url_split_result = urllib.parse.urlsplit(url)

    filename = url_split_result.path.split('/')[-1]

    if not filename:
        filename = index

    if url_split_result.query:
        if alt_char:
            query_delim = '@'
        else:
            query_delim = '?'

        filename = '{0}{1}{2}'.format(filename, query_delim,
                                      url_split_result.query)

    return filename
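Two illustrative calls (the URLs are placeholders):

print(url_to_filename('http://example.com/images/'))               # 'index.html'
print(url_to_filename('http://example.com/a?b=1', alt_char=True))  # 'a@b=1'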
361,001
Return a directory path free of filenames. Args: dir_path (str): A directory path. suffix (str): The suffix to append to the part of the path that is a file. Returns: str
def anti_clobber_dir_path(dir_path, suffix='.d'):
    dir_path = os.path.normpath(dir_path)
    parts = dir_path.split(os.sep)

    for index in range(len(parts)):
        test_path = os.sep.join(parts[:index + 1])

        if os.path.isfile(test_path):
            parts[index] += suffix
            return os.sep.join(parts)

    return dir_path
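For example, assuming ``/tmp/blog`` already exists as a regular file:

# The colliding component gets the suffix so the tree can be created.
print(anti_clobber_dir_path('/tmp/blog/posts'))  # '/tmp/blog.d/posts'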
361,004
Convert an HTTP request. Args: request: An instance of :class:`.http.request.Request`. referrer_host (str): The referring hostname or IP address. Returns: Request: An instance of :class:`urllib.request.Request`
def convert_http_request(request, referrer_host=None):
    new_request = urllib.request.Request(
        request.url_info.url,
        origin_req_host=referrer_host,
    )

    for name, value in request.fields.get_all():
        new_request.add_header(name, value)

    return new_request
361,060
Wrapped ``add_cookie_header``. Args: request: An instance of :class:`.http.request.Request`. referrer_host (str): A hostname or IP address of the referrer URL.
def add_cookie_header(self, request, referrer_host=None):
    new_request = convert_http_request(request, referrer_host)
    self._cookie_jar.add_cookie_header(new_request)

    request.fields.clear()

    for name, value in new_request.header_items():
        request.fields.add(name, value)
361,063
Wrapped ``extract_cookies``. Args: response: An instance of :class:`.http.request.Response`. request: An instance of :class:`.http.request.Request`. referrer_host (str): A hostname or IP address of the referrer URL.
def extract_cookies(self, response, request, referrer_host=None):
    new_response = HTTPResponseInfoWrapper(response)
    new_request = convert_http_request(request, referrer_host)

    self._cookie_jar.extract_cookies(new_response, new_request)
361,064
Start the executable. Args: use_atexit (bool): If True, the process will automatically be terminated at exit.
def start(self, use_atexit=True):
    assert not self._process

    _logger.debug('Starting process %s', self._proc_args)

    process_future = asyncio.create_subprocess_exec(
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield from process_future

    # asyncio.async is the pre-3.4.4 spelling of asyncio.ensure_future.
    self._stderr_reader = asyncio.async(self._read_stderr())
    self._stdout_reader = asyncio.async(self._read_stdout())

    if use_atexit:
        atexit.register(self.close)
361,067
Read from connection to file. Args: file: A file object or a writer stream.
def read_file(self, file: Union[IO, asyncio.StreamWriter]=None):
    if file:
        file_is_async = hasattr(file, 'drain')

    while True:
        data = yield from self._connection.read(4096)

        if not data:
            break

        if file:
            file.write(data)

            if file_is_async:
                yield from file.drain()

        self._data_event_dispatcher.notify_read(data)
361,076
Write a command to the stream. Args: command: The command. Coroutine.
def write_command(self, command: Command):
    _logger.debug('Write command.')

    data = command.to_bytes()
    yield from self._connection.write(data)
    self._data_event_dispatcher.notify_write(data)
361,078
Return whether the request can be fetched. Args: request: Request. file: A file object to where the robots.txt contents are written. Coroutine.
def can_fetch(self, request: Request, file=None) -> bool:
    try:
        return self.can_fetch_pool(request)
    except NotInPoolError:
        pass

    yield from self.fetch_robots_txt(request, file=file)

    return self.can_fetch_pool(request)
361,083
Resolve hostname. Args: host: Hostname. Returns: Resolved IP addresses. Raises: DNSNotFound if the hostname could not be resolved or NetworkError if there was an error connecting to DNS servers. Coroutine.
def resolve(self, host: str) -> ResolveResult:
    _logger.debug(__('Lookup address {0}.', host))

    try:
        host = self.hook_dispatcher.call(
            PluginFunctions.resolve_dns, host) or host
    except HookDisconnected:
        pass

    cache_key = (host, self._family)

    if self._cache and cache_key in self._cache:
        resolve_result = self._cache[cache_key]
        _logger.debug(__('Return by cache {0}.', resolve_result))

        if self._rotate:
            resolve_result.rotate()

        return resolve_result

    address_infos = []
    dns_infos = []

    if not self.dns_python_enabled:
        families = ()
    elif self._family == IPFamilyPreference.any:
        families = (socket.AF_INET, socket.AF_INET6)
    elif self._family == IPFamilyPreference.ipv4_only:
        families = (socket.AF_INET, )
    else:
        families = (socket.AF_INET6, )

    for family in families:
        datetime_now = datetime.datetime.utcnow()
        try:
            answer = yield from self._query_dns(host, family)
        except DNSNotFound:
            continue
        else:
            dns_infos.append(DNSInfo(datetime_now, answer.response.answer))
            address_infos.extend(self._convert_dns_answer(answer))

    if not address_infos:
        # Maybe the address is defined in hosts file or mDNS
        if self._family == IPFamilyPreference.any:
            family = socket.AF_UNSPEC
        elif self._family == IPFamilyPreference.ipv4_only:
            family = socket.AF_INET
        else:
            family = socket.AF_INET6

        results = yield from self._getaddrinfo(host, family)
        address_infos.extend(self._convert_addrinfo(results))

    _logger.debug(__('Resolved addresses: {0}.', address_infos))

    resolve_result = ResolveResult(address_infos, dns_infos)

    if self._cache:
        self._cache[cache_key] = resolve_result

    self.event_dispatcher.notify(
        PluginFunctions.resolve_dns_result, host, resolve_result)

    if self._rotate:
        resolve_result.shuffle()

    return resolve_result
361,092
Raise FTPServerError if not expected reply code. Args: action: Label to use in the exception message. expected_code: Expected 3 digit code. reply: Reply from the server.
def raise_if_not_match(cls, action: str,
                       expected_code: Union[int, Sequence[int]],
                       reply: Reply):
    if isinstance(expected_code, int):
        expected_codes = (expected_code,)
    else:
        expected_codes = expected_code

    if reply.code not in expected_codes:
        raise FTPServerError(
            'Failed action {action}: {reply_code} {reply_text}'
            .format(action=action, reply_code=reply.code,
                    reply_text=ascii(reply.text)),
            reply.code
        )
361,098
Create and set up a data stream. This function will set up passive and binary mode and handle connecting to the data connection. Args: connection_factory: A coroutine callback that returns a connection. data_stream_factory: A callback that returns a data stream. Coroutine. Returns: DataStream
def setup_data_stream(
        self,
        connection_factory: Callable[[tuple], Connection],
        data_stream_factory: Callable[[Connection], DataStream]=DataStream) \
        -> DataStream:
    yield from self._control_stream.write_command(Command('TYPE', 'I'))
    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)

    address = yield from self.passive_mode()

    connection = yield from connection_factory(address)

    # TODO: unit test for following line for connections that have
    # the same port over time but within pool cleaning intervals
    connection.reset()

    yield from connection.connect()

    data_stream = data_stream_factory(connection)

    return data_stream
361,102
Start sending content on the data stream. Args: command: A command that tells the server to send data over the data connection. Coroutine. Returns: The begin reply.
def begin_stream(self, command: Command) -> Reply:
    yield from self._control_stream.write_command(command)
    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match(
        'Begin stream',
        (
            ReplyCodes.file_status_okay_about_to_open_data_connection,
            ReplyCodes.data_connection_already_open_transfer_starting,
        ),
        reply
    )

    return reply
361,103
Read from the data stream. Args: file: A destination file object or a stream writer. data_stream: The stream of which to read from. Coroutine. Returns: Reply: The final reply.
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
    yield from data_stream.read_file(file=file)

    reply = yield from self._control_stream.read_reply()

    self.raise_if_not_match(
        'End stream',
        ReplyCodes.closing_data_connection,
        reply
    )

    data_stream.close()

    return reply
361,104
Return an iterator of elements found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. target_class: A class to be used for target parsing. parser_type (str): The type of parser to use. Accepted values: ``html``, ``xhtml``, ``xml``. Returns: iterator: Each item is an element from :mod:`.document.htmlparse.element`
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget,
               parser_type='html'):
    if encoding:
        lxml_encoding = to_lxml_encoding(encoding) or 'latin1'
    else:
        lxml_encoding = encoding

    elements = []
    callback_func = elements.append
    target = target_class(callback_func)

    if parser_type == 'html':
        parser = lxml.html.HTMLParser(
            encoding=lxml_encoding, target=target
        )
    elif parser_type == 'xhtml':
        parser = lxml.html.XHTMLParser(
            encoding=lxml_encoding, target=target, recover=True
        )
    else:
        parser = lxml.etree.XMLParser(
            encoding=lxml_encoding, target=target, recover=True
        )

    if parser_type == 'html':
        # XXX: Force libxml2 to do full read in case of early "</html>"
        # See https://github.com/chfoo/wpull/issues/104
        # See https://bugzilla.gnome.org/show_bug.cgi?id=727935
        for dummy in range(3):
            # '<html>' is ASCII-only; fall back to UTF-8 when no encoding
            # was given, since encoding may be None here.
            parser.feed('<html>'.encode(encoding or 'utf-8'))

    while True:
        data = file.read(self.BUFFER_SIZE)

        if not data:
            break

        parser.feed(data)

        for element in elements:
            yield element

        del elements[:]

    parser.close()

    for element in elements:
        yield element
361,121
Given the hints, return whether the document is supported. Args: file: A file object containing the document. request (:class:`.http.request.Request`): An HTTP request. response (:class:`.http.request.Response`): An HTTP response. url_info (:class:`.url.URLInfo`): A URLInfo. Returns: bool: If True, the reader should be able to read it.
def is_supported(cls, file=None, request=None, response=None,
                 url_info=None):
    tests = (
        (response, cls.is_response),
        (file, cls.is_file),
        (request, cls.is_request),
        (url_info, cls.is_url)
    )

    for instance, method in tests:
        if instance:
            try:
                result = method(instance)
            except NotImplementedError:
                pass
            else:
                if result:
                    return True
                elif result is VeryFalse:
                    return VeryFalse
361,179
Uncompress gzip data. Args: data (bytes): The gzip data. truncated (bool): If True, the decompressor is not flushed. This is a convenience function. Returns: bytes: The inflated data. Raises: zlib.error
def gzip_uncompress(data, truncated=False):
    decompressor = SimpleGzipDecompressor()
    inflated_data = decompressor.decompress(data)

    if not truncated:
        inflated_data += decompressor.flush()

    return inflated_data
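A round-trip sketch using the standard gzip module to produce input, assuming the function above and its decompressor dependency are importable:

import gzip

data = gzip.compress(b'hello world')
print(gzip_uncompress(data))                       # b'hello world'
# With truncated=True, a partial stream does not raise on a missing trailer.
print(gzip_uncompress(data[:-4], truncated=True))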
361,197
Mark the item with the given status. Args: status: a value from :class:`Status`. increment_try_count: if True, increment the ``try_count`` value
def set_status(self, status: Status, increment_try_count: bool=True,
               filename: str=None):
    url = self.url_record.url
    assert not self._try_count_incremented, (url, status)

    if increment_try_count:
        self._try_count_incremented = True

    _logger.debug(__('Marking URL {0} status {1}.', url, status))

    url_result = URLResult()
    url_result.filename = filename

    self.app_session.factory['URLTable'].check_in(
        url,
        status,
        increment_try_count=increment_try_count,
        url_result=url_result,
    )

    self._processed = True
361,205
Return the document encoding from an HTTP header. Args: response (Response): An instance of :class:`.http.Response`. Returns: ``str``, ``None``: The codec name.
def get_heading_encoding(response):
    encoding = wpull.protocol.http.util.parse_charset(
        response.fields.get('content-type', ''))

    if encoding:
        return wpull.string.normalize_codec_name(encoding)
    else:
        return None
361,212
Return the likely encoding of the response document. Args: response (Response): An instance of :class:`.http.Response`. is_html (bool): See :func:`.util.detect_encoding`. peek (int): The maximum number of bytes of the document to be analyzed. Returns: ``str``, ``None``: The codec name.
def detect_response_encoding(response, is_html=False, peek=131072):
    encoding = get_heading_encoding(response)

    encoding = wpull.string.detect_encoding(
        wpull.util.peek_file(response.body, peek),
        encoding=encoding, is_html=is_html
    )

    _logger.debug(__('Got encoding: {0}', encoding))

    return encoding
361,213
Add a single URL to the table. Args: url: The URL to be added. url_properties: Additional values to be saved. url_data: Additional data to be saved.
def add_one(self, url: str,
            url_properties: Optional[URLProperties]=None,
            url_data: Optional[URLData]=None):
    self.add_many([AddURLInfo(url, url_properties, url_data)])
361,217
Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values.
def check_in(self, url: str, new_status: Status,
             increment_try_count: bool=True,
             url_result: Optional[URLResult]=None):
    # Interface method with no body in the source; concrete URL table
    # implementations are expected to override it.
    raise NotImplementedError()
361,218
Create an instance. Args: name (str): The name of the class. args: The arguments to pass to the class. kwargs: The keyword arguments to pass to the class. Returns: instance
def new(self, name, *args, **kwargs):
    if name in self._instance_map:
        raise ValueError('Instance {0} is already initialized'
                         .format(name))

    instance = self._class_map[name](*args, **kwargs)
    self._instance_map[name] = instance

    return instance
361,227
Normalize the key name to title case. For example, ``normalize_name('content-id')`` will become ``Content-Id`` Args: name (str): The name to normalize. overrides (set, sequence): A set or sequence containing keys that should be cased to themselves. For example, passing ``set('WARC-Type')`` will normalize any key named "warc-type" to ``WARC-Type`` instead of the default ``Warc-Type``. Returns: str
def normalize_name(name, overrides=None):
    normalized_name = name.title()

    if overrides:
        override_map = dict([(name.title(), name) for name in overrides])
        return override_map.get(normalized_name, normalized_name)
    else:
        return normalized_name
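Two illustrative calls:

print(normalize_name('content-id'))                          # 'Content-Id'
print(normalize_name('warc-type', overrides={'WARC-Type'}))  # 'WARC-Type'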
361,229
Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored. Raises: :class:`ValueError` if the record is malformed.
def parse(self, string, strict=True):
    if isinstance(string, bytes):
        errors = 'strict' if strict else 'replace'
        string = string.decode(self.encoding, errors=errors)

    if not self.raw:
        self.raw = string
    else:
        self.raw += string

    lines = unfold_lines(string).splitlines()

    for line in lines:
        if line:
            if ':' not in line:
                if strict:
                    raise ValueError('Field missing colon.')
                else:
                    continue

            name, value = line.split(':', 1)
            name = name.strip()
            value = value.strip()

            self.add(name, value)
361,233
Open a file object on to the Response Body. Args: filename: The path where the file is to be saved. response: Response. mode: The file mode. This function will create the directories if they do not exist.
def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):
    # Use %-style placeholders so the logging module interpolates them.
    _logger.debug('Saving file to %s, mode=%s.', filename, mode)

    dir_path = os.path.dirname(filename)

    if dir_path and not os.path.exists(dir_path):
        os.makedirs(dir_path)

    response.body = Body(open(filename, mode))
361,272
Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response
def set_timestamp(cls, filename: str, response: HTTPResponse):
    last_modified = response.fields.get('Last-Modified')

    if not last_modified:
        return

    try:
        last_modified = email.utils.parsedate(last_modified)
    except ValueError:
        _logger.exception('Failed to parse date.')
        return

    # parsedate() signals failure by returning None rather than raising.
    if last_modified is None:
        return

    last_modified = time.mktime(last_modified)

    os.utime(filename, (time.time(), last_modified))
361,273
Prepend the HTTP response header to the file. Args: filename: The path of the file response: Response
def save_headers(cls, filename: str, response: HTTPResponse):
    new_filename = filename + '-new'

    with open(new_filename, 'wb') as new_file:
        new_file.write(response.header())

        with wpull.util.reset_file_offset(response.body):
            response.body.seek(0)
            shutil.copyfileobj(response.body, new_file)

    os.remove(filename)
    os.rename(new_filename, filename)
361,274
Return whether the connection should be closed. Args: http_version (str): The HTTP version string like ``HTTP/1.0``. connection_field (str): The value for the ``Connection`` header.
def should_close(http_version, connection_field):
    connection_field = (connection_field or '').lower()

    if http_version == 'HTTP/1.0':
        return connection_field.replace('-', '') != 'keepalive'
    else:
        return connection_field == 'close'
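The defaults per HTTP version, as a worked example:

print(should_close('HTTP/1.0', None))          # True: 1.0 defaults to close
print(should_close('HTTP/1.0', 'Keep-Alive'))  # False
print(should_close('HTTP/1.1', None))          # False: 1.1 defaults to keep-alive
print(should_close('HTTP/1.1', 'close'))       # True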
361,298
Return the file text and processed absolute links. Args: file: A file object containing the document. encoding (str): The encoding of the document. base_url (str): The URL at which the document is located. Returns: iterator: Each item is a tuple: 1. str: The text. 2. bool: Whether the text is a link.
def iter_processed_text(self, file, encoding=None, base_url=None):
    for text, is_link in self.iter_text(file, encoding):
        if is_link and base_url:
            new_link = urljoin_safe(base_url, text, allow_fragments=False)

            if new_link:
                yield (new_link, is_link)
            else:
                yield (new_link, False)
        else:
            yield (text, is_link)
361,354
Update the bandwidth meter. Args: data_len (int): The number of bytes transferred since the last call to :func:`feed`. feed_time (float): Current time.
def feed(self, data_len, feed_time=None):
    self._bytes_transferred += data_len
    self._collected_bytes_transferred += data_len

    time_now = feed_time or time.time()
    time_diff = time_now - self._last_feed_time

    if time_diff < self._sample_min_time:
        return

    self._last_feed_time = time.time()

    if data_len == 0 and time_diff >= self._stall_time:
        self._stalled = True
        return

    self._samples.append((time_diff, self._collected_bytes_transferred))
    self._collected_bytes_transferred = 0
361,361
Load the response and increment the counter. Args: response (:class:`.http.request.Response`): The response from a previous request.
def load(self, response):
    self._response = response

    if self.next_location(raw=True):
        self._num_redirects += 1
361,372
Returns the next location. Args: raw (bool): If True, the original string contained in the Location field will be returned. Otherwise, the URL will be normalized to a complete URL. Returns: str, None: If str, the location. Otherwise, no next location.
def next_location(self, raw=False):
    if self._response:
        location = self._response.fields.get('location')

        if not location or raw:
            return location

        return wpull.url.urljoin(
            self._response.request.url_info.url, location)
361,373
Return an iterator of links found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. Returns: iterable: str
def read_links(self, file, encoding=None):
    return [item[0] for item in self.iter_text(file, encoding) if item[1]]
361,450
Set the exit code based on the error type. Args: error (:class:`Exception`): An exception instance.
def _update_exit_code_from_error(self, error):
    for error_type, exit_code in self.ERROR_CODE_MAP.items():
        if isinstance(error, error_type):
            self.update_exit_code(exit_code)
            break
    else:
        self.update_exit_code(ExitStatus.generic_error)
361,457
Set the exit code if it is more serious than before. Args: code: The exit code.
def update_exit_code(self, code: int):
    if code:
        if self._exit_code:
            self._exit_code = min(self._exit_code, code)
        else:
            self._exit_code = code
361,458
Consult by fetching robots.txt as needed. Args: request: The request to be made to get the file. Returns: True if the URL can be fetched. Coroutine.
def consult_robots_txt(self, request: HTTPRequest) -> bool:
    if not self._robots_txt_checker:
        return True

    result = yield from self._robots_txt_checker.can_fetch(request)

    return result
361,461
Consult the URL filter. Args: url_info: The URL info. url_record: The URL record. is_redirect: Whether the request is a redirect and it is desired that it spans hosts. Returns: tuple: 1. bool: The verdict. 2. str: A short reason string: nofilters, filters, redirect. 3. dict: The result from :func:`DemuxURLFilter.test_info`
def consult_filters(self, url_info: URLInfo, url_record: URLRecord,
                    is_redirect: bool=False) -> Tuple[bool, str, dict]:
    if not self._url_filter:
        return True, 'nofilters', None

    test_info = self._url_filter.test_info(url_info, url_record)

    verdict = test_info['verdict']

    if verdict:
        reason = 'filters'
    elif is_redirect and self.is_only_span_hosts_failed(test_info):
        verdict = True
        reason = 'redirect'
    else:
        reason = 'filters'

    return verdict, reason, test_info
361,462
Return the wait time between requests. Args: seconds: The original time in seconds. item_session: The current item session. error: The error encountered, if any. Returns: The time in seconds.
def plugin_wait_time(seconds: float, item_session: ItemSession,
                     error: Optional[Exception]=None) -> float:
    return seconds
361,476
Upload local images to Mapillary Args: import_path: Directory path to where the images are stored. verbose: Print extra warnings and errors. skip_subfolders: Skip images stored in subdirectories. Returns: Images are uploaded to Mapillary and flagged locally as uploaded.
def upload(import_path, verbose=False, skip_subfolders=False,
           number_threads=None, max_attempts=None, video_import_path=None,
           dry_run=False, api_version=1.0):
    # sanity check if video file is passed
    if video_import_path and (not os.path.isdir(video_import_path) and
                              not os.path.isfile(video_import_path)):
        print("Error, video path " + video_import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # in case of video processing, adjust the import path
    if video_import_path:
        # set sampling path
        video_sampling_path = "mapillary_sampled_video_frames"
        video_dirname = video_import_path if os.path.isdir(
            video_import_path) else os.path.dirname(video_import_path)
        import_path = os.path.join(
            os.path.abspath(import_path), video_sampling_path
        ) if import_path else os.path.join(
            os.path.abspath(video_dirname), video_sampling_path)

    # basic check for all
    if not import_path or not os.path.isdir(import_path):
        print("Error, import directory " + import_path +
              " does not exist, exiting...")
        sys.exit(1)

    # get list of files to process
    total_file_list = uploader.get_total_file_list(
        import_path, skip_subfolders)
    upload_file_list = uploader.get_upload_file_list(
        import_path, skip_subfolders)
    failed_file_list = uploader.get_failed_upload_file_list(
        import_path, skip_subfolders)
    success_file_list = uploader.get_success_upload_file_list(
        import_path, skip_subfolders)
    to_finalize_file_list = uploader.get_finalize_file_list(
        import_path, skip_subfolders)

    if len(success_file_list) == len(total_file_list):
        print("All images have already been uploaded")
    else:
        if len(failed_file_list):
            upload_failed = raw_input(
                "Retry uploading previously failed image uploads? [y/n]: "
            ) if not ipc.is_enabled() else 'y'
            # if yes, add images to the upload list
            if upload_failed in ["y", "Y", "yes", "Yes"]:
                upload_file_list.extend(failed_file_list)

        # verify the images in the upload list, they need to have the image
        # description and certain MAP properties
        upload_file_list = [
            f for f in upload_file_list if verify_mapillary_tag(f)]

        if not len(upload_file_list) and not len(to_finalize_file_list):
            print("No images to upload.")
            print('Please check if all images contain the required '
                  'Mapillary metadata.\nIf not, you can use '
                  '"mapillary_tools process" to add them')
            sys.exit(1)

        if len(upload_file_list):
            # get upload params for the manual upload images, group them per
            # sequence and separate direct upload images
            params = {}
            list_per_sequence_mapping = {}
            direct_upload_file_list = []
            for image in upload_file_list:
                log_root = uploader.log_rootpath(image)
                upload_params_path = os.path.join(
                    log_root, "upload_params_process.json")
                if os.path.isfile(upload_params_path):
                    with open(upload_params_path, "rb") as jf:
                        params[image] = json.load(
                            jf, object_hook=uploader.ascii_encode_dict)
                        sequence = params[image]["key"]
                        if sequence in list_per_sequence_mapping:
                            list_per_sequence_mapping[sequence].append(image)
                        else:
                            list_per_sequence_mapping[sequence] = [image]
                else:
                    direct_upload_file_list.append(image)

            # inform how many images are to be uploaded and how many are
            # being skipped from upload
            print("Uploading {} images with valid mapillary tags "
                  "(Skipping {})".format(
                      len(upload_file_list),
                      len(total_file_list) - len(upload_file_list)))

            if api_version == 2.0:
                # The source contains only a bare attribute access here
                # (a no-op); the API v2 upload path appears unfinished.
                uploader.uploadfile_list

            if len(direct_upload_file_list):
                uploader.upload_file_list_direct(
                    direct_upload_file_list, number_threads, max_attempts)
            for idx, sequence in enumerate(list_per_sequence_mapping):
                uploader.upload_file_list_manual(
                    list_per_sequence_mapping[sequence], params, idx,
                    number_threads, max_attempts)

        if len(to_finalize_file_list):
            params = {}
            sequences = []
            for image in to_finalize_file_list:
                log_root = uploader.log_rootpath(image)
                upload_params_path = os.path.join(
                    log_root, "upload_params_process.json")
                if os.path.isfile(upload_params_path):
                    with open(upload_params_path, "rb") as jf:
                        image_params = json.load(
                            jf, object_hook=uploader.ascii_encode_dict)
                        sequence = image_params["key"]
                        if sequence not in sequences:
                            params[image] = image_params
                            sequences.append(sequence)
            for image in params:
                uploader.upload_done_file(**params[image])
            uploader.flag_finalization(to_finalize_file_list)

    uploader.print_summary(upload_file_list)
361,634
The memcached "append" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
def append(self, key, value, expire=0, noreply=None):
    if noreply is None:
        noreply = self.default_noreply
    return self._store_cmd(b'append', {key: value}, expire, noreply)[key]
361,900
The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
def prepend(self, key, value, expire=0, noreply=None):
    if noreply is None:
        noreply = self.default_noreply
    return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
361,901
The memcached "get" command, but only for one key, as a convenience. Args: key: str, see class docs for details. default: value that will be returned if the key was not found. Returns: The value for the key, or default if the key wasn't found.
def get(self, key, default=None):
    return self._fetch_cmd(b'get', [key], False).get(key, default)
361,903
The memcached "gets" command for one key, as a convenience. Args: key: str, see class docs for details. default: value that will be returned if the key was not found. cas_default: same behaviour as default argument. Returns: A tuple of (value, cas) or (default, cas_defaults) if the key was not found.
def gets(self, key, default=None, cas_default=None):
    defaults = (default, cas_default)
    return self._fetch_cmd(b'gets', [key], True).get(key, defaults)
361,904
The memcached "delete" command. Args: key: str, see class docs for details. noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, always returns True. Otherwise returns True if the key was deleted, and False if it wasn't found.
def delete(self, key, noreply=None):
    if noreply is None:
        noreply = self.default_noreply
    cmd = b'delete ' + self.check_key(key)
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'delete', noreply)
    if noreply:
        return True
    return results[0] == b'DELETED'
361,905
The memcached "incr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
def incr(self, key, value, noreply=False):
    key = self.check_key(key)
    cmd = b'incr ' + key + b' ' + six.text_type(value).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'incr', noreply)
    if noreply:
        return None
    if results[0] == b'NOT_FOUND':
        return None
    return int(results[0])
361,907
The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to increment the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
def decr(self, key, value, noreply=False):
    key = self.check_key(key)
    cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'decr', noreply)
    if noreply:
        return None
    if results[0] == b'NOT_FOUND':
        return None
    return int(results[0])
361,908
The memcached "touch" command. Args: key: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True if the expiration time was updated, False if the key wasn't found.
def touch(self, key, expire=0, noreply=None):
    if noreply is None:
        noreply = self.default_noreply
    key = self.check_key(key)
    cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'touch', noreply)
    if noreply:
        return True
    return results[0] == b'TOUCHED'
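These methods read like a pymemcache-style client; a brief usage sketch, where the import path and server address are assumptions:

from pymemcache.client.base import Client  # assumed import path

client = Client(('localhost', 11211))      # illustrative server address
client.set('counter', '10')
print(client.incr('counter', 5))           # 15
print(client.decr('counter', 3))           # 12
print(client.touch('counter', expire=60))  # True if the key exists
print(client.get('counter'))               # b'12'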
361,909
The memcached "stats" command. The returned keys depend on what the "stats" command returns. A best effort is made to convert values to appropriate Python types, defaulting to strings when a conversion cannot be made. Args: *arg: extra string arguments to the "stats" command. See the memcached protocol documentation for more information. Returns: A dict of the returned stats.
def stats(self, *args):
    result = self._fetch_cmd(b'stats', args, False)

    for key, value in six.iteritems(result):
        converter = STAT_TYPES.get(key, int)
        try:
            result[key] = converter(value)
        except Exception:
            pass

    return result
361,910
The memcached "cache_memlimit" command. Args: memlimit: int, the number of megabytes to set as the new cache memory limit. Returns: If no exception is raised, always returns True.
def cache_memlimit(self, memlimit):
    self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False)
    return True
361,911
The memcached "flush_all" command. Args: delay: optional int, the number of seconds to wait before flushing, or zero to flush immediately (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
def flush_all(self, delay=0, noreply=None):
    if noreply is None:
        noreply = self.default_noreply
    cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
    if noreply:
        cmd += b' noreply'
    cmd += b'\r\n'
    results = self._misc_cmd([cmd], b'flush_all', noreply)
    if noreply:
        return True
    return results[0] == b'OK'
361,913
Makes the parameter table for the paper, saving it to a tex file in the tables folder. Also makes two partial parameter tables for the slides.

Parameters
----------
filename : str
    Name of the file in which to save output (in the tables directory).
    Suffix .tex is automatically added.
params :
    Object containing the parameter values.

Returns
-------
None
def makeParameterTable(filename, params):
    # Calibrated macroeconomic parameters
    macro_panel = "\multicolumn{3}{c}{\\textbf{Macroeconomic Parameters} } \n"
    macro_panel += "\\\\ $\\kapShare$ & " + "{:.2f}".format(params.CapShare) + " & Capital's Share of Income \n"
    macro_panel += "\\\\ $\\daleth$ & " + "{:.2f}".format(params.DeprFacAnn) + "^{1/4} & Depreciation Factor \n"
    macro_panel += "\\\\ $\sigma_{\Theta}^{2}$ & " + "{:.5f}".format(params.TranShkAggVar) + " & Variance Aggregate Transitory Shocks \n"
    macro_panel += "\\\\ $\sigma_{\Psi}^{2}$ & " + "{:.5f}".format(params.PermShkAggVar) + " & Variance Aggregate Permanent Shocks \n"

    # Steady state values
    SS_panel = "\multicolumn{3}{c}{ \\textbf{Steady State of Perfect Foresight DSGE Model} } \\ \n"
    SS_panel += "\\\\ \multicolumn{3}{c}{ $(\\sigma_{\\Psi}=\\sigma_{\\Theta}=\\sigma_{\\psi}=\\sigma_{\\theta}=\wp=\\PDies=0$, $\\Phi_t = 1)$} \\ \n"
    SS_panel += "\\\\ $\\breve{K}/\\breve{K}^{\\kapShare}$ & " + "{:.1f}".format(params.KYratioSS) + " & SS Capital to Output Ratio \n"
    SS_panel += "\\\\ $\\breve{K}$ & " + "{:.2f}".format(params.KSS) + " & SS Capital to Labor Productivity Ratio ($=12^{1/(1-\\kapShare)}$) \n"
    SS_panel += "\\\\ $\\breve{\\Wage}$ & " + "{:.2f}".format(params.wRteSS) + " & SS Wage Rate ($=(1-\\kapShare)\\breve{K}^{\\kapShare}$) \n"
    SS_panel += "\\\\ $\\breve{\\mathsf{r}}$ & " + "{:.2f}".format(params.rFreeSS) + " & SS Interest Rate ($=\\kapShare \\breve{K}^{\\kapShare-1}$) \n"
    SS_panel += "\\\\ $\\breve{\\Rprod}$ & " + "{:.3f}".format(params.RfreeSS) + "& SS Between-Period Return Factor ($=\\daleth + \\breve{\\mathsf{r}}$) \n"

    # Calibrated preference parameters
    pref_panel = "\multicolumn{3}{c}{ \\textbf{Preference Parameters} } \n"
    pref_panel += "\\\\ $\\rho$ & " + "{:.0f}".format(params.CRRA) + ". & Coefficient of Relative Risk Aversion \n"
    pref_panel += "\\\\ $\\beta_{SOE}$ & " + "{:.3f}".format(params.DiscFacSOE) + " & SOE Discount Factor \n"  # ($=0.99 \\cdot \\PLives / (\\breve{\\mathcal{R}} \\Ex [\\pmb{\\psi}^{-\CRRA}])$)
    pref_panel += "\\\\ $\\beta_{DSGE}$ & " + "{:.3f}".format(params.DiscFacDSGE) + " & HA-DSGE Discount Factor ($=\\breve{\\Rprod}^{-1}$) \n"
    pref_panel += "\\\\ $\Pi$ & " + "{:.2f}".format(params.UpdatePrb) + " & Probability of Updating Expectations (if Sticky) \n"

    # Idiosyncratic shock parameters
    idio_panel = "\multicolumn{3}{c}{ \\textbf{Idiosyncratic Shock Parameters} } \n"
    idio_panel += "\\\\ $\sigma_{\\theta}^{2}$ & " + "{:.3f}".format(params.TranShkVar) + " & Variance Idiosyncratic Tran Shocks (=$4 \\times$ Annual) \n"
    idio_panel += "\\\\ $\sigma_{\psi}^{2}$ &" + "{:.3f}".format(params.PermShkVar) + " & Variance Idiosyncratic Perm Shocks (=$\\frac{1}{4} \\times$ Annual) \n"
    idio_panel += "\\\\ $\wp$ & " + "{:.3f}".format(params.UnempPrb) + " & Probability of Unemployment Spell \n"
    idio_panel += "\\\\ $\PDies$ & " + "{:.3f}".format(params.DiePrb) + " & Probability of Mortality \n"

    # Make full parameter table for paper
    paper_output = "\provideboolean{Slides} \setboolean{Slides}{false} \n"
    paper_output += "\\begin{minipage}{\\textwidth}\n"
    paper_output += " \\begin{table}\n"
    paper_output += " \\caption{Calibration}\label{table:calibration}\n"
    paper_output += "\\begin{tabular}{cd{5}l} \n"
    paper_output += "\\\\ \\toprule \n"
    paper_output += macro_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += SS_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += pref_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += idio_panel
    paper_output += "\\\\ \\bottomrule \n"
    paper_output += "\end{tabular}\n"
    paper_output += "\end{table}\n"
    paper_output += "\end{minipage}\n"
    paper_output += "\ifthenelse{\\boolean{StandAlone}}{\end{document}}{} \n"
    with open(tables_dir + filename + '.tex', 'w') as f:
        f.write(paper_output)

    # Make two partial parameter tables for the slides
    slides1_output = "\\begin{center}\label{table:calibration1} \n"
    slides1_output += "\\begin{tabular}{cd{5}l} \n"
    slides1_output += "\\\\ \\toprule \n"
    slides1_output += macro_panel
    slides1_output += "\\\\ \\midrule \n"
    slides1_output += SS_panel
    slides1_output += "\\\\ \\bottomrule \n"
    slides1_output += "\end{tabular} \n"
    slides1_output += "\end{center} \n"
    with open(tables_dir + filename + '_1.tex', 'w') as f:
        f.write(slides1_output)

    slides2_output = "\\begin{center}\label{table:calibration2} \n"
    slides2_output += "\\begin{tabular}{cd{5}l} \n"
    slides2_output += "\\\\ \\toprule \n"
    slides2_output += pref_panel
    slides2_output += "\\\\ \\midrule \n"
    slides2_output += idio_panel
    slides2_output += "\\\\ \\bottomrule \n"
    slides2_output += "\end{tabular} \n"
    slides2_output += "\end{center} \n"
    with open(tables_dir + filename + '_2.tex', 'w') as f:
        f.write(slides2_output)
362,109
Deploy all required raiden contracts and return a dict of contract_name:address Args: max_num_of_token_networks (Optional[int]): The max number of tokens that can be registered to the TokenNetworkRegistry. If None, the argument is omitted from the call to the constructor of TokenNetworkRegistry.
def deploy_raiden_contracts(
        self,
        max_num_of_token_networks: Optional[int],
) -> DeployedContracts:
    deployed_contracts: DeployedContracts = {
        'contracts_version': self.contract_version_string(),
        'chain_id': int(self.web3.version.network),
        'contracts': {},
    }

    self._deploy_and_remember(
        CONTRACT_ENDPOINT_REGISTRY, [], deployed_contracts)
    secret_registry = self._deploy_and_remember(
        contract_name=CONTRACT_SECRET_REGISTRY,
        arguments=[],
        deployed_contracts=deployed_contracts,
    )
    token_network_registry_args = [
        secret_registry.address,
        deployed_contracts['chain_id'],
        DEPLOY_SETTLE_TIMEOUT_MIN,
        DEPLOY_SETTLE_TIMEOUT_MAX,
    ]
    if max_num_of_token_networks:
        token_network_registry_args.append(max_num_of_token_networks)
    self._deploy_and_remember(
        contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,
        arguments=token_network_registry_args,
        deployed_contracts=deployed_contracts,
    )

    return deployed_contracts
363,726
Use join-contracts.py to concatenate all imported Solidity files. Args: source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
def join_sources(source_module: DeploymentModule, contract_name: str):
    joined_file = Path(__file__).parent.joinpath('joined.sol')
    remapping = {module: str(path) for module, path in
                 contracts_source_path().items()}
    command = [
        './utils/join-contracts.py',
        '--import-map',
        json.dumps(remapping),
        str(contracts_source_path_of_deployment_module(
            source_module,
        ).joinpath(contract_name + '.sol')),
        str(joined_file),
    ]
    working_dir = Path(__file__).parent.parent
    try:
        subprocess.check_call(command, cwd=working_dir)
    except subprocess.CalledProcessError as ex:
        print(f'cd {str(working_dir)}; '
              f'{subprocess.list2cmdline(command)} failed.')
        raise ex

    return joined_file.read_text()
363,760
Calls Etherscan API for verifying the Solidity source of a contract. Args: chain_id: EIP-155 chain id of the Ethereum chain apikey: key for calling Etherscan API source_module: a module name to look up contracts_source_path() contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
def etherscan_verify_contract(
        chain_id: int,
        apikey: str,
        source_module: DeploymentModule,
        contract_name: str,
):
    etherscan_api = api_of_chain_id[chain_id]
    deployment_info = get_contracts_deployment_info(
        chain_id=chain_id,
        module=source_module,
    )
    if deployment_info is None:
        raise FileNotFoundError(
            f'Deployment file not found for chain_id={chain_id} and module={source_module}',
        )
    contract_manager = ContractManager(contracts_precompiled_path())
    data = post_data_for_etherscan_verification(
        apikey=apikey,
        deployment_info=deployment_info['contracts'][contract_name],
        source=join_sources(source_module=source_module, contract_name=contract_name),
        contract_name=contract_name,
        metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),
        constructor_args=get_constructor_args(
            deployment_info=deployment_info,
            contract_name=contract_name,
            contract_manager=contract_manager,
        ),
    )
    response = requests.post(etherscan_api, data=data)
    content = json.loads(response.content.decode())
    print(content)
    print(f'Status: {content["status"]}; {content["message"]} ; GUID = {content["result"]}')

    # Derive the web UI URL from the API URL, e.g.
    # https://api-ropsten.etherscan.io/api -> https://ropsten.etherscan.io
    etherscan_url = etherscan_api.replace('api-', '').replace('api.', '')
    if etherscan_url.endswith('/api'):
        etherscan_url = etherscan_url[:-len('/api')]
    etherscan_url += '/verifyContract2?a=' + data['contractaddress']
    manual_submission_guide = (
        f'Submit the verification manually at {etherscan_url}'
    )

    if content['status'] != '1':
        if content['result'] == 'Contract source code already verified':
            return
        else:
            raise ValueError(
                'Etherscan submission failed for an unknown reason\n' +
                manual_submission_guide,
            )

    # Submission succeeded; poll Etherscan with the obtained GUID until
    # verification passes, fails, or the retries run out.
    guid = content['result']
    status = '0'
    retries = 10
    while status == '0' and retries > 0:
        retries -= 1
        r = guid_status(etherscan_api=etherscan_api, guid=guid)
        status = r['status']
        if r['result'] == 'Fail - Unable to verify':
            raise ValueError(manual_submission_guide)
        if r['result'] == 'Pass - Verified':
            return
        print('Retrying...')
        sleep(5)
    raise TimeoutError(manual_submission_guide)
363,763
Sets the Elasticsearch hosts to use Args: hosts (str or list): A single hostname or URL, or a list of hostnames or URLs use_ssl (bool): Use an HTTPS connection to the server ssl_cert_path (str): Path to the certificate chain
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
    if not isinstance(hosts, list):
        hosts = [hosts]
    conn_params = {
        "hosts": hosts,
        "timeout": 20
    }
    if use_ssl:
        conn_params['use_ssl'] = True
        if ssl_cert_path:
            conn_params['verify_certs'] = True
            conn_params['ca_certs'] = ssl_cert_path
        else:
            conn_params['verify_certs'] = False
    connections.create_connection(**conn_params)
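A short usage sketch follows; the hostnames and certificate path are placeholders, and a reachable Elasticsearch cluster is assumed:

# Usage sketch (hostnames and cert path are placeholders)
set_hosts("localhost:9200")
set_hosts(["https://es1.example.com", "https://es2.example.com"],
          use_ssl=True, ssl_cert_path="/etc/ssl/certs/ca.pem")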
363,975
Create Elasticsearch indexes Args: names (list): A list of index names settings (dict): Index settings
def create_indexes(names, settings=None): for name in names: index = Index(name) try: if not index.exists(): logger.debug("Creating Elasticsearch index: {0}".format(name)) if settings is None: index.settings(number_of_shards=1, number_of_replicas=1) else: index.settings(**settings) index.create() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__()))
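For example, assuming a connection has already been registered with set_hosts, daily indexes can be created with explicit shard settings (the names and settings here are illustrative only):

set_hosts("localhost:9200")
create_indexes(["dmarc_aggregate-2019-01-01", "dmarc_forensic-2019-01-01"],
               settings={"number_of_shards": 1, "number_of_replicas": 0})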
363,976
Updates index mappings Args: aggregate_indexes (list): A list of aggregate index names forensic_indexes (list): A list of forensic index names
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): version = 2 if aggregate_indexes is None: aggregate_indexes = [] if forensic_indexes is None: forensic_indexes = [] for aggregate_index_name in aggregate_indexes: if not Index(aggregate_index_name).exists(): continue aggregate_index = Index(aggregate_index_name) doc = "doc" fo_field = "published_policy.fo" fo = "fo" fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field]) fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]["mappings"] if doc not in fo_mapping: continue fo_mapping = fo_mapping[doc][fo_field]["mapping"][fo] fo_type = fo_mapping["type"] if fo_type == "long": new_index_name = "{0}-v{1}".format(aggregate_index_name, version) body = {"properties": {"published_policy.fo": { "type": "text", "fields": { "keyword": { "type": "keyword", "ignore_above": 256 } } } } } Index(new_index_name).create() Index(new_index_name).put_mapping(doc_type=doc, body=body) reindex(connections.get_connection(), aggregate_index_name, new_index_name) Index(aggregate_index_name).delete() for forensic_index in forensic_indexes: pass
363,977
Saves a parsed DMARC aggregate report to Elasticsearch Args: aggregate_report (OrderedDict): A parsed aggregate report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved
def save_aggregate_report_to_elasticsearch(aggregate_report,
                                           index_suffix=None,
                                           monthly_indexes=False):
    logger.debug("Saving aggregate report to Elasticsearch")
    aggregate_report = aggregate_report.copy()
    metadata = aggregate_report["report_metadata"]
    org_name = metadata["org_name"]
    report_id = metadata["report_id"]
    domain = aggregate_report["policy_published"]["domain"]
    begin_date = human_timestamp_to_datetime(metadata["begin_date"])
    end_date = human_timestamp_to_datetime(metadata["end_date"])
    begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
    end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
    if monthly_indexes:
        index_date = begin_date.strftime("%Y-%m")
    else:
        index_date = begin_date.strftime("%Y-%m-%d")
    aggregate_report["begin_date"] = begin_date
    aggregate_report["end_date"] = end_date
    date_range = [aggregate_report["begin_date"],
                  aggregate_report["end_date"]]

    org_name_query = Q(dict(match=dict(org_name=org_name)))
    report_id_query = Q(dict(match=dict(report_id=report_id)))
    domain_query = Q(dict(match={"published_policy.domain": domain}))
    begin_date_query = Q(dict(match=dict(date_range=begin_date)))
    end_date_query = Q(dict(match=dict(date_range=end_date)))

    search = Search(index="dmarc_aggregate*")
    query = org_name_query & report_id_query & domain_query
    query = query & begin_date_query & end_date_query
    search.query = query

    existing = search.execute()
    if len(existing) > 0:
        raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
                           "with a date range of {3} UTC to {4} UTC already "
                           "exists in "
                           "Elasticsearch".format(report_id,
                                                  org_name,
                                                  domain,
                                                  begin_date_human,
                                                  end_date_human))
    published_policy = _PublishedPolicy(
        domain=aggregate_report["policy_published"]["domain"],
        adkim=aggregate_report["policy_published"]["adkim"],
        aspf=aggregate_report["policy_published"]["aspf"],
        p=aggregate_report["policy_published"]["p"],
        sp=aggregate_report["policy_published"]["sp"],
        pct=aggregate_report["policy_published"]["pct"],
        fo=aggregate_report["policy_published"]["fo"]
    )

    for record in aggregate_report["records"]:
        agg_doc = _AggregateReportDoc(
            xml_schema=aggregate_report["xml_schema"],
            org_name=metadata["org_name"],
            org_email=metadata["org_email"],
            org_extra_contact_info=metadata["org_extra_contact_info"],
            report_id=metadata["report_id"],
            date_range=date_range,
            errors=metadata["errors"],
            published_policy=published_policy,
            source_ip_address=record["source"]["ip_address"],
            source_country=record["source"]["country"],
            source_reverse_dns=record["source"]["reverse_dns"],
            source_base_domain=record["source"]["base_domain"],
            message_count=record["count"],
            disposition=record["policy_evaluated"]["disposition"],
            dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
            spf_aligned=record["policy_evaluated"]["spf"] == "pass",
            header_from=record["identifiers"]["header_from"],
            envelope_from=record["identifiers"]["envelope_from"],
            envelope_to=record["identifiers"]["envelope_to"]
        )

        for override in record["policy_evaluated"]["policy_override_reasons"]:
            agg_doc.add_policy_override(type_=override["type"],
                                        comment=override["comment"])

        for dkim_result in record["auth_results"]["dkim"]:
            agg_doc.add_dkim_result(domain=dkim_result["domain"],
                                    selector=dkim_result["selector"],
                                    result=dkim_result["result"])

        for spf_result in record["auth_results"]["spf"]:
            agg_doc.add_spf_result(domain=spf_result["domain"],
                                   scope=spf_result["scope"],
                                   result=spf_result["result"])

        index = "dmarc_aggregate"
        if index_suffix:
            index = "{0}_{1}".format(index, index_suffix)
        index = "{0}-{1}".format(index, index_date)
create_indexes([index]) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__()))
363,978
Saves a parsed DMARC forensic report to Elasticsearch Args: forensic_report (OrderedDict): A parsed forensic report index_suffix (str): The suffix of the name of the index to save to monthly_indexes (bool): Use monthly indexes instead of daily indexes Raises: AlreadySaved
def save_forensic_report_to_elasticsearch(forensic_report, index_suffix=None, monthly_indexes=False): logger.debug("Saving forensic report to Elasticsearch") forensic_report = forensic_report.copy() sample_date = None if forensic_report["parsed_sample"]["date"] is not None: sample_date = forensic_report["parsed_sample"]["date"] sample_date = human_timestamp_to_datetime(sample_date) original_headers = forensic_report["parsed_sample"]["headers"] headers = OrderedDict() for original_header in original_headers: headers[original_header.lower()] = original_headers[original_header] arrival_date_human = forensic_report["arrival_date_utc"] arrival_date = human_timestamp_to_datetime(arrival_date_human) search = Search(index="dmarc_forensic*") arrival_query = {"match": {"arrival_date": arrival_date}} q = Q(arrival_query) from_ = None to_ = None subject = None if "from" in headers: from_ = headers["from"] from_query = {"match": {"sample.headers.from": from_}} q = q & Q(from_query) if "to" in headers: to_ = headers["to"] to_query = {"match": {"sample.headers.to": to_}} q = q & Q(to_query) if "subject" in headers: subject = headers["subject"] subject_query = {"match": {"sample.headers.subject": subject}} q = q & Q(subject_query) search.query = q existing = search.execute() if len(existing) > 0: raise AlreadySaved("A forensic sample to {0} from {1} " "with a subject of {2} and arrival date of {3} " "already exists in " "Elasticsearch".format(to_, from_, subject, arrival_date_human )) parsed_sample = forensic_report["parsed_sample"] sample = _ForensicSampleDoc( raw=forensic_report["sample"], headers=headers, headers_only=forensic_report["sample_headers_only"], date=sample_date, subject=forensic_report["parsed_sample"]["subject"], filename_safe_subject=parsed_sample["filename_safe_subject"], body=forensic_report["parsed_sample"]["body"] ) for address in forensic_report["parsed_sample"]["to"]: sample.add_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["reply_to"]: sample.add_reply_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["cc"]: sample.add_cc(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["bcc"]: sample.add_bcc(display_name=address["display_name"], address=address["address"]) for attachment in forensic_report["parsed_sample"]["attachments"]: sample.add_attachment(filename=attachment["filename"], content_type=attachment["mail_content_type"], sha256=attachment["sha256"]) try: forensic_doc = _ForensicReportDoc( feedback_type=forensic_report["feedback_type"], user_agent=forensic_report["user_agent"], version=forensic_report["version"], original_mail_from=forensic_report["original_mail_from"], arrival_date=arrival_date, domain=forensic_report["reported_domain"], original_envelope_id=forensic_report["original_envelope_id"], authentication_results=forensic_report["authentication_results"], delivery_results=forensic_report["delivery_result"], source_ip_address=forensic_report["source"]["ip_address"], source_country=forensic_report["source"]["country"], source_reverse_dns=forensic_report["source"]["reverse_dns"], source_base_domain=forensic_report["source"]["base_domain"], authentication_mechanisms=forensic_report[ "authentication_mechanisms"], auth_failure=forensic_report["auth_failure"], dkim_domain=forensic_report["dkim_domain"], original_rcpt_to=forensic_report["original_rcpt_to"], sample=sample ) index = 
"dmarc_forensic" if index_suffix: index = "{0}_{1}".format(index, index_suffix) if monthly_indexes: index_date = arrival_date.strftime("%Y-%m") else: index_date = arrival_date.strftime("%Y-%m-%d") index = "{0}-{1}".format(index, index_date) create_indexes([index]) forensic_doc.meta.index = index try: forensic_doc.save() except Exception as e: raise ElasticsearchError( "Elasticsearch error: {0}".format(e.__str__())) except KeyError as e: raise InvalidForensicReport( "Forensic report missing required field: {0}".format(e.__str__()))
363,979
Saves aggregate DMARC reports to Kafka Args: aggregate_reports (list): A list of aggregate report dictionaries to save to Kafka aggregate_topic (str): The name of the Kafka topic
def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic):
    if isinstance(aggregate_reports, dict):
        aggregate_reports = [aggregate_reports]
    if len(aggregate_reports) < 1:
        return
    for report in aggregate_reports:
        report['date_range'] = self.generate_daterange(report)
        report = self.strip_metadata(report)
        # Send one record ("slice") at a time; whole reports can exceed
        # Kafka's default 1 MB message size limit.
        for record_slice in report['records']:
            record_slice['date_range'] = report['date_range']
            record_slice['org_name'] = report['org_name']
            record_slice['org_email'] = report['org_email']
            record_slice['policy_published'] = report['policy_published']
            record_slice['report_id'] = report['report_id']
            logger.debug("Saving aggregate report slice to Kafka")
            try:
                self.producer.send(aggregate_topic, record_slice)
            except UnknownTopicOrPartitionError:
                raise KafkaError(
                    "Kafka error: Unknown topic or partition on broker")
            except Exception as e:
                raise KafkaError(
                    "Kafka error: {0}".format(e.__str__()))
    try:
        self.producer.flush()
    except Exception as e:
        raise KafkaError(
            "Kafka error: {0}".format(e.__str__()))
363,992
Saves forensic DMARC reports to Kafka, sends individual records (slices) since Kafka requires messages to be <= 1MB by default. Args: forensic_reports (list): A list of forensic report dicts to save to Kafka forensic_topic (str): The name of the Kafka topic
def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic):
    if isinstance(forensic_reports, dict):
        forensic_reports = [forensic_reports]
    if len(forensic_reports) < 1:
        return
    # Send each report individually, as the docstring describes; a whole
    # batch can exceed Kafka's default 1 MB message size limit.
    for report in forensic_reports:
        try:
            logger.debug("Saving forensic report to Kafka")
            self.producer.send(forensic_topic, report)
        except UnknownTopicOrPartitionError:
            raise KafkaError(
                "Kafka error: Unknown topic or partition on broker")
        except Exception as e:
            raise KafkaError(
                "Kafka error: {0}".format(e.__str__()))
    try:
        self.producer.flush()
    except Exception as e:
        raise KafkaError(
            "Kafka error: {0}".format(e.__str__()))
363,993
Converts a record from a DMARC aggregate report into a more consistent format Args: record (OrderedDict): The record to convert nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Sets the DNS timeout in seconds Returns: OrderedDict: The converted record
def _parse_report_record(record, nameservers=None, dns_timeout=2.0,
                         parallel=False):
    if nameservers is None:
        nameservers = ["1.1.1.1", "1.0.0.1",
                       "2606:4700:4700::1111", "2606:4700:4700::1001",
                       ]
    record = record.copy()
    new_record = OrderedDict()
    new_record_source = get_ip_address_info(record["row"]["source_ip"],
                                            cache=IP_ADDRESS_CACHE,
                                            nameservers=nameservers,
                                            timeout=dns_timeout,
                                            parallel=parallel)
    new_record["source"] = new_record_source
    new_record["count"] = int(record["row"]["count"])
    policy_evaluated = record["row"]["policy_evaluated"].copy()
    new_policy_evaluated = OrderedDict([("disposition", "none"),
                                        ("dkim", "fail"),
                                        ("spf", "fail"),
                                        ("policy_override_reasons", [])
                                        ])
    if "disposition" in policy_evaluated:
        new_policy_evaluated["disposition"] = policy_evaluated["disposition"]
        if new_policy_evaluated["disposition"].strip().lower() == "pass":
            new_policy_evaluated["disposition"] = "none"
    if "dkim" in policy_evaluated:
        new_policy_evaluated["dkim"] = policy_evaluated["dkim"]
    if "spf" in policy_evaluated:
        new_policy_evaluated["spf"] = policy_evaluated["spf"]
    reasons = []
    # Read alignment from new_policy_evaluated, which has safe defaults,
    # instead of assuming the report included dkim/spf results
    spf_aligned = new_policy_evaluated["spf"] == "pass"
    dkim_aligned = new_policy_evaluated["dkim"] == "pass"
    dmarc_aligned = spf_aligned or dkim_aligned
    new_record["alignment"] = dict()
    new_record["alignment"]["spf"] = spf_aligned
    new_record["alignment"]["dkim"] = dkim_aligned
    new_record["alignment"]["dmarc"] = dmarc_aligned
    if "reason" in policy_evaluated:
        if isinstance(policy_evaluated["reason"], list):
            reasons = policy_evaluated["reason"]
        else:
            reasons = [policy_evaluated["reason"]]
    for reason in reasons:
        if "comment" not in reason:
            reason["comment"] = None
    new_policy_evaluated["policy_override_reasons"] = reasons
    new_record["policy_evaluated"] = new_policy_evaluated
    new_record["identifiers"] = record["identifiers"].copy()
    new_record["auth_results"] = OrderedDict([("dkim", []), ("spf", [])])
    if record["auth_results"] is not None:
        auth_results = record["auth_results"].copy()
        if "spf" not in auth_results:
            auth_results["spf"] = []
        if "dkim" not in auth_results:
            auth_results["dkim"] = []
    else:
        auth_results = new_record["auth_results"].copy()
    if not isinstance(auth_results["dkim"], list):
        auth_results["dkim"] = [auth_results["dkim"]]
    for result in auth_results["dkim"]:
        if "domain" in result and result["domain"] is not None:
            new_result = OrderedDict([("domain", result["domain"])])
            if "selector" in result and result["selector"] is not None:
                new_result["selector"] = result["selector"]
            else:
                new_result["selector"] = "none"
            if "result" in result and result["result"] is not None:
                new_result["result"] = result["result"]
            else:
                new_result["result"] = "none"
            new_record["auth_results"]["dkim"].append(new_result)
    if not isinstance(auth_results["spf"], list):
        auth_results["spf"] = [auth_results["spf"]]
    for result in auth_results["spf"]:
        new_result = OrderedDict([("domain", result["domain"])])
        if "scope" in result and result["scope"] is not None:
            new_result["scope"] = result["scope"]
        else:
            new_result["scope"] = "mfrom"
        if "result" in result and result["result"] is not None:
            new_result["result"] = result["result"]
        else:
            new_result["result"] = "none"
        new_record["auth_results"]["spf"].append(new_result)
    if "envelope_from" not in new_record["identifiers"]:
        envelope_from = None
        if len(auth_results["spf"]) > 0:
            envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
        if envelope_from is not None:
            envelope_from = str(envelope_from).lower()
        new_record["identifiers"]["envelope_from"] = envelope_from
    elif new_record["identifiers"]["envelope_from"] is None:
        if len(auth_results["spf"]) > 0:
            envelope_from = new_record["auth_results"]["spf"][-1]["domain"]
            if envelope_from is not None:
                envelope_from = str(envelope_from).lower()
            new_record["identifiers"]["envelope_from"] = envelope_from
    envelope_to = None
    if "envelope_to" in new_record["identifiers"]:
        envelope_to = new_record["identifiers"]["envelope_to"]
        del new_record["identifiers"]["envelope_to"]
    # Re-insert envelope_to so it is always the last identifier key
    new_record["identifiers"]["envelope_to"] = envelope_to
    return new_record
363,994
Parses a DMARC XML report string and returns a consistent OrderedDict Args: xml (str): A string of DMARC aggregate report XML nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds parallel (bool): Parallel processing Returns: OrderedDict: The parsed aggregate DMARC report
def parse_aggregate_report_xml(xml, nameservers=None, timeout=2.0, parallel=False): errors = [] try: xmltodict.parse(xml)["feedback"] except Exception as e: errors.append(e.__str__()) try: # Replace XML header (sometimes they are invalid) xml = xml_header_regex.sub("<?xml version=\"1.0\"?>", xml) # Remove invalid schema tags xml = xml_schema_regex.sub('', xml) report = xmltodict.parse(xml)["feedback"] report_metadata = report["report_metadata"] schema = "draft" if "version" in report: schema = report["version"] new_report = OrderedDict([("xml_schema", schema)]) new_report_metadata = OrderedDict() if report_metadata["org_name"] is None: if report_metadata["email"] is not None: report_metadata["org_name"] = report_metadata[ "email"].split("@")[-1] org_name = report_metadata["org_name"] if org_name is not None: org_name = get_base_domain(org_name) new_report_metadata["org_name"] = org_name new_report_metadata["org_email"] = report_metadata["email"] extra = None if "extra_contact_info" in report_metadata: extra = report_metadata["extra_contact_info"] new_report_metadata["org_extra_contact_info"] = extra new_report_metadata["report_id"] = report_metadata["report_id"] report_id = new_report_metadata["report_id"] report_id = report_id.replace("<", "").replace(">", "").split("@")[0] new_report_metadata["report_id"] = report_id date_range = report["report_metadata"]["date_range"] date_range["begin"] = timestamp_to_human(date_range["begin"]) date_range["end"] = timestamp_to_human(date_range["end"]) new_report_metadata["begin_date"] = date_range["begin"] new_report_metadata["end_date"] = date_range["end"] if "error" in report["report_metadata"]: if type(report["report_metadata"]["error"]) != list: errors = [report["report_metadata"]["error"]] else: errors = report["report_metadata"]["error"] new_report_metadata["errors"] = errors new_report["report_metadata"] = new_report_metadata records = [] policy_published = report["policy_published"] new_policy_published = OrderedDict() new_policy_published["domain"] = policy_published["domain"] adkim = "r" if "adkim" in policy_published: if policy_published["adkim"] is not None: adkim = policy_published["adkim"] new_policy_published["adkim"] = adkim aspf = "r" if "aspf" in policy_published: if policy_published["aspf"] is not None: aspf = policy_published["aspf"] new_policy_published["aspf"] = aspf new_policy_published["p"] = policy_published["p"] sp = new_policy_published["p"] if "sp" in policy_published: if policy_published["sp"] is not None: sp = report["policy_published"]["sp"] new_policy_published["sp"] = sp pct = "100" if "pct" in policy_published: if policy_published["pct"] is not None: pct = report["policy_published"]["pct"] new_policy_published["pct"] = pct fo = "0" if "fo" in policy_published: if policy_published["fo"] is not None: fo = report["policy_published"]["fo"] new_policy_published["fo"] = fo new_report["policy_published"] = new_policy_published if type(report["record"]) == list: for record in report["record"]: report_record = _parse_report_record(record, nameservers=nameservers, dns_timeout=timeout, parallel=parallel) records.append(report_record) else: report_record = _parse_report_record(report["record"], nameservers=nameservers, dns_timeout=timeout, parallel=parallel) records.append(report_record) new_report["records"] = records return new_report except expat.ExpatError as error: raise InvalidAggregateReport( "Invalid XML: {0}".format(error.__str__())) except KeyError as error: raise InvalidAggregateReport( "Missing field: 
{0}".format(error.__str__())) except AttributeError: raise InvalidAggregateReport("Report missing required section") except Exception as error: raise InvalidAggregateReport( "Unexpected error: {0}".format(error.__str__()))
363,995
Extracts XML from a zip, gzip, or plain XML file at the given path, file-like object, or bytes. Args: input_: A path to a file, a file-like object, or bytes Returns: str: The extracted XML
def extract_xml(input_):
    if isinstance(input_, str):
        file_object = open(input_, "rb")
    elif isinstance(input_, bytes):
        file_object = BytesIO(input_)
    else:
        file_object = input_
    try:
        header = file_object.read(6)
        file_object.seek(0)
        if header.startswith(MAGIC_ZIP):
            _zip = zipfile.ZipFile(file_object)
            xml = _zip.open(_zip.namelist()[0]).read().decode()
        elif header.startswith(MAGIC_GZIP):
            xml = GzipFile(fileobj=file_object).read().decode()
        elif header.startswith(MAGIC_XML):
            xml = file_object.read().decode()
        else:
            file_object.close()
            raise InvalidAggregateReport("Not a valid zip, gzip, or xml file")
        file_object.close()
    except InvalidAggregateReport:
        # Re-raise as-is instead of wrapping it in a second exception below
        raise
    except UnicodeDecodeError:
        raise InvalidAggregateReport("File objects must be opened in binary "
                                     "(rb) mode")
    except Exception as error:
        raise InvalidAggregateReport(
            "Invalid archive file: {0}".format(error.__str__()))
    return xml
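As a quick self-contained check, the function also accepts in-memory bytes; gzip stands in for a compressed report attachment here, assuming the module's MAGIC_GZIP constant matches the standard gzip magic bytes:

import gzip

payload = gzip.compress(b'<?xml version="1.0"?><feedback></feedback>')
print(extract_xml(payload))  # prints the decompressed XML string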
363,996
Parses a file at the given path, a file-like object, or bytes as an aggregate DMARC report Args: _input: A path to a file, a file-like object, or bytes nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) dns_timeout (float): Sets the DNS timeout in seconds parallel (bool): Parallel processing Returns: OrderedDict: The parsed DMARC aggregate report
def parse_aggregate_report_file(_input, nameservers=None, dns_timeout=2.0, parallel=False): xml = extract_xml(_input) return parse_aggregate_report_xml(xml, nameservers=nameservers, timeout=dns_timeout, parallel=parallel)
363,997
Converts one or more parsed aggregate reports to flat CSV format, including headers Args: reports: A parsed aggregate report or list of parsed aggregate reports Returns: str: Parsed aggregate report data in flat CSV format, including headers
def parsed_aggregate_reports_to_csv(reports):
    def to_str(obj):
        return str(obj).lower()

    fields = ["xml_schema", "org_name", "org_email",
              "org_extra_contact_info", "report_id", "begin_date",
              "end_date", "errors", "domain", "adkim", "aspf", "p", "sp",
              "pct", "fo", "source_ip_address", "source_country",
              "source_reverse_dns", "source_base_domain", "count",
              "disposition", "dkim_alignment", "spf_alignment",
              "policy_override_reasons", "policy_override_comments",
              "envelope_from", "header_from", "envelope_to", "dkim_domains",
              "dkim_selectors", "dkim_results", "spf_domains", "spf_scopes",
              "spf_results"]

    csv_file_object = StringIO(newline="\n")
    writer = DictWriter(csv_file_object, fields)
    writer.writeheader()

    if isinstance(reports, OrderedDict):
        reports = [reports]

    for report in reports:
        xml_schema = report["xml_schema"]
        org_name = report["report_metadata"]["org_name"]
        org_email = report["report_metadata"]["org_email"]
        org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
        report_id = report["report_metadata"]["report_id"]
        begin_date = report["report_metadata"]["begin_date"]
        end_date = report["report_metadata"]["end_date"]
        errors = "|".join(report["report_metadata"]["errors"])
        domain = report["policy_published"]["domain"]
        adkim = report["policy_published"]["adkim"]
        aspf = report["policy_published"]["aspf"]
        p = report["policy_published"]["p"]
        sp = report["policy_published"]["sp"]
        pct = report["policy_published"]["pct"]
        fo = report["policy_published"]["fo"]

        report_dict = dict(xml_schema=xml_schema, org_name=org_name,
                           org_email=org_email,
                           org_extra_contact_info=org_extra_contact,
                           report_id=report_id, begin_date=begin_date,
                           end_date=end_date, errors=errors, domain=domain,
                           adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct,
                           fo=fo)

        for record in report["records"]:
            # Copy so per-record values do not leak between rows
            row = report_dict.copy()
            row["source_ip_address"] = record["source"]["ip_address"]
            row["source_country"] = record["source"]["country"]
            row["source_reverse_dns"] = record["source"]["reverse_dns"]
            row["source_base_domain"] = record["source"]["base_domain"]
            row["count"] = record["count"]
            row["disposition"] = record["policy_evaluated"]["disposition"]
            row["spf_alignment"] = record["policy_evaluated"]["spf"]
            row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
            policy_override_reasons = list(map(
                lambda r: r["type"],
                record["policy_evaluated"]["policy_override_reasons"]))
            policy_override_comments = list(map(
                lambda r: r["comment"] or "none",
                record["policy_evaluated"]["policy_override_reasons"]))
            row["policy_override_reasons"] = ",".join(
                policy_override_reasons)
            row["policy_override_comments"] = "|".join(
                policy_override_comments)
            row["envelope_from"] = record["identifiers"]["envelope_from"]
            row["header_from"] = record["identifiers"]["header_from"]
            envelope_to = record["identifiers"]["envelope_to"]
            row["envelope_to"] = envelope_to
            dkim_domains = []
            dkim_selectors = []
            dkim_results = []
            for dkim_result in record["auth_results"]["dkim"]:
                dkim_domains.append(dkim_result["domain"])
                if "selector" in dkim_result:
                    dkim_selectors.append(dkim_result["selector"])
                dkim_results.append(dkim_result["result"])
            row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
            row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
            row["dkim_results"] = ",".join(map(to_str, dkim_results))
            spf_domains = []
            spf_scopes = []
            spf_results = []
            for spf_result in record["auth_results"]["spf"]:
                spf_domains.append(spf_result["domain"])
                spf_scopes.append(spf_result["scope"])
                spf_results.append(spf_result["result"])
            row["spf_domains"] = ",".join(map(to_str, spf_domains))
            row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
            # Bug fix: join the SPF results, not the DKIM results
            row["spf_results"] = ",".join(map(to_str, spf_results))

            writer.writerow(row)
            csv_file_object.flush()

    return csv_file_object.getvalue()
363,998
Converts one or more parsed forensic reports to flat CSV format, including headers Args: reports: A parsed forensic report or list of parsed forensic reports Returns: str: Parsed forensic report data in flat CSV format, including headers
def parsed_forensic_reports_to_csv(reports): fields = ["feedback_type", "user_agent", "version", "original_envelope_id", "original_mail_from", "original_rcpt_to", "arrival_date", "arrival_date_utc", "subject", "message_id", "authentication_results", "dkim_domain", "source_ip_address", "source_country", "source_reverse_dns", "source_base_domain", "delivery_result", "auth_failure", "reported_domain", "authentication_mechanisms", "sample_headers_only"] if type(reports) == OrderedDict: reports = [reports] csv_file = StringIO() csv_writer = DictWriter(csv_file, fieldnames=fields) csv_writer.writeheader() for report in reports: row = report.copy() row["source_ip_address"] = report["source"]["ip_address"] row["source_reverse_dns"] = report["source"]["reverse_dns"] row["source_base_domain"] = report["source"]["base_domain"] row["source_country"] = report["source"]["country"] del row["source"] row["subject"] = report["parsed_sample"]["subject"] row["auth_failure"] = ",".join(report["auth_failure"]) authentication_mechanisms = report["authentication_mechanisms"] row["authentication_mechanisms"] = ",".join( authentication_mechanisms) del row["sample"] del row["parsed_sample"] csv_writer.writerow(row) return csv_file.getvalue()
364,000
Returns a list of an IMAP server's capabilities Args: server (imapclient.IMAPClient): An instance of imapclient.IMAPClient Returns: list: A list of capabilities
def get_imap_capabilities(server):
    capabilities = []
    for capability in server.capabilities():
        # imapclient returns capabilities as bytes; normalize to str
        if isinstance(capability, bytes):
            capability = capability.decode("ascii", errors="replace")
        capabilities.append(str(capability))
    logger.debug("IMAP server supports: {0}".format(capabilities))
    return capabilities
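Usage sketch; the hostname and credentials are placeholders, and the imapclient package plus a reachable IMAP server are assumed:

import imapclient

server = imapclient.IMAPClient("imap.example.com", ssl=True)
server.login("user@example.com", "hunter2")
print(get_imap_capabilities(server))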
364,003
Save report data in the given directory Args: results (OrderedDict): Parsing results output_directory: The path to the directory to save in
def save_output(results, output_directory="output"): aggregate_reports = results["aggregate_reports"] forensic_reports = results["forensic_reports"] if os.path.exists(output_directory): if not os.path.isdir(output_directory): raise ValueError("{0} is not a directory".format(output_directory)) else: os.makedirs(output_directory) with open("{0}".format(os.path.join(output_directory, "aggregate.json")), "w", newline="\n", encoding="utf-8") as agg_json: agg_json.write(json.dumps(aggregate_reports, ensure_ascii=False, indent=2)) with open("{0}".format(os.path.join(output_directory, "aggregate.csv")), "w", newline="\n", encoding="utf-8") as agg_csv: csv = parsed_aggregate_reports_to_csv(aggregate_reports) agg_csv.write(csv) with open("{0}".format(os.path.join(output_directory, "forensic.json")), "w", newline="\n", encoding="utf-8") as for_json: for_json.write(json.dumps(forensic_reports, ensure_ascii=False, indent=2)) with open("{0}".format(os.path.join(output_directory, "forensic.csv")), "w", newline="\n", encoding="utf-8") as for_csv: csv = parsed_forensic_reports_to_csv(forensic_reports) for_csv.write(csv) samples_directory = os.path.join(output_directory, "samples") if not os.path.exists(samples_directory): os.makedirs(samples_directory) sample_filenames = [] for forensic_report in forensic_reports: sample = forensic_report["sample"] message_count = 0 parsed_sample = forensic_report["parsed_sample"] subject = parsed_sample["filename_safe_subject"] filename = subject while filename in sample_filenames: message_count += 1 filename = "{0} ({1})".format(subject, message_count) sample_filenames.append(filename) filename = "{0}.eml".format(filename) path = os.path.join(samples_directory, filename) with open(path, "w", newline="\n", encoding="utf-8") as sample_file: sample_file.write(sample)
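A minimal invocation with an empty result set, using the two keys the function expects (the output directory name is arbitrary):

from collections import OrderedDict

results = OrderedDict([("aggregate_reports", []),
                       ("forensic_reports", [])])
save_output(results, output_directory="dmarc_output")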
364,005
Creates a zip file of parsed report output Args: results (OrderedDict): The parsed results Returns: bytes: zip file bytes
def get_report_zip(results):
    storage = BytesIO()
    tmp_dir = tempfile.mkdtemp()
    try:
        save_output(results, tmp_dir)
        with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            # os.walk is already recursive, so a single walk over tmp_dir
            # picks up files in subdirectories such as samples/; archive
            # names are made relative to tmp_dir to preserve the layout
            for root, dirs, files in os.walk(tmp_dir):
                for file in files:
                    file_path = os.path.join(root, file)
                    if os.path.isfile(file_path):
                        arcname = os.path.join(
                            os.path.relpath(root, tmp_dir), file)
                        zip_file.write(file_path, arcname)
    finally:
        shutil.rmtree(tmp_dir)
    return storage.getvalue()
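Continuing the save_output sketch above, the returned bytes can be written straight to disk:

zip_bytes = get_report_zip(results)
with open("dmarc_reports.zip", "wb") as output_file:
    output_file.write(zip_bytes)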
364,006
Initializes the HECClient Args: url (str): The URL of the HEC access_token (str): The HEC access token index (str): The name of the index source (str): The source name verify (bool): Verify SSL certificates timeout (float): Number of seconds to wait for the server to send data before giving up
def __init__(self, url, access_token, index,
             source="parsedmarc", verify=True, timeout=60):
    url = urlparse(url)
    self.url = "{0}://{1}/services/collector/event/1.0".format(url.scheme,
                                                               url.netloc)
    # str.lstrip() strips a character set, not a prefix, which would
    # mangle tokens starting with those letters; strip the optional
    # "Splunk " prefix explicitly instead
    if access_token.startswith("Splunk "):
        access_token = access_token[len("Splunk "):]
    self.access_token = access_token
    self.index = index
    self.host = socket.getfqdn()
    self.source = source
    self.session = requests.Session()
    self.timeout = timeout
    self.session.verify = verify
    self._common_data = dict(host=self.host, source=self.source,
                             index=self.index)
    self.session.headers = {
        "User-Agent": "parsedmarc/{0}".format(__version__),
        "Authorization": "Splunk {0}".format(self.access_token)
    }
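Construction sketch; the URL, token, and index are placeholders for a real HEC endpoint. The optional "Splunk " prefix on the token is stripped automatically:

hec_client = HECClient("https://splunk.example.com:8088",
                       "Splunk 12345678-1234-1234-1234-123456789012",
                       index="dmarc", verify=False)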
364,009
Saves aggregate DMARC reports to Splunk Args: aggregate_reports (list): A list of aggregate report dictionaries to save in Splunk
def save_aggregate_reports_to_splunk(self, aggregate_reports): logger.debug("Saving aggregate reports to Splunk") if type(aggregate_reports) == dict: aggregate_reports = [aggregate_reports] if len(aggregate_reports) < 1: return data = self._common_data.copy() json_str = "" for report in aggregate_reports: for record in report["records"]: new_report = dict() for metadata in report["report_metadata"]: new_report[metadata] = report["report_metadata"][metadata] new_report["published_policy"] = report["policy_published"] new_report["source_ip_address"] = record["source"][ "ip_address"] new_report["source_country"] = record["source"]["country"] new_report["source_reverse_dns"] = record["source"][ "reverse_dns"] new_report["source_base_domain"] = record["source"][ "base_domain"] new_report["message_count"] = record["count"] new_report["disposition"] = record["policy_evaluated"][ "disposition" ] new_report["spf_aligned"] = record["alignment"]["spf"] new_report["dkim_aligned"] = record["alignment"]["dkim"] new_report["passed_dmarc"] = record["alignment"]["dmarc"] new_report["header_from"] = record["identifiers"][ "header_from"] new_report["envelope_from"] = record["identifiers"][ "envelope_from"] if "dkim" in record["auth_results"]: new_report["dkim_results"] = record["auth_results"][ "dkim"] if "spf" in record["auth_results"]: new_report["spf_results"] = record["auth_results"][ "spf"] data["sourcetype"] = "dmarc:aggregate" timestamp = human_timestamp_to_timestamp( new_report["begin_date"]) data["time"] = timestamp data["event"] = new_report.copy() json_str += "{0}\n".format(json.dumps(data)) if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) if response["code"] != 0: raise SplunkError(response["text"])
364,010
Saves forensic DMARC reports to Splunk Args: forensic_reports (list): A list of forensic report dictionaries to save in Splunk
def save_forensic_reports_to_splunk(self, forensic_reports): logger.debug("Saving forensic reports to Splunk") if type(forensic_reports) == dict: forensic_reports = [forensic_reports] if len(forensic_reports) < 1: return json_str = "" for report in forensic_reports: data = self._common_data.copy() data["sourcetype"] = "dmarc:forensic" timestamp = human_timestamp_to_timestamp( report["arrival_date_utc"]) data["time"] = timestamp data["event"] = report.copy() json_str += "{0}\n".format(json.dumps(data)) if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) if response["code"] != 0: raise SplunkError(response["text"])
364,011
Decodes a base64 string, with padding being optional Args: data: A base64 encoded string Returns: bytes: The decoded bytes
def decode_base64(data): data = bytes(data, encoding="ascii") missing_padding = len(data) % 4 if missing_padding != 0: data += b'=' * (4 - missing_padding) return base64.b64decode(data)
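For example, a value with its padding stripped still decodes:

print(decode_base64("cGFyc2VkbWFyYw"))  # b'parsedmarc'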
364,012
Gets the base domain name for the given domain .. note:: Results are based on a list of public domain suffixes at https://publicsuffix.org/list/public_suffix_list.dat. Args: domain (str): A domain or subdomain use_fresh_psl (bool): Download a fresh Public Suffix List Returns: str: The base domain of the given domain
def get_base_domain(domain, use_fresh_psl=False): psl_path = os.path.join(tempdir, "public_suffix_list.dat") def download_psl(): url = "https://publicsuffix.org/list/public_suffix_list.dat" # Use a browser-like user agent string to bypass some proxy blocks headers = {"User-Agent": USER_AGENT} fresh_psl = requests.get(url, headers=headers).text with open(psl_path, "w", encoding="utf-8") as fresh_psl_file: fresh_psl_file.write(fresh_psl) if use_fresh_psl: if not os.path.exists(psl_path): download_psl() else: psl_age = datetime.now() - datetime.fromtimestamp( os.stat(psl_path).st_mtime) if psl_age > timedelta(hours=24): try: download_psl() except Exception as error: logger.warning( "Failed to download an updated PSL {0}".format(error)) with open(psl_path, encoding="utf-8") as psl_file: psl = publicsuffix2.PublicSuffixList(psl_file) return psl.get_public_suffix(domain) else: return publicsuffix2.get_public_suffix(domain)
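Usage sketch; the exact results depend on the bundled or freshly downloaded Public Suffix List:

print(get_base_domain("mail.example.co.uk"))   # example.co.uk
print(get_base_domain("foo.bar.example.com"))  # example.com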
364,013
Queries DNS Args: domain (str): The domain or subdomain to query about record_type (str): The record type to query for cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds Returns: list: A list of answers
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0): domain = str(domain).lower() record_type = record_type.upper() cache_key = "{0}_{1}".format(domain, record_type) if cache: records = cache.get(cache_key, None) if records: return records resolver = dns.resolver.Resolver() timeout = float(timeout) if nameservers is None: nameservers = ["1.1.1.1", "1.0.0.1", "2606:4700:4700::1111", "2606:4700:4700::1001", ] resolver.nameservers = nameservers resolver.timeout = timeout resolver.lifetime = timeout if record_type == "TXT": resource_records = list(map( lambda r: r.strings, resolver.query(domain, record_type, tcp=True))) _resource_record = [ resource_record[0][:0].join(resource_record) for resource_record in resource_records if resource_record] records = [r.decode() for r in _resource_record] else: records = list(map( lambda r: r.to_text().replace('"', '').rstrip("."), resolver.query(domain, record_type, tcp=True))) if cache: cache[cache_key] = records return records
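Usage sketch; this requires network access, and the answers returned depend on live DNS:

txt_records = query_dns("example.com", "TXT", timeout=5.0)
mx_records = query_dns("example.com", "MX")
print(txt_records, mx_records)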
364,014
Resolves an IP address to a hostname using a reverse DNS query Args: ip_address (str): The IP address to resolve cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS query timeout in seconds Returns: str: The reverse DNS hostname (if any)
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0): hostname = None try: address = dns.reversename.from_address(ip_address) hostname = query_dns(address, "PTR", cache=cache, nameservers=nameservers, timeout=timeout)[0] except dns.exception.DNSException: pass return hostname
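Usage sketch; this requires network access:

print(get_reverse_dns("1.1.1.1"))  # 'one.one.one.one' at the time of writing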
364,015