Dataset columns:
id: int64, values 0 to 190k
prompt: string, lengths 21 to 13.4M
docstring: string, lengths 1 to 12k
174,519
import cProfile from io import BytesIO from html.parser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile __version__ = "4.12.2" import sys if sys.version_info.major < 3: raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.') class BeautifulSoup(Tag): """A data structure representing a parsed HTML or XML document. Most of the methods you'll call on a BeautifulSoup object are inherited from PageElement or Tag. Internally, this class defines the basic interface called by the tree builders when converting an HTML/XML document into a data structure. The interface abstracts away the differences between parsers. To write a new tree builder, you'll need to understand these methods as a whole. These methods will be called by the BeautifulSoup constructor: * reset() * feed(markup) The tree builder may call these methods from its feed() implementation: * handle_starttag(name, attrs) # See note about return value * handle_endtag(name) * handle_data(data) # Appends to the current data node * endData(containerClass) # Ends the current data node No matter how complicated the underlying parser is, you should be able to build a tree using 'start tag' events, 'end tag' events, 'data' events, and "done with data" events. If you encounter an empty-element tag (aka a self-closing tag, like HTML's <br> tag), call handle_starttag and then handle_endtag. """ # Since BeautifulSoup subclasses Tag, it's possible to treat it as # a Tag with a .name. This name makes it clear the BeautifulSoup # object isn't a real markup tag. ROOT_TAG_NAME = '[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. DEFAULT_BUILDER_FEATURES = ['html', 'fast'] # A string containing all ASCII whitespace characters, used in # endData() to detect data chunks that seem 'empty'. ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, element_classes=None, **kwargs): """Constructor. :param markup: A string or a file-like object representing markup to be parsed. :param features: Desirable features of the parser to be used. This may be the name of a specific parser ("lxml", "lxml-xml", "html.parser", or "html5lib") or it may be the type of markup to be used ("html", "html5", "xml"). It's recommended that you name a specific parser, so that Beautiful Soup gives you the same results across platforms and virtual environments. :param builder: A TreeBuilder subclass to instantiate (or instance to use) instead of looking one up based on `features`. You only need to use this if you've implemented a custom TreeBuilder. :param parse_only: A SoupStrainer. 
Only parts of the document matching the SoupStrainer will be considered. This is useful when parsing part of a document that would otherwise be too large to fit into memory. :param from_encoding: A string indicating the encoding of the document to be parsed. Pass this in if Beautiful Soup is guessing wrongly about the document's encoding. :param exclude_encodings: A list of strings indicating encodings known to be wrong. Pass this in if you don't know the document's encoding but you know Beautiful Soup's guess is wrong. :param element_classes: A dictionary mapping BeautifulSoup classes like Tag and NavigableString, to other classes you'd like to be instantiated instead as the parse tree is built. This is useful for subclassing Tag or NavigableString to modify default behavior. :param kwargs: For backwards compatibility purposes, the constructor accepts certain keyword arguments used in Beautiful Soup 3. None of these arguments do anything in Beautiful Soup 4; they will result in a warning and then be ignored. Apart from this, any keyword arguments passed into the BeautifulSoup constructor are propagated to the TreeBuilder constructor. This makes it possible to configure a TreeBuilder by passing in arguments, not just by saying which one to use. """ if 'convertEntities' in kwargs: del kwargs['convertEntities'] warnings.warn( "BS4 does not respect the convertEntities argument to the " "BeautifulSoup constructor. Entities are always converted " "to Unicode characters.") if 'markupMassage' in kwargs: del kwargs['markupMassage'] warnings.warn( "BS4 does not respect the markupMassage argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for any necessary markup massage.") if 'smartQuotesTo' in kwargs: del kwargs['smartQuotesTo'] warnings.warn( "BS4 does not respect the smartQuotesTo argument to the " "BeautifulSoup constructor. Smart quotes are always converted " "to Unicode characters.") if 'selfClosingTags' in kwargs: del kwargs['selfClosingTags'] warnings.warn( "BS4 does not respect the selfClosingTags argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for understanding self-closing tags.") if 'isHTML' in kwargs: del kwargs['isHTML'] warnings.warn( "BS4 does not respect the isHTML argument to the " "BeautifulSoup constructor. Suggest you use " "features='lxml' for HTML and features='lxml-xml' for " "XML.") def deprecated_argument(old_name, new_name): if old_name in kwargs: warnings.warn( 'The "%s" argument to the BeautifulSoup constructor ' 'has been renamed to "%s."' % (old_name, new_name), DeprecationWarning, stacklevel=3 ) return kwargs.pop(old_name) return None parse_only = parse_only or deprecated_argument( "parseOnlyThese", "parse_only") from_encoding = from_encoding or deprecated_argument( "fromEncoding", "from_encoding") if from_encoding and isinstance(markup, str): warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") from_encoding = None self.element_classes = element_classes or dict() # We need this information to track whether or not the builder # was specified well enough that we can omit the 'you need to # specify a parser' warning. original_builder = builder original_features = features if isinstance(builder, type): # A builder class was passed in; it needs to be instantiated. 
builder_class = builder builder = None elif builder is None: if isinstance(features, str): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) # At this point either we have a TreeBuilder instance in # builder, or we have a builder_class that we can instantiate # with the remaining **kwargs. if builder is None: builder = builder_class(**kwargs) if not original_builder and not ( original_features == builder.NAME or original_features in builder.ALTERNATE_NAMES ) and markup: # The user did not tell us which TreeBuilder to use, # and we had to guess. Issue a warning. if builder.is_xml: markup_type = "XML" else: markup_type = "HTML" # This code adapted from warnings.py so that we get the same line # of code as our warnings.warn() call gets, even if the answer is wrong # (as it may be in a multithreading situation). caller = None try: caller = sys._getframe(1) except ValueError: pass if caller: globals = caller.f_globals line_number = caller.f_lineno else: globals = sys.__dict__ line_number= 1 filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] if filename: # If there is no filename at all, the user is most likely in a REPL, # and the warning is not necessary. values = dict( filename=filename, line_number=line_number, parser=builder.NAME, markup_type=markup_type ) warnings.warn( self.NO_PARSER_SPECIFIED_WARNING % values, GuessedAtParserWarning, stacklevel=2 ) else: if kwargs: warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.") self.builder = builder self.is_xml = builder.is_xml self.known_xml = self.is_xml self._namespaces = dict() self.parse_only = parse_only if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() elif len(markup) <= 256 and ( (isinstance(markup, bytes) and not b'<' in markup) or (isinstance(markup, str) and not '<' in markup) ): # Issue warnings for a couple beginner problems # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # since that is sometimes the intended behavior. if not self._markup_is_url(markup): self._markup_resembles_filename(markup) rejections = [] success = False for (self.markup, self.original_encoding, self.declared_html_encoding, self.contains_replacement_characters) in ( self.builder.prepare_markup( markup, from_encoding, exclude_encodings=exclude_encodings)): self.reset() self.builder.initialize_soup(self) try: self._feed() success = True break except ParserRejectedMarkup as e: rejections.append(e) pass if not success: other_exceptions = [str(e) for e in rejections] raise ParserRejectedMarkup( "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions) ) # Clear out the markup and remove the builder's circular # reference to this object. self.markup = None self.builder.soup = None def _clone(self): """Create a new BeautifulSoup object with the same TreeBuilder, but not associated with any markup. This is the first step of the deepcopy process. 
""" clone = type(self)("", None, self.builder) # Keep track of the encoding of the original document, # since we won't be parsing it again. clone.original_encoding = self.original_encoding return clone def __getstate__(self): # Frequently a tree builder can't be pickled. d = dict(self.__dict__) if 'builder' in d and d['builder'] is not None and not self.builder.picklable: d['builder'] = type(self.builder) # Store the contents as a Unicode string. d['contents'] = [] d['markup'] = self.decode() # If _most_recent_element is present, it's a Tag object left # over from initial parse. It might not be picklable and we # don't need it. if '_most_recent_element' in d: del d['_most_recent_element'] return d def __setstate__(self, state): # If necessary, restore the TreeBuilder by looking it up. self.__dict__ = state if isinstance(self.builder, type): self.builder = self.builder() elif not self.builder: # We don't know which builder was used to build this # parse tree, so use a default we know is always available. self.builder = HTMLParserTreeBuilder() self.builder.soup = self self.reset() self._feed() return state def _decode_markup(cls, markup): """Ensure `markup` is bytes so it's safe to send into warnings.warn. TODO: warnings.warn had this problem back in 2010 but it might not anymore. """ if isinstance(markup, bytes): decoded = markup.decode('utf-8', 'replace') else: decoded = markup return decoded def _markup_is_url(cls, markup): """Error-handling method to raise a warning if incoming markup looks like a URL. :param markup: A string. :return: Whether or not the markup resembles a URL closely enough to justify a warning. """ if isinstance(markup, bytes): space = b' ' cant_start_with = (b"http:", b"https:") elif isinstance(markup, str): space = ' ' cant_start_with = ("http:", "https:") else: return False if any(markup.startswith(prefix) for prefix in cant_start_with): if not space in markup: warnings.warn( 'The input looks more like a URL than markup. You may want to use' ' an HTTP client like requests to get the document behind' ' the URL, and feed that document to Beautiful Soup.', MarkupResemblesLocatorWarning, stacklevel=3 ) return True return False def _markup_resembles_filename(cls, markup): """Error-handling method to raise a warning if incoming markup resembles a filename. :param markup: A bytestring or string. :return: Whether or not the markup resembles a filename closely enough to justify a warning. """ path_characters = '/\\' extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt'] if isinstance(markup, bytes): path_characters = path_characters.encode("utf8") extensions = [x.encode('utf8') for x in extensions] filelike = False if any(x in markup for x in path_characters): filelike = True else: lower = markup.lower() if any(lower.endswith(ext) for ext in extensions): filelike = True if filelike: warnings.warn( 'The input looks more like a filename than markup. You may' ' want to open this file and pass the filehandle into' ' Beautiful Soup.', MarkupResemblesLocatorWarning, stacklevel=3 ) return True return False def _feed(self): """Internal method that parses previously set markup, creating a large number of Tag and NavigableString objects. """ # Convert the document to Unicode. self.builder.reset() self.builder.feed(self.markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): """Reset this object to a state as though it had never parsed any markup. 
""" Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.open_tag_counter = Counter() self.preserve_whitespace_tag_stack = [] self.string_container_stack = [] self._most_recent_element = None self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, sourceline=None, sourcepos=None, **kwattrs): """Create a new Tag associated with this BeautifulSoup object. :param name: The name of the new Tag. :param namespace: The URI of the new Tag's XML namespace, if any. :param prefix: The prefix for the new Tag's XML namespace, if any. :param attrs: A dictionary of this Tag's attribute values; can be used instead of `kwattrs` for attributes like 'class' that are reserved words in Python. :param sourceline: The line number where this tag was (purportedly) found in its source document. :param sourcepos: The character position within `sourceline` where this tag was (purportedly) found. :param kwattrs: Keyword arguments for the new Tag's attribute values. """ kwattrs.update(attrs) return self.element_classes.get(Tag, Tag)( None, self.builder, name, namespace, nsprefix, kwattrs, sourceline=sourceline, sourcepos=sourcepos ) def string_container(self, base_class=None): container = base_class or NavigableString # There may be a general override of NavigableString. container = self.element_classes.get( container, container ) # On top of that, we may be inside a tag that needs a special # container class. if self.string_container_stack and container is NavigableString: container = self.builder.string_containers.get( self.string_container_stack[-1].name, container ) return container def new_string(self, s, subclass=None): """Create a new NavigableString associated with this BeautifulSoup object. """ container = self.string_container(subclass) return container(s) def insert_before(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. """ raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. 
""" raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): """Internal method called by _popToTag when a tag is closed.""" tag = self.tagStack.pop() if tag.name in self.open_tag_counter: self.open_tag_counter[tag.name] -= 1 if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: self.preserve_whitespace_tag_stack.pop() if self.string_container_stack and tag == self.string_container_stack[-1]: self.string_container_stack.pop() #print("Pop", tag.name) if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): """Internal method called by handle_starttag when a tag is opened.""" #print("Push", tag.name) if self.currentTag is not None: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] if tag.name != self.ROOT_TAG_NAME: self.open_tag_counter[tag.name] += 1 if tag.name in self.builder.preserve_whitespace_tags: self.preserve_whitespace_tag_stack.append(tag) if tag.name in self.builder.string_containers: self.string_container_stack.append(tag) def endData(self, containerClass=None): """Method called by the TreeBuilder when the end of a data segment occurs. """ if self.current_data: current_data = ''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. if not self.preserve_whitespace_tag_stack: strippable = True for i in current_data: if i not in self.ASCII_SPACES: strippable = False break if strippable: if '\n' in current_data: current_data = '\n' else: current_data = ' ' # Reset the data collector. self.current_data = [] # Should we add this string to the tree at all? if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ not self.parse_only.search(current_data)): return containerClass = self.string_container(containerClass) o = containerClass(current_data) self.object_was_parsed(o) def object_was_parsed(self, o, parent=None, most_recent_element=None): """Method called by the TreeBuilder to integrate an object into the parse tree.""" if parent is None: parent = self.currentTag if most_recent_element is not None: previous_element = most_recent_element else: previous_element = self._most_recent_element next_element = previous_sibling = next_sibling = None if isinstance(o, Tag): next_element = o.next_element next_sibling = o.next_sibling previous_sibling = o.previous_sibling if previous_element is None: previous_element = o.previous_element fix = parent.next_element is not None o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) self._most_recent_element = o parent.contents.append(o) # Check if we are inserting into an already parsed node. if fix: self._linkage_fixer(parent) def _linkage_fixer(self, el): """Make sure linkage of this fragment is sound.""" first = el.contents[0] child = el.contents[-1] descendant = child if child is first and el.parent is not None: # Parent should be linked to first child el.next_element = child # We are no longer linked to whatever this element is prev_el = child.previous_element if prev_el is not None and prev_el is not el: prev_el.next_element = None # First child should be linked to the parent, and no previous siblings. child.previous_element = el child.previous_sibling = None # We have no sibling as we've been appended as the last. 
child.next_sibling = None # This index is a tag, dig deeper for a "last descendant" if isinstance(child, Tag) and child.contents: descendant = child._last_descendant(False) # As the final step, link last descendant. It should be linked # to the parent's next sibling (if found), else walk up the chain # and find a parent with a sibling. It should have no next sibling. descendant.next_element = None descendant.next_sibling = None target = el while True: if target is None: break elif target.next_sibling is not None: descendant.next_element = target.next_sibling target.next_sibling.previous_element = child break target = target.parent def _popToTag(self, name, nsprefix=None, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If there are no open tags with the given name, nothing will be popped. :param name: Pop up to the most recent tag with this name. :param nsprefix: The namespace prefix that goes with `name`. :param inclusivePop: It this is false, pops the tag stack up to but *not* including the most recent instqance of the given tag. """ #print("Popping to %s" % name) if name == self.ROOT_TAG_NAME: # The BeautifulSoup object itself can never be popped. return most_recently_popped = None stack_size = len(self.tagStack) for i in range(stack_size - 1, 0, -1): if not self.open_tag_counter.get(name): break t = self.tagStack[i] if (name == t.name and nsprefix == t.prefix): if inclusivePop: most_recently_popped = self.popTag() break most_recently_popped = self.popTag() return most_recently_popped def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None, sourcepos=None, namespaces=None): """Called by the tree builder when a new tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. :param attrs: A dictionary of attribute values. :param sourceline: The line number where this tag was found in its source document. :param sourcepos: The character position within `sourceline` where this tag was found. :param namespaces: A dictionary of all namespace prefix mappings currently in scope in the document. If this method returns None, the tag was rejected by an active SoupStrainer. You should proceed as if the tag had not occurred in the document. For instance, if this was a self-closing tag, don't call handle_endtag. """ # print("Start tag %s: %s" % (name, attrs)) self.endData() if (self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs))): return None tag = self.element_classes.get(Tag, Tag)( self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element, sourceline=sourceline, sourcepos=sourcepos, namespaces=namespaces ) if tag is None: return tag if self._most_recent_element is not None: self._most_recent_element.next_element = tag self._most_recent_element = tag self.pushTag(tag) return tag def handle_endtag(self, name, nsprefix=None): """Called by the tree builder when an ending tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. """ #print("End tag: " + name) self.endData() self._popToTag(name, nsprefix) def handle_data(self, data): """Called by the tree builder when a chunk of textual data is encountered.""" self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal", iterator=None): """Returns a string or Unicode representation of the parse tree as an HTML or XML document. 
:param pretty_print: If this is True, indentation will be used to make the document more readable. :param eventual_encoding: The encoding of the final document. If this is None, the document will be a Unicode string. """ if self.is_xml: # Print the XML declaration encoding_part = '' if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS: # This is a special Python encoding; it can't actually # go into an XML document because it means nothing # outside of Python. eventual_encoding = None if eventual_encoding != None: encoding_part = ' encoding="%s"' % eventual_encoding prefix = '<?xml version="1.0"%s?>\n' % encoding_part else: prefix = '' if not pretty_print: indent_level = None else: indent_level = 0 return prefix + super(BeautifulSoup, self).decode( indent_level, eventual_encoding, formatter, iterator) builder_registry = TreeBuilderRegistry() The provided code snippet includes necessary dependencies for implementing the `diagnose` function. Write a Python function `def diagnose(data)` to solve the following problem: Diagnostic suite for isolating common problems. :param data: A string containing markup that needs to be explained. :return: None; diagnostics are printed to standard output. Here is the function: def diagnose(data): """Diagnostic suite for isolating common problems. :param data: A string containing markup that needs to be explained. :return: None; diagnostics are printed to standard output. """ print(("Diagnostic running on Beautiful Soup %s" % __version__)) print(("Python version %s" % sys.version)) basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: for builder in builder_registry.builders: if name in builder.features: break else: basic_parsers.remove(name) print(( "I noticed that %s is not installed. Installing it may help." % name)) if 'lxml' in basic_parsers: basic_parsers.append("lxml-xml") try: from lxml import etree print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)))) except ImportError as e: print( "lxml is not installed or couldn't be imported.") if 'html5lib' in basic_parsers: try: import html5lib print(("Found html5lib version %s" % html5lib.__version__)) except ImportError as e: print( "html5lib is not installed or couldn't be imported.") if hasattr(data, 'read'): data = data.read() for parser in basic_parsers: print(("Trying to parse your markup with %s" % parser)) success = False try: soup = BeautifulSoup(data, features=parser) success = True except Exception as e: print(("%s could not parse the markup." % parser)) traceback.print_exc() if success: print(("Here's what %s did with the markup:" % parser)) print((soup.prettify())) print(("-" * 80))
Diagnostic suite for isolating common problems. :param data: A string containing markup that needs to be explained. :return: None; diagnostics are printed to standard output.
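A quick way to exercise this record's function: call it on a small piece of broken markup and let it try every installed parser. A minimal sketch, assuming the definitions above are importable; in the released library the function ships as bs4.diagnose.diagnose.

from bs4.diagnose import diagnose  # import path assumed from the released bs4 layout

# Prints the Beautiful Soup and Python versions, flags parsers that
# are not installed, then shows what each installed parser built
# from the markup.
diagnose("<p>An unclosed paragraph <b>with stray bold")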
174,520
import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys

class BytesIO(BufferedIOBase, BinaryIO):
    def __init__(self, initial_bytes: bytes = ...) -> None: ...
    # BytesIO does not contain a "name" field. This workaround is necessary
    # to allow BytesIO sub-classes to add this field, as it is defined
    # as a read-only property on IO[].
    name: Any
    def __enter__(self: _T) -> _T: ...
    def getvalue(self) -> bytes: ...
    def getbuffer(self) -> memoryview: ...
    if sys.version_info >= (3, 7):
        def read1(self, __size: Optional[int] = ...) -> bytes: ...
    else:
        def read1(self, __size: Optional[int]) -> bytes: ...  # type: ignore

The provided code snippet includes necessary dependencies for implementing the `lxml_trace` function. Write a Python function `def lxml_trace(data, html=True, **kwargs)` to solve the following problem: Print out the lxml events that occur during parsing. This lets you see how lxml parses a document when no Beautiful Soup code is running. You can use this to determine whether an lxml-specific problem is in Beautiful Soup's lxml tree builders or in lxml itself. :param data: Some markup. :param html: If True, markup will be parsed with lxml's HTML parser. If False, lxml's XML parser will be used.

Here is the function:

def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful Soup
    code is running. You can use this to determine whether an
    lxml-specific problem is in Beautiful Soup's lxml tree builders or
    in lxml itself.

    :param data: Some markup.
    :param html: If True, markup will be parsed with lxml's HTML parser.
        If False, lxml's XML parser will be used.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    reader = BytesIO(data)
    for event, element in etree.iterparse(
        reader, html=html, recover=recover, **kwargs
    ):
        print(("%s, %4s, %s" % (event, element.tag, element.text)))
Print out the lxml events that occur during parsing. This lets you see how lxml parses a document when no Beautiful Soup code is running. You can use this to determine whether an lxml-specific problem is in Beautiful Soup's lxml tree builders or in lxml itself. :param data: Some markup. :param html: If True, markup will be parsed with lxml's HTML parser. if False, lxml's XML parser will be used.
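For context, a minimal call could look like the following. etree.iterparse reports only 'end' events by default, so the trace lists elements in closing order; the output shown is approximate.

# Passing bytes skips the function's internal encode step.
lxml_trace(b"<html><body><p>hi</p></body></html>")
# Approximate output:
#   end,    p, hi
#   end, body, None
#   end, html, None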
174,521
import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys

class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """

    def _p(self, s):
        print(s)

    def handle_starttag(self, name, attrs):
        self._p("%s START" % name)

    def handle_endtag(self, name):
        self._p("%s END" % name)

    def handle_data(self, data):
        self._p("%s DATA" % data)

    def handle_charref(self, name):
        self._p("%s CHARREF" % name)

    def handle_entityref(self, name):
        self._p("%s ENTITYREF" % name)

    def handle_comment(self, data):
        self._p("%s COMMENT" % data)

    def handle_decl(self, data):
        self._p("%s DECL" % data)

    def unknown_decl(self, data):
        self._p("%s UNKNOWN-DECL" % data)

    def handle_pi(self, data):
        self._p("%s PI" % data)

The provided code snippet includes necessary dependencies for implementing the `htmlparser_trace` function. Write a Python function `def htmlparser_trace(data)` to solve the following problem: Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. :param data: Some markup.

Here is the function:

def htmlparser_trace(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    parser = AnnouncingParser()
    parser.feed(data)
Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. :param data: Some markup.
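A short run makes the event stream concrete. AnnouncingParser does not override handle_startendtag, so the base HTMLParser splits the self-closing <br/> into paired start and end events:

htmlparser_trace("<p>one<br/>two</p>")
# Expected trace:
#   p START
#   one DATA
#   br START
#   br END
#   two DATA
#   p END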
174,522
import cProfile from io import BytesIO from html.parser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile def rdoc(num_elements=1000): """Randomly generate an invalid HTML document.""" tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] elements = [] for i in range(num_elements): choice = random.randint(0,3) if choice == 0: # New tag. tag_name = random.choice(tag_names) elements.append("<%s>" % tag_name) elif choice == 1: elements.append(rsentence(random.randint(1,4))) elif choice == 2: # Close a tag. tag_name = random.choice(tag_names) elements.append("</%s>" % tag_name) return "<html>" + "\n".join(elements) + "</html>" __version__ = "4.12.2" class BeautifulSoup(Tag): """A data structure representing a parsed HTML or XML document. Most of the methods you'll call on a BeautifulSoup object are inherited from PageElement or Tag. Internally, this class defines the basic interface called by the tree builders when converting an HTML/XML document into a data structure. The interface abstracts away the differences between parsers. To write a new tree builder, you'll need to understand these methods as a whole. These methods will be called by the BeautifulSoup constructor: * reset() * feed(markup) The tree builder may call these methods from its feed() implementation: * handle_starttag(name, attrs) # See note about return value * handle_endtag(name) * handle_data(data) # Appends to the current data node * endData(containerClass) # Ends the current data node No matter how complicated the underlying parser is, you should be able to build a tree using 'start tag' events, 'end tag' events, 'data' events, and "done with data" events. If you encounter an empty-element tag (aka a self-closing tag, like HTML's <br> tag), call handle_starttag and then handle_endtag. """ # Since BeautifulSoup subclasses Tag, it's possible to treat it as # a Tag with a .name. This name makes it clear the BeautifulSoup # object isn't a real markup tag. ROOT_TAG_NAME = '[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. DEFAULT_BUILDER_FEATURES = ['html', 'fast'] # A string containing all ASCII whitespace characters, used in # endData() to detect data chunks that seem 'empty'. ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, element_classes=None, **kwargs): """Constructor. :param markup: A string or a file-like object representing markup to be parsed. :param features: Desirable features of the parser to be used. This may be the name of a specific parser ("lxml", "lxml-xml", "html.parser", or "html5lib") or it may be the type of markup to be used ("html", "html5", "xml"). 
It's recommended that you name a specific parser, so that Beautiful Soup gives you the same results across platforms and virtual environments. :param builder: A TreeBuilder subclass to instantiate (or instance to use) instead of looking one up based on `features`. You only need to use this if you've implemented a custom TreeBuilder. :param parse_only: A SoupStrainer. Only parts of the document matching the SoupStrainer will be considered. This is useful when parsing part of a document that would otherwise be too large to fit into memory. :param from_encoding: A string indicating the encoding of the document to be parsed. Pass this in if Beautiful Soup is guessing wrongly about the document's encoding. :param exclude_encodings: A list of strings indicating encodings known to be wrong. Pass this in if you don't know the document's encoding but you know Beautiful Soup's guess is wrong. :param element_classes: A dictionary mapping BeautifulSoup classes like Tag and NavigableString, to other classes you'd like to be instantiated instead as the parse tree is built. This is useful for subclassing Tag or NavigableString to modify default behavior. :param kwargs: For backwards compatibility purposes, the constructor accepts certain keyword arguments used in Beautiful Soup 3. None of these arguments do anything in Beautiful Soup 4; they will result in a warning and then be ignored. Apart from this, any keyword arguments passed into the BeautifulSoup constructor are propagated to the TreeBuilder constructor. This makes it possible to configure a TreeBuilder by passing in arguments, not just by saying which one to use. """ if 'convertEntities' in kwargs: del kwargs['convertEntities'] warnings.warn( "BS4 does not respect the convertEntities argument to the " "BeautifulSoup constructor. Entities are always converted " "to Unicode characters.") if 'markupMassage' in kwargs: del kwargs['markupMassage'] warnings.warn( "BS4 does not respect the markupMassage argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for any necessary markup massage.") if 'smartQuotesTo' in kwargs: del kwargs['smartQuotesTo'] warnings.warn( "BS4 does not respect the smartQuotesTo argument to the " "BeautifulSoup constructor. Smart quotes are always converted " "to Unicode characters.") if 'selfClosingTags' in kwargs: del kwargs['selfClosingTags'] warnings.warn( "BS4 does not respect the selfClosingTags argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for understanding self-closing tags.") if 'isHTML' in kwargs: del kwargs['isHTML'] warnings.warn( "BS4 does not respect the isHTML argument to the " "BeautifulSoup constructor. Suggest you use " "features='lxml' for HTML and features='lxml-xml' for " "XML.") def deprecated_argument(old_name, new_name): if old_name in kwargs: warnings.warn( 'The "%s" argument to the BeautifulSoup constructor ' 'has been renamed to "%s."' % (old_name, new_name), DeprecationWarning, stacklevel=3 ) return kwargs.pop(old_name) return None parse_only = parse_only or deprecated_argument( "parseOnlyThese", "parse_only") from_encoding = from_encoding or deprecated_argument( "fromEncoding", "from_encoding") if from_encoding and isinstance(markup, str): warnings.warn("You provided Unicode markup but also provided a value for from_encoding. 
Your from_encoding will be ignored.") from_encoding = None self.element_classes = element_classes or dict() # We need this information to track whether or not the builder # was specified well enough that we can omit the 'you need to # specify a parser' warning. original_builder = builder original_features = features if isinstance(builder, type): # A builder class was passed in; it needs to be instantiated. builder_class = builder builder = None elif builder is None: if isinstance(features, str): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) # At this point either we have a TreeBuilder instance in # builder, or we have a builder_class that we can instantiate # with the remaining **kwargs. if builder is None: builder = builder_class(**kwargs) if not original_builder and not ( original_features == builder.NAME or original_features in builder.ALTERNATE_NAMES ) and markup: # The user did not tell us which TreeBuilder to use, # and we had to guess. Issue a warning. if builder.is_xml: markup_type = "XML" else: markup_type = "HTML" # This code adapted from warnings.py so that we get the same line # of code as our warnings.warn() call gets, even if the answer is wrong # (as it may be in a multithreading situation). caller = None try: caller = sys._getframe(1) except ValueError: pass if caller: globals = caller.f_globals line_number = caller.f_lineno else: globals = sys.__dict__ line_number= 1 filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] if filename: # If there is no filename at all, the user is most likely in a REPL, # and the warning is not necessary. values = dict( filename=filename, line_number=line_number, parser=builder.NAME, markup_type=markup_type ) warnings.warn( self.NO_PARSER_SPECIFIED_WARNING % values, GuessedAtParserWarning, stacklevel=2 ) else: if kwargs: warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.") self.builder = builder self.is_xml = builder.is_xml self.known_xml = self.is_xml self._namespaces = dict() self.parse_only = parse_only if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() elif len(markup) <= 256 and ( (isinstance(markup, bytes) and not b'<' in markup) or (isinstance(markup, str) and not '<' in markup) ): # Issue warnings for a couple beginner problems # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # since that is sometimes the intended behavior. if not self._markup_is_url(markup): self._markup_resembles_filename(markup) rejections = [] success = False for (self.markup, self.original_encoding, self.declared_html_encoding, self.contains_replacement_characters) in ( self.builder.prepare_markup( markup, from_encoding, exclude_encodings=exclude_encodings)): self.reset() self.builder.initialize_soup(self) try: self._feed() success = True break except ParserRejectedMarkup as e: rejections.append(e) pass if not success: other_exceptions = [str(e) for e in rejections] raise ParserRejectedMarkup( "The markup you provided was rejected by the parser. 
Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions) ) # Clear out the markup and remove the builder's circular # reference to this object. self.markup = None self.builder.soup = None def _clone(self): """Create a new BeautifulSoup object with the same TreeBuilder, but not associated with any markup. This is the first step of the deepcopy process. """ clone = type(self)("", None, self.builder) # Keep track of the encoding of the original document, # since we won't be parsing it again. clone.original_encoding = self.original_encoding return clone def __getstate__(self): # Frequently a tree builder can't be pickled. d = dict(self.__dict__) if 'builder' in d and d['builder'] is not None and not self.builder.picklable: d['builder'] = type(self.builder) # Store the contents as a Unicode string. d['contents'] = [] d['markup'] = self.decode() # If _most_recent_element is present, it's a Tag object left # over from initial parse. It might not be picklable and we # don't need it. if '_most_recent_element' in d: del d['_most_recent_element'] return d def __setstate__(self, state): # If necessary, restore the TreeBuilder by looking it up. self.__dict__ = state if isinstance(self.builder, type): self.builder = self.builder() elif not self.builder: # We don't know which builder was used to build this # parse tree, so use a default we know is always available. self.builder = HTMLParserTreeBuilder() self.builder.soup = self self.reset() self._feed() return state def _decode_markup(cls, markup): """Ensure `markup` is bytes so it's safe to send into warnings.warn. TODO: warnings.warn had this problem back in 2010 but it might not anymore. """ if isinstance(markup, bytes): decoded = markup.decode('utf-8', 'replace') else: decoded = markup return decoded def _markup_is_url(cls, markup): """Error-handling method to raise a warning if incoming markup looks like a URL. :param markup: A string. :return: Whether or not the markup resembles a URL closely enough to justify a warning. """ if isinstance(markup, bytes): space = b' ' cant_start_with = (b"http:", b"https:") elif isinstance(markup, str): space = ' ' cant_start_with = ("http:", "https:") else: return False if any(markup.startswith(prefix) for prefix in cant_start_with): if not space in markup: warnings.warn( 'The input looks more like a URL than markup. You may want to use' ' an HTTP client like requests to get the document behind' ' the URL, and feed that document to Beautiful Soup.', MarkupResemblesLocatorWarning, stacklevel=3 ) return True return False def _markup_resembles_filename(cls, markup): """Error-handling method to raise a warning if incoming markup resembles a filename. :param markup: A bytestring or string. :return: Whether or not the markup resembles a filename closely enough to justify a warning. """ path_characters = '/\\' extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt'] if isinstance(markup, bytes): path_characters = path_characters.encode("utf8") extensions = [x.encode('utf8') for x in extensions] filelike = False if any(x in markup for x in path_characters): filelike = True else: lower = markup.lower() if any(lower.endswith(ext) for ext in extensions): filelike = True if filelike: warnings.warn( 'The input looks more like a filename than markup. 
You may' ' want to open this file and pass the filehandle into' ' Beautiful Soup.', MarkupResemblesLocatorWarning, stacklevel=3 ) return True return False def _feed(self): """Internal method that parses previously set markup, creating a large number of Tag and NavigableString objects. """ # Convert the document to Unicode. self.builder.reset() self.builder.feed(self.markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): """Reset this object to a state as though it had never parsed any markup. """ Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.open_tag_counter = Counter() self.preserve_whitespace_tag_stack = [] self.string_container_stack = [] self._most_recent_element = None self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, sourceline=None, sourcepos=None, **kwattrs): """Create a new Tag associated with this BeautifulSoup object. :param name: The name of the new Tag. :param namespace: The URI of the new Tag's XML namespace, if any. :param prefix: The prefix for the new Tag's XML namespace, if any. :param attrs: A dictionary of this Tag's attribute values; can be used instead of `kwattrs` for attributes like 'class' that are reserved words in Python. :param sourceline: The line number where this tag was (purportedly) found in its source document. :param sourcepos: The character position within `sourceline` where this tag was (purportedly) found. :param kwattrs: Keyword arguments for the new Tag's attribute values. """ kwattrs.update(attrs) return self.element_classes.get(Tag, Tag)( None, self.builder, name, namespace, nsprefix, kwattrs, sourceline=sourceline, sourcepos=sourcepos ) def string_container(self, base_class=None): container = base_class or NavigableString # There may be a general override of NavigableString. container = self.element_classes.get( container, container ) # On top of that, we may be inside a tag that needs a special # container class. if self.string_container_stack and container is NavigableString: container = self.builder.string_containers.get( self.string_container_stack[-1].name, container ) return container def new_string(self, s, subclass=None): """Create a new NavigableString associated with this BeautifulSoup object. """ container = self.string_container(subclass) return container(s) def insert_before(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. """ raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, *args): """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement it because there is nothing before or after it in the parse tree. 
""" raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): """Internal method called by _popToTag when a tag is closed.""" tag = self.tagStack.pop() if tag.name in self.open_tag_counter: self.open_tag_counter[tag.name] -= 1 if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: self.preserve_whitespace_tag_stack.pop() if self.string_container_stack and tag == self.string_container_stack[-1]: self.string_container_stack.pop() #print("Pop", tag.name) if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): """Internal method called by handle_starttag when a tag is opened.""" #print("Push", tag.name) if self.currentTag is not None: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] if tag.name != self.ROOT_TAG_NAME: self.open_tag_counter[tag.name] += 1 if tag.name in self.builder.preserve_whitespace_tags: self.preserve_whitespace_tag_stack.append(tag) if tag.name in self.builder.string_containers: self.string_container_stack.append(tag) def endData(self, containerClass=None): """Method called by the TreeBuilder when the end of a data segment occurs. """ if self.current_data: current_data = ''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. if not self.preserve_whitespace_tag_stack: strippable = True for i in current_data: if i not in self.ASCII_SPACES: strippable = False break if strippable: if '\n' in current_data: current_data = '\n' else: current_data = ' ' # Reset the data collector. self.current_data = [] # Should we add this string to the tree at all? if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ not self.parse_only.search(current_data)): return containerClass = self.string_container(containerClass) o = containerClass(current_data) self.object_was_parsed(o) def object_was_parsed(self, o, parent=None, most_recent_element=None): """Method called by the TreeBuilder to integrate an object into the parse tree.""" if parent is None: parent = self.currentTag if most_recent_element is not None: previous_element = most_recent_element else: previous_element = self._most_recent_element next_element = previous_sibling = next_sibling = None if isinstance(o, Tag): next_element = o.next_element next_sibling = o.next_sibling previous_sibling = o.previous_sibling if previous_element is None: previous_element = o.previous_element fix = parent.next_element is not None o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) self._most_recent_element = o parent.contents.append(o) # Check if we are inserting into an already parsed node. if fix: self._linkage_fixer(parent) def _linkage_fixer(self, el): """Make sure linkage of this fragment is sound.""" first = el.contents[0] child = el.contents[-1] descendant = child if child is first and el.parent is not None: # Parent should be linked to first child el.next_element = child # We are no longer linked to whatever this element is prev_el = child.previous_element if prev_el is not None and prev_el is not el: prev_el.next_element = None # First child should be linked to the parent, and no previous siblings. child.previous_element = el child.previous_sibling = None # We have no sibling as we've been appended as the last. 
child.next_sibling = None # This index is a tag, dig deeper for a "last descendant" if isinstance(child, Tag) and child.contents: descendant = child._last_descendant(False) # As the final step, link last descendant. It should be linked # to the parent's next sibling (if found), else walk up the chain # and find a parent with a sibling. It should have no next sibling. descendant.next_element = None descendant.next_sibling = None target = el while True: if target is None: break elif target.next_sibling is not None: descendant.next_element = target.next_sibling target.next_sibling.previous_element = child break target = target.parent def _popToTag(self, name, nsprefix=None, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If there are no open tags with the given name, nothing will be popped. :param name: Pop up to the most recent tag with this name. :param nsprefix: The namespace prefix that goes with `name`. :param inclusivePop: It this is false, pops the tag stack up to but *not* including the most recent instqance of the given tag. """ #print("Popping to %s" % name) if name == self.ROOT_TAG_NAME: # The BeautifulSoup object itself can never be popped. return most_recently_popped = None stack_size = len(self.tagStack) for i in range(stack_size - 1, 0, -1): if not self.open_tag_counter.get(name): break t = self.tagStack[i] if (name == t.name and nsprefix == t.prefix): if inclusivePop: most_recently_popped = self.popTag() break most_recently_popped = self.popTag() return most_recently_popped def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None, sourcepos=None, namespaces=None): """Called by the tree builder when a new tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. :param attrs: A dictionary of attribute values. :param sourceline: The line number where this tag was found in its source document. :param sourcepos: The character position within `sourceline` where this tag was found. :param namespaces: A dictionary of all namespace prefix mappings currently in scope in the document. If this method returns None, the tag was rejected by an active SoupStrainer. You should proceed as if the tag had not occurred in the document. For instance, if this was a self-closing tag, don't call handle_endtag. """ # print("Start tag %s: %s" % (name, attrs)) self.endData() if (self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs))): return None tag = self.element_classes.get(Tag, Tag)( self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element, sourceline=sourceline, sourcepos=sourcepos, namespaces=namespaces ) if tag is None: return tag if self._most_recent_element is not None: self._most_recent_element.next_element = tag self._most_recent_element = tag self.pushTag(tag) return tag def handle_endtag(self, name, nsprefix=None): """Called by the tree builder when an ending tag is encountered. :param name: Name of the tag. :param nsprefix: Namespace prefix for the tag. """ #print("End tag: " + name) self.endData() self._popToTag(name, nsprefix) def handle_data(self, data): """Called by the tree builder when a chunk of textual data is encountered.""" self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal", iterator=None): """Returns a string or Unicode representation of the parse tree as an HTML or XML document. 
:param pretty_print: If this is True, indentation will be used to make the document more readable. :param eventual_encoding: The encoding of the final document. If this is None, the document will be a Unicode string. """ if self.is_xml: # Print the XML declaration encoding_part = '' if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS: # This is a special Python encoding; it can't actually # go into an XML document because it means nothing # outside of Python. eventual_encoding = None if eventual_encoding != None: encoding_part = ' encoding="%s"' % eventual_encoding prefix = '<?xml version="1.0"%s?>\n' % encoding_part else: prefix = '' if not pretty_print: indent_level = None else: indent_level = 0 return prefix + super(BeautifulSoup, self).decode( indent_level, eventual_encoding, formatter, iterator) The provided code snippet includes necessary dependencies for implementing the `benchmark_parsers` function. Write a Python function `def benchmark_parsers(num_elements=100000)` to solve the following problem: Very basic head-to-head performance benchmark. Here is the function: def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" print(("Comparative parser benchmark on Beautiful Soup %s" % __version__)) data = rdoc(num_elements) print(("Generated a large invalid HTML document (%d bytes)." % len(data))) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False try: a = time.time() soup = BeautifulSoup(data, parser) b = time.time() success = True except Exception as e: print(("%s could not parse the markup." % parser)) traceback.print_exc() if success: print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a))) from lxml import etree a = time.time() etree.HTML(data) b = time.time() print(("Raw lxml parsed the markup in %.2fs." % (b-a))) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
Very basic head-to-head performance benchmark.
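Two caveats before running this record as-is: rdoc calls an rsentence helper that the snippet does not include, and the tail of benchmark_parsers imports lxml and html5lib unconditionally, so both must be installed. A sketch of a trial run with a hypothetical stand-in for the missing helper:

def rsentence(n):
    # Hypothetical stub for the helper the record omits: n random "words".
    return " ".join("word%d" % random.randint(0, 100) for _ in range(n))

benchmark_parsers(num_elements=10000)  # smaller document keeps the trial quick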
174,523
import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys

def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    elements = []
    for i in range(num_elements):
        choice = random.randint(0,3)
        if choice == 0:
            # New tag.
            tag_name = random.choice(tag_names)
            elements.append("<%s>" % tag_name)
        elif choice == 1:
            elements.append(rsentence(random.randint(1,4)))
        elif choice == 2:
            # Close a tag.
            tag_name = random.choice(tag_names)
            elements.append("</%s>" % tag_name)
    return "<html>" + "\n".join(elements) + "</html>"

The provided code snippet includes necessary dependencies for implementing the `profile` function. Write a Python function `def profile(num_elements=100000, parser="lxml")` to solve the following problem: Use Python's profiler on a randomly generated document.

Here is the function:

def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document."""
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name
    data = rdoc(num_elements)
    vars = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)', vars, vars, filename)
    stats = pstats.Stats(filename)
    # stats.strip_dirs()
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)
Use Python's profiler on a randomly generated document.
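rdoc calls an rsentence helper that this snippet elides; the stand-in below is hypothetical (name and shape guessed from the call site, which expects a string built from 1-4 "words") so that profile can actually run.

import random
import string

def rsentence(num_words):
    # Hypothetical stand-in for the elided helper: num_words random
    # lowercase "words", joined by spaces.
    def word():
        return ''.join(random.choice(string.ascii_lowercase)
                       for _ in range(random.randint(2, 8)))
    return ' '.join(word() for _ in range(num_words))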
174,524
import logging import re import socket import threading import time from timeit import default_timer from typing import Callable, Tuple from ..registry import CollectorRegistry, REGISTRY _INVALID_GRAPHITE_CHARS = re.compile(r"[^a-zA-Z0-9_-]") def _sanitize(s): return _INVALID_GRAPHITE_CHARS.sub('_', s)
null
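A quick illustration of the sanitizer above: each character outside [a-zA-Z0-9_-] is replaced with a single underscore.

print(_sanitize('my.metric name'))  # my_metric_name
print(_sanitize('rate(5m)'))        # rate_5m_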
174,525
import io as StringIO import re from .metrics_core import Metric from .samples import Sample def text_fd_to_metric_families(fd): """Parse Prometheus text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric's. """ name = '' documentation = '' typ = 'untyped' samples = [] allowed_names = [] def build_metric(name, documentation, typ, samples): # Munge counters into OpenMetrics representation # used internally. if typ == 'counter': if name.endswith('_total'): name = name[:-6] else: new_samples = [] for s in samples: new_samples.append(Sample(s[0] + '_total', *s[1:])) samples = new_samples metric = Metric(name, documentation, typ) metric.samples = samples return metric for line in fd: line = line.strip() if line.startswith('#'): parts = line.split(None, 3) if len(parts) < 2: continue if parts[1] == 'HELP': if parts[2] != name: if name != '': yield build_metric(name, documentation, typ, samples) # New metric name = parts[2] typ = 'untyped' samples = [] allowed_names = [parts[2]] if len(parts) == 4: documentation = _replace_help_escaping(parts[3]) else: documentation = '' elif parts[1] == 'TYPE': if parts[2] != name: if name != '': yield build_metric(name, documentation, typ, samples) # New metric name = parts[2] documentation = '' samples = [] typ = parts[3] allowed_names = { 'counter': [''], 'gauge': [''], 'summary': ['_count', '_sum', ''], 'histogram': ['_count', '_sum', '_bucket'], }.get(typ, ['']) allowed_names = [name + n for n in allowed_names] else: # Ignore other comment tokens pass elif line == '': # Ignore blank lines pass else: sample = _parse_sample(line) if sample.name not in allowed_names: if name != '': yield build_metric(name, documentation, typ, samples) # New metric, yield immediately as untyped singleton name = '' documentation = '' typ = 'untyped' samples = [] allowed_names = [] yield build_metric(sample[0], documentation, typ, [sample]) else: samples.append(sample) if name != '': yield build_metric(name, documentation, typ, samples) The provided code snippet includes necessary dependencies for implementing the `text_string_to_metric_families` function. Write a Python function `def text_string_to_metric_families(text)` to solve the following problem: Parse Prometheus text format from a unicode string. See text_fd_to_metric_families. Here is the function: def text_string_to_metric_families(text): """Parse Prometheus text format from a unicode string. See text_fd_to_metric_families. """ yield from text_fd_to_metric_families(StringIO.StringIO(text))
Parse Prometheus text format from a unicode string. See text_fd_to_metric_families.
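A usage sketch of the parser; in the released prometheus_client package the same entry point is importable as prometheus_client.parser.text_string_to_metric_families.

text = '''# HELP requests_total Total requests.
# TYPE requests_total counter
requests_total 42.0
'''
for family in text_string_to_metric_families(text):
    # The family name is munged to 'requests'; samples keep the _total suffix.
    for sample in family.samples:
        print(sample.name, sample.labels, sample.value)
# requests_total {} 42.0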
174,526
from __future__ import print_function import collections import inspect import itertools import operator import re import sys if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g(*a, **k)) ContextManager.__init__ = __init__ elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4 pass elif n_args == 4: # (self, gen, args, kwds) Python 3.5 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g, a, k) ContextManager.__init__ = __init__ def get_init(cls): return cls.__init__.__func__
null
174,527
from __future__ import print_function import collections import inspect import itertools import operator import re import sys if sys.version_info >= (3,): from inspect import getfullargspec else: class getfullargspec(object): "A quick and dirty replacement for getfullargspec for Python 2.X" def __init__(self, f): self.args, self.varargs, self.varkw, self.defaults = \ inspect.getargspec(f) self.kwonlyargs = [] self.kwonlydefaults = None def __iter__(self): yield self.args yield self.varargs yield self.varkw yield self.defaults getargspec = inspect.getargspec ArgSpec = collections.namedtuple( 'ArgSpec', 'args varargs varkw defaults') def getfullargspec(func: object) -> FullArgSpec: ... The provided code snippet includes necessary dependencies for implementing the `getargspec` function. Write a Python function `def getargspec(f)` to solve the following problem: A replacement for inspect.getargspec Here is the function: def getargspec(f): """A replacement for inspect.getargspec""" spec = getfullargspec(f) return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
A replacement for inspect.getargspec
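A quick check of the shim, assuming Python 3 (where it simply adapts inspect.getfullargspec to the old 4-tuple ArgSpec shape):

def f(a, b=1, *args, **kw):
    pass

print(getargspec(f))
# ArgSpec(args=['a', 'b'], varargs='args', varkw='kw', defaults=(1,))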
174,528
from __future__ import print_function import collections import inspect import itertools import operator import re import sys class FunctionMaker(object): """ An object with the ability to create functions with a given signature. It has attributes name, doc, module, signature, defaults, dict and methods update and make. """ # Atomic get-and-increment provided by the GIL _compile_count = itertools.count() def __init__(self, func=None, name=None, signature=None, defaults=None, doc=None, module=None, funcdict=None): self.shortsignature = signature if func: # func can be a class or a callable, but not an instance method self.name = func.__name__ if self.name == '<lambda>': # small hack for lambda functions self.name = '_lambda_' self.doc = func.__doc__ self.module = func.__module__ if inspect.isfunction(func): argspec = getfullargspec(func) self.annotations = getattr(func, '__annotations__', {}) for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults'): setattr(self, a, getattr(argspec, a)) for i, arg in enumerate(self.args): setattr(self, 'arg%d' % i, arg) if sys.version_info < (3,): # easy way self.shortsignature = self.signature = ( inspect.formatargspec( formatvalue=lambda val: "", *argspec)[1:-1]) else: # Python 3 way allargs = list(self.args) allshortargs = list(self.args) if self.varargs: allargs.append('*' + self.varargs) allshortargs.append('*' + self.varargs) elif self.kwonlyargs: allargs.append('*') # single star syntax for a in self.kwonlyargs: allargs.append('%s=None' % a) allshortargs.append('%s=%s' % (a, a)) if self.varkw: allargs.append('**' + self.varkw) allshortargs.append('**' + self.varkw) self.signature = ', '.join(allargs) self.shortsignature = ', '.join(allshortargs) self.dict = func.__dict__.copy() # func=None happens when decorating a caller if name: self.name = name if signature is not None: self.signature = signature if defaults: self.defaults = defaults if doc: self.doc = doc if module: self.module = module if funcdict: self.dict = funcdict # check existence required attributes assert hasattr(self, 'name') if not hasattr(self, 'signature'): raise TypeError('You are decorating a non function: %s' % func) def update(self, func, **kw): "Update the signature of func with the data in self" func.__name__ = self.name func.__doc__ = getattr(self, 'doc', None) func.__dict__ = getattr(self, 'dict', {}) func.__defaults__ = getattr(self, 'defaults', ()) func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) func.__annotations__ = getattr(self, 'annotations', None) try: frame = sys._getframe(3) except AttributeError: # for IronPython and similar implementations callermodule = '?' 
else: callermodule = frame.f_globals.get('__name__', '?') func.__module__ = getattr(self, 'module', callermodule) func.__dict__.update(kw) def make(self, src_templ, evaldict=None, addsource=False, **attrs): "Make a new function from a given template and update the signature" src = src_templ % vars(self) # expand name and signature evaldict = evaldict or {} mo = DEF.match(src) if mo is None: raise SyntaxError('not a valid function template\n%s' % src) name = mo.group(1) # extract the function name names = set([name] + [arg.strip(' *') for arg in self.shortsignature.split(',')]) for n in names: if n in ('_func_', '_call_'): raise NameError('%s is overridden in\n%s' % (n, src)) if not src.endswith('\n'): # add a newline for old Pythons src += '\n' # Ensure each generated function has a unique filename for profilers # (such as cProfile) that depend on the tuple of (<filename>, # <definition line>, <function name>) being unique. filename = '<decorator-gen-%d>' % (next(self._compile_count),) try: code = compile(src, filename, 'single') exec(code, evaldict) except: print('Error in generated code:', file=sys.stderr) print(src, file=sys.stderr) raise func = evaldict[name] if addsource: attrs['__source__'] = src self.update(func, **attrs) return func def create(cls, obj, body, evaldict, defaults=None, doc=None, module=None, addsource=True, **attrs): """ Create a function from the strings name, signature and body. evaldict is the evaluation dictionary. If addsource is true an attribute __source__ is added to the result. The attributes attrs are added, if any. """ if isinstance(obj, str): # "name(signature)" name, rest = obj.strip().split('(', 1) signature = rest[:-1] # strip a right parens func = None else: # a function name = None signature = None func = obj self = cls(func, name, signature, defaults, doc, module) ibody = '\n'.join(' ' + line for line in body.splitlines()) return self.make('def %(name)s(%(signature)s):\n' + ibody, evaldict, addsource, **attrs) def decorate(func, caller): """ decorate(func, caller) decorates a function using a caller. """ evaldict = dict(_call_=caller, _func_=func) fun = FunctionMaker.create( func, "return _call_(_func_, %(shortsignature)s)", evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun The provided code snippet includes necessary dependencies for implementing the `decorator` function. Write a Python function `def decorator(caller, _func=None)` to solve the following problem: decorator(caller) converts a caller function into a decorator Here is the function: def decorator(caller, _func=None): """decorator(caller) converts a caller function into a decorator""" if _func is not None: # return a decorated function # this is obsolete behavior; you should use decorate instead return decorate(_func, caller) # else return a decorator function if inspect.isclass(caller): name = caller.__name__.lower() doc = 'decorator(%s) converts functions/generators into ' \ 'factories of %s objects' % (caller.__name__, caller.__name__) elif inspect.isfunction(caller): if caller.__name__ == '<lambda>': name = '_lambda_' else: name = caller.__name__ doc = caller.__doc__ else: # assume caller is an object with a __call__ method name = caller.__class__.__name__.lower() doc = caller.__call__.__doc__ evaldict = dict(_call_=caller, _decorate_=decorate) return FunctionMaker.create( '%s(func)' % name, 'return _decorate_(func, _call_)', evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
decorator(caller) converts a caller function into a decorator
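A usage sketch of decorator() with a trace caller; unlike a plain functools.wraps wrapper, the generated function is compiled with the original signature, so introspection on add still reports (x, y).

import inspect

def trace(f, *args, **kw):
    # The caller runs around every invocation of the decorated function.
    print('calling %s with args %s, %s' % (f.__name__, args, kw))
    return f(*args, **kw)

trace = decorator(trace)

@trace
def add(x, y):
    return x + y

add(1, 2)                     # prints: calling add with args (1, 2), {}
print(inspect.signature(add)) # (x, y) -- not (*args, **kw)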
174,529
from __future__ import print_function import collections import inspect import itertools import operator import re import sys if sys.version_info >= (3,): from inspect import getfullargspec else: class getfullargspec(object): "A quick and dirty replacement for getfullargspec for Python 2.X" def __init__(self, f): self.args, self.varargs, self.varkw, self.defaults = \ inspect.getargspec(f) self.kwonlyargs = [] self.kwonlydefaults = None def __iter__(self): yield self.args yield self.varargs yield self.varkw yield self.defaults getargspec = inspect.getargspec class FunctionMaker(object): """ An object with the ability to create functions with a given signature. It has attributes name, doc, module, signature, defaults, dict and methods update and make. """ # Atomic get-and-increment provided by the GIL _compile_count = itertools.count() def __init__(self, func=None, name=None, signature=None, defaults=None, doc=None, module=None, funcdict=None): self.shortsignature = signature if func: # func can be a class or a callable, but not an instance method self.name = func.__name__ if self.name == '<lambda>': # small hack for lambda functions self.name = '_lambda_' self.doc = func.__doc__ self.module = func.__module__ if inspect.isfunction(func): argspec = getfullargspec(func) self.annotations = getattr(func, '__annotations__', {}) for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults'): setattr(self, a, getattr(argspec, a)) for i, arg in enumerate(self.args): setattr(self, 'arg%d' % i, arg) if sys.version_info < (3,): # easy way self.shortsignature = self.signature = ( inspect.formatargspec( formatvalue=lambda val: "", *argspec)[1:-1]) else: # Python 3 way allargs = list(self.args) allshortargs = list(self.args) if self.varargs: allargs.append('*' + self.varargs) allshortargs.append('*' + self.varargs) elif self.kwonlyargs: allargs.append('*') # single star syntax for a in self.kwonlyargs: allargs.append('%s=None' % a) allshortargs.append('%s=%s' % (a, a)) if self.varkw: allargs.append('**' + self.varkw) allshortargs.append('**' + self.varkw) self.signature = ', '.join(allargs) self.shortsignature = ', '.join(allshortargs) self.dict = func.__dict__.copy() # func=None happens when decorating a caller if name: self.name = name if signature is not None: self.signature = signature if defaults: self.defaults = defaults if doc: self.doc = doc if module: self.module = module if funcdict: self.dict = funcdict # check existence required attributes assert hasattr(self, 'name') if not hasattr(self, 'signature'): raise TypeError('You are decorating a non function: %s' % func) def update(self, func, **kw): "Update the signature of func with the data in self" func.__name__ = self.name func.__doc__ = getattr(self, 'doc', None) func.__dict__ = getattr(self, 'dict', {}) func.__defaults__ = getattr(self, 'defaults', ()) func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) func.__annotations__ = getattr(self, 'annotations', None) try: frame = sys._getframe(3) except AttributeError: # for IronPython and similar implementations callermodule = '?' 
else: callermodule = frame.f_globals.get('__name__', '?') func.__module__ = getattr(self, 'module', callermodule) func.__dict__.update(kw) def make(self, src_templ, evaldict=None, addsource=False, **attrs): "Make a new function from a given template and update the signature" src = src_templ % vars(self) # expand name and signature evaldict = evaldict or {} mo = DEF.match(src) if mo is None: raise SyntaxError('not a valid function template\n%s' % src) name = mo.group(1) # extract the function name names = set([name] + [arg.strip(' *') for arg in self.shortsignature.split(',')]) for n in names: if n in ('_func_', '_call_'): raise NameError('%s is overridden in\n%s' % (n, src)) if not src.endswith('\n'): # add a newline for old Pythons src += '\n' # Ensure each generated function has a unique filename for profilers # (such as cProfile) that depend on the tuple of (<filename>, # <definition line>, <function name>) being unique. filename = '<decorator-gen-%d>' % (next(self._compile_count),) try: code = compile(src, filename, 'single') exec(code, evaldict) except: print('Error in generated code:', file=sys.stderr) print(src, file=sys.stderr) raise func = evaldict[name] if addsource: attrs['__source__'] = src self.update(func, **attrs) return func def create(cls, obj, body, evaldict, defaults=None, doc=None, module=None, addsource=True, **attrs): """ Create a function from the strings name, signature and body. evaldict is the evaluation dictionary. If addsource is true an attribute __source__ is added to the result. The attributes attrs are added, if any. """ if isinstance(obj, str): # "name(signature)" name, rest = obj.strip().split('(', 1) signature = rest[:-1] # strip a right parens func = None else: # a function name = None signature = None func = obj self = cls(func, name, signature, defaults, doc, module) ibody = '\n'.join(' ' + line for line in body.splitlines()) return self.make('def %(name)s(%(signature)s):\n' + ibody, evaldict, addsource, **attrs) def append(a, vancestors): """ Append ``a`` to the list of the virtual ancestors, unless it is already included. """ add = True for j, va in enumerate(vancestors): if issubclass(va, a): add = False break if issubclass(a, va): vancestors[j] = a add = False if add: vancestors.append(a) def getfullargspec(func: object) -> FullArgSpec: ... The provided code snippet includes necessary dependencies for implementing the `dispatch_on` function. Write a Python function `def dispatch_on(*dispatch_args)` to solve the following problem: Factory of decorators turning a function into a generic function dispatching on the given arguments. Here is the function: def dispatch_on(*dispatch_args): """ Factory of decorators turning a function into a generic function dispatching on the given arguments. 
""" assert dispatch_args, 'No dispatch args passed' dispatch_str = '(%s,)' % ', '.join(dispatch_args) def check(arguments, wrong=operator.ne, msg=''): """Make sure one passes the expected number of arguments""" if wrong(len(arguments), len(dispatch_args)): raise TypeError('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)) def gen_func_dec(func): """Decorator turning a function into a generic function""" # first check the dispatch arguments argset = set(getfullargspec(func).args) if not set(dispatch_args) <= argset: raise NameError('Unknown dispatch arguments %s' % dispatch_str) typemap = {} def vancestors(*types): """ Get a list of sets of virtual ancestors for the given types """ check(types) ras = [[] for _ in range(len(dispatch_args))] for types_ in typemap: for t, type_, ra in zip(types, types_, ras): if issubclass(t, type_) and type_ not in t.__mro__: append(type_, ra) return [set(ra) for ra in ras] def ancestors(*types): """ Get a list of virtual MROs, one for each type """ check(types) lists = [] for t, vas in zip(types, vancestors(*types)): n_vas = len(vas) if n_vas > 1: raise RuntimeError( 'Ambiguous dispatch for %s: %s' % (t, vas)) elif n_vas == 1: va, = vas mro = type('t', (t, va), {}).__mro__[1:] else: mro = t.__mro__ lists.append(mro[:-1]) # discard t and object return lists def register(*types): """ Decorator to register an implementation for the given types """ check(types) def dec(f): check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) typemap[types] = f return f return dec def dispatch_info(*types): """ An utility to introspect the dispatch algorithm """ check(types) lst = [] for anc in itertools.product(*ancestors(*types)): lst.append(tuple(a.__name__ for a in anc)) return lst def _dispatch(dispatch_args, *args, **kw): types = tuple(type(arg) for arg in dispatch_args) try: # fast path f = typemap[types] except KeyError: pass else: return f(*args, **kw) combinations = itertools.product(*ancestors(*types)) next(combinations) # the first one has been already tried for types_ in combinations: f = typemap.get(types_) if f is not None: return f(*args, **kw) # else call the default implementation return func(*args, **kw) return FunctionMaker.create( func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func) gen_func_dec.__name__ = 'dispatch_on' + dispatch_str return gen_func_dec
Factory of decorators turning a function into a generic function dispatching on the given arguments.
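A usage sketch: dispatching on the type of a single argument, with the undecorated body acting as the default implementation.

@dispatch_on('x')
def describe(x):
    return 'something else'

@describe.register(int)
def describe_int(x):
    return 'an int: %d' % x

@describe.register(str)
def describe_str(x):
    return 'a str: %r' % x

print(describe(1))     # an int: 1
print(describe('hi'))  # a str: 'hi'
print(describe(2.5))   # something else  (no implementation registered for float)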
174,530
from typing import Callable from urllib.parse import parse_qs from .exposition import _bake_output from .registry import CollectorRegistry, REGISTRY class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) def _bake_output(registry, accept_header, accept_encoding_header, params, disable_compression): """Bake output for metrics output.""" # Choose the correct plain text format of the output. encoder, content_type = choose_encoder(accept_header) if 'name[]' in params: registry = registry.restricted_registry(params['name[]']) output = encoder(registry) headers = [('Content-Type', content_type)] # If gzip encoding required, gzip the output. if not disable_compression and gzip_accepted(accept_encoding_header): output = gzip.compress(output) headers.append(('Content-Encoding', 'gzip')) return '200 OK', headers, output class CollectorRegistry(Collector): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. """ def __init__(self, auto_describe: bool = False, target_info: Optional[Dict[str, str]] = None): self._collector_to_names: Dict[Collector, List[str]] = {} self._names_to_collectors: Dict[str, Collector] = {} self._auto_describe = auto_describe self._lock = Lock() self._target_info: Optional[Dict[str, str]] = {} self.set_target_info(target_info) def register(self, collector: Collector) -> None: """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector: Collector) -> None: """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces and clashes with.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. 
if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): result.append(metric.name) for suffix in type_suffixes.get(metric.type, []): result.append(metric.name + suffix) return result def collect(self) -> Iterable[Metric]: """Yields metrics from the collectors in the registry.""" collectors = None ti = None with self._lock: collectors = copy.copy(self._collector_to_names) if self._target_info: ti = self._target_info_metric() if ti: yield ti for collector in collectors: yield from collector.collect() def restricted_registry(self, names: Iterable[str]) -> "RestrictedRegistry": """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) return RestrictedRegistry(names, self) def set_target_info(self, labels: Optional[Dict[str, str]]) -> None: with self._lock: if labels: if not self._target_info and 'target_info' in self._names_to_collectors: raise ValueError('CollectorRegistry already contains a target_info metric') self._names_to_collectors['target_info'] = _EmptyCollector() elif self._target_info: self._names_to_collectors.pop('target_info', None) self._target_info = labels def get_target_info(self) -> Optional[Dict[str, str]]: with self._lock: return self._target_info def _target_info_metric(self): m = Metric('target', 'Target metadata', 'info') m.add_sample('target_info', self._target_info, 1) return m def get_sample_value(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[float]: """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. """ if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None REGISTRY = CollectorRegistry(auto_describe=True) The provided code snippet includes necessary dependencies for implementing the `make_asgi_app` function. Write a Python function `def make_asgi_app(registry: CollectorRegistry = REGISTRY, disable_compression: bool = False) -> Callable` to solve the following problem: Create a ASGI app which serves the metrics from a registry. 
Here is the function: def make_asgi_app(registry: CollectorRegistry = REGISTRY, disable_compression: bool = False) -> Callable: """Create a ASGI app which serves the metrics from a registry.""" async def prometheus_app(scope, receive, send): assert scope.get("type") == "http" # Prepare parameters params = parse_qs(scope.get('query_string', b'')) accept_header = ",".join([ value.decode("utf8") for (name, value) in scope.get('headers') if name.decode("utf8").lower() == 'accept' ]) accept_encoding_header = ",".join([ value.decode("utf8") for (name, value) in scope.get('headers') if name.decode("utf8").lower() == 'accept-encoding' ]) # Bake output status, headers, output = _bake_output(registry, accept_header, accept_encoding_header, params, disable_compression) formatted_headers = [] for header in headers: formatted_headers.append(tuple(x.encode('utf8') for x in header)) # Return output payload = await receive() if payload.get("type") == "http.request": await send( { "type": "http.response.start", "status": int(status.split(' ')[0]), "headers": formatted_headers, } ) await send({"type": "http.response.body", "body": output}) return prometheus_app
Create an ASGI app which serves the metrics from a registry.
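A serving sketch, assuming uvicorn is installed; module and file names here are illustrative.

# metrics_app.py (hypothetical module)
app = make_asgi_app()

# Run from a shell, then scrape http://localhost:8000/metrics:
#   uvicorn metrics_app:app --port 8000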
174,531
import os from threading import Lock import warnings from .mmap_dict import mmap_key, MmapedDict class MutexValue: """A float protected by a mutex.""" _multiprocess = False def __init__(self, typ, metric_name, name, labelnames, labelvalues, help_text, **kwargs): self._value = 0.0 self._exemplar = None self._lock = Lock() def inc(self, amount): with self._lock: self._value += amount def set(self, value): with self._lock: self._value = value def set_exemplar(self, exemplar): with self._lock: self._exemplar = exemplar def get(self): with self._lock: return self._value def get_exemplar(self): with self._lock: return self._exemplar def MultiProcessValue(process_identifier=os.getpid): """Returns a MmapedValue class based on a process_identifier function. The 'process_identifier' function MUST comply with this simple rule: when called in simultaneously running processes it MUST return distinct values. Using a different function than the default 'os.getpid' is at your own risk. """ files = {} values = [] pid = {'value': process_identifier()} # Use a single global lock when in multi-processing mode # as we presume this means there is no threading going on. # This avoids the need to also have mutexes in __MmapDict. lock = Lock() class MmapedValue: """A float protected by a mutex backed by a per-process mmaped file.""" _multiprocess = True def __init__(self, typ, metric_name, name, labelnames, labelvalues, help_text, multiprocess_mode='', **kwargs): self._params = typ, metric_name, name, labelnames, labelvalues, help_text, multiprocess_mode # This deprecation warning can go away in a few releases when removing the compatibility if 'prometheus_multiproc_dir' in os.environ and 'PROMETHEUS_MULTIPROC_DIR' not in os.environ: os.environ['PROMETHEUS_MULTIPROC_DIR'] = os.environ['prometheus_multiproc_dir'] warnings.warn("prometheus_multiproc_dir variable has been deprecated in favor of the upper case naming PROMETHEUS_MULTIPROC_DIR", DeprecationWarning) with lock: self.__check_for_pid_change() self.__reset() values.append(self) def __reset(self): typ, metric_name, name, labelnames, labelvalues, help_text, multiprocess_mode = self._params if typ == 'gauge': file_prefix = typ + '_' + multiprocess_mode else: file_prefix = typ if file_prefix not in files: filename = os.path.join( os.environ.get('PROMETHEUS_MULTIPROC_DIR'), '{}_{}.db'.format(file_prefix, pid['value'])) files[file_prefix] = MmapedDict(filename) self._file = files[file_prefix] self._key = mmap_key(metric_name, name, labelnames, labelvalues, help_text) self._value = self._file.read_value(self._key) def __check_for_pid_change(self): actual_pid = process_identifier() if pid['value'] != actual_pid: pid['value'] = actual_pid # There has been a fork(), reset all the values. for f in files.values(): f.close() files.clear() for value in values: value.__reset() def inc(self, amount): with lock: self.__check_for_pid_change() self._value += amount self._file.write_value(self._key, self._value) def set(self, value): with lock: self.__check_for_pid_change() self._value = value self._file.write_value(self._key, self._value) def set_exemplar(self, exemplar): # TODO: Implement exemplars for multiprocess mode. return def get(self): with lock: self.__check_for_pid_change() return self._value def get_exemplar(self): # TODO: Implement exemplars for multiprocess mode. return None return MmapedValue def get_value_class(): # Should we enable multi-process mode? 
# This needs to be chosen before the first metric is constructed, # and as that may be in some arbitrary library the user/admin has # no control over we use an environment variable. if 'prometheus_multiproc_dir' in os.environ or 'PROMETHEUS_MULTIPROC_DIR' in os.environ: return MultiProcessValue() else: return MutexValue
null
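How the selection above plays out, sketched with an illustrative directory; the environment variable must be set (and the directory must exist) before the first metric is constructed.

import os

os.environ['PROMETHEUS_MULTIPROC_DIR'] = '/tmp/prom_multiproc'  # illustrative path
ValueClass = get_value_class()  # MmapedValue subclass: mmap-file-backed, fork-aware

del os.environ['PROMETHEUS_MULTIPROC_DIR']
ValueClass = get_value_class()  # MutexValue: a plain in-process float behind a Lock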
174,532
import io as StringIO import math import re from ..metrics_core import Metric, METRIC_LABEL_NAME_RE from ..samples import Exemplar, Sample, Timestamp from ..utils import floatToGoString def text_fd_to_metric_families(fd): """Parse Prometheus text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric's. """ name = None allowed_names = [] eof = False seen_names = set() type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['', '_count', '_sum', '_created'], 'histogram': ['_count', '_sum', '_bucket', '_created'], 'gaugehistogram': ['_gcount', '_gsum', '_bucket'], 'info': ['_info'], } def build_metric(name, documentation, typ, unit, samples): if typ is None: typ = 'unknown' for suffix in set(type_suffixes.get(typ, []) + [""]): if name + suffix in seen_names: raise ValueError("Clashing name: " + name + suffix) seen_names.add(name + suffix) if documentation is None: documentation = '' if unit is None: unit = '' if unit and not name.endswith("_" + unit): raise ValueError("Unit does not match metric name: " + name) if unit and typ in ['info', 'stateset']: raise ValueError("Units not allowed for this metric type: " + name) if typ in ['histogram', 'gaugehistogram']: _check_histogram(samples, name) metric = Metric(name, documentation, typ, unit) # TODO: check labelvalues are valid utf8 metric.samples = samples return metric for line in fd: if line[-1] == '\n': line = line[:-1] if eof: raise ValueError("Received line after # EOF: " + line) if not line: raise ValueError("Received blank line") if line == '# EOF': eof = True elif line.startswith('#'): parts = line.split(' ', 3) if len(parts) < 4: raise ValueError("Invalid line: " + line) if parts[2] == name and samples: raise ValueError("Received metadata after samples: " + line) if parts[2] != name: if name is not None: yield build_metric(name, documentation, typ, unit, samples) # New metric name = parts[2] unit = None typ = None documentation = None group = None seen_groups = set() group_timestamp = None group_timestamp_samples = set() samples = [] allowed_names = [parts[2]] if parts[1] == 'HELP': if documentation is not None: raise ValueError("More than one HELP for metric: " + line) documentation = _unescape_help(parts[3]) elif parts[1] == 'TYPE': if typ is not None: raise ValueError("More than one TYPE for metric: " + line) typ = parts[3] if typ == 'untyped': raise ValueError("Invalid TYPE for metric: " + line) allowed_names = [name + n for n in type_suffixes.get(typ, [''])] elif parts[1] == 'UNIT': if unit is not None: raise ValueError("More than one UNIT for metric: " + line) unit = parts[3] else: raise ValueError("Invalid line: " + line) else: sample = _parse_sample(line) if sample.name not in allowed_names: if name is not None: yield build_metric(name, documentation, typ, unit, samples) # Start an unknown metric. 
name = sample.name documentation = None unit = None typ = 'unknown' samples = [] group = None group_timestamp = None group_timestamp_samples = set() seen_groups = set() allowed_names = [sample.name] if typ == 'stateset' and name not in sample.labels: raise ValueError("Stateset missing label: " + line) if (name + '_bucket' == sample.name and (sample.labels.get('le', "NaN") == "NaN" or _isUncanonicalNumber(sample.labels['le']))): raise ValueError("Invalid le label: " + line) if (name + '_bucket' == sample.name and (not isinstance(sample.value, int) and not sample.value.is_integer())): raise ValueError("Bucket value must be an integer: " + line) if ((name + '_count' == sample.name or name + '_gcount' == sample.name) and (not isinstance(sample.value, int) and not sample.value.is_integer())): raise ValueError("Count value must be an integer: " + line) if (typ == 'summary' and name == sample.name and (not (0 <= float(sample.labels.get('quantile', -1)) <= 1) or _isUncanonicalNumber(sample.labels['quantile']))): raise ValueError("Invalid quantile label: " + line) g = tuple(sorted(_group_for_sample(sample, name, typ).items())) if group is not None and g != group and g in seen_groups: raise ValueError("Invalid metric grouping: " + line) if group is not None and g == group: if (sample.timestamp is None) != (group_timestamp is None): raise ValueError("Mix of timestamp presence within a group: " + line) if group_timestamp is not None and group_timestamp > sample.timestamp and typ != 'info': raise ValueError("Timestamps went backwards within a group: " + line) else: group_timestamp_samples = set() series_id = (sample.name, tuple(sorted(sample.labels.items()))) if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples: # Not a duplicate due to timestamp truncation. samples.append(sample) group_timestamp_samples.add(series_id) group = g group_timestamp = sample.timestamp seen_groups.add(g) if typ == 'stateset' and sample.value not in [0, 1]: raise ValueError("Stateset samples can only have values zero and one: " + line) if typ == 'info' and sample.value != 1: raise ValueError("Info samples can only have value one: " + line) if typ == 'summary' and name == sample.name and sample.value < 0: raise ValueError("Quantile values cannot be negative: " + line) if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and math.isnan( sample.value): raise ValueError("Counter-like samples cannot be NaN: " + line) if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount'] and sample.value < 0: raise ValueError("Counter-like samples cannot be negative: " + line) if sample.exemplar and not ( (typ in ['histogram', 'gaugehistogram'] and sample.name.endswith('_bucket')) or (typ in ['counter'] and sample.name.endswith('_total'))): raise ValueError("Invalid line only histogram/gaugehistogram buckets and counters can have exemplars: " + line) if name is not None: yield build_metric(name, documentation, typ, unit, samples) if not eof: raise ValueError("Missing # EOF at end") The provided code snippet includes necessary dependencies for implementing the `text_string_to_metric_families` function. Write a Python function `def text_string_to_metric_families(text)` to solve the following problem: Parse Openmetrics text format from a unicode string. See text_fd_to_metric_families. Here is the function: def text_string_to_metric_families(text): """Parse Openmetrics text format from a unicode string. See text_fd_to_metric_families. 
""" yield from text_fd_to_metric_families(StringIO.StringIO(text))
Parse OpenMetrics text format from a unicode string. See text_fd_to_metric_families.
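A usage sketch; this parser is strict, so the '# EOF' terminator is mandatory and counter samples must carry the _total suffix. In the released package the entry point is prometheus_client.openmetrics.parser.text_string_to_metric_families.

text = '''# TYPE requests counter
# HELP requests Total requests.
requests_total 42.0
# EOF
'''
for family in text_string_to_metric_families(text):
    print(family.name, family.type)       # requests counter
    for sample in family.samples:
        print(sample.name, sample.value)  # requests_total 42.0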
174,533
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def make_wsgi_app(registry: CollectorRegistry = REGISTRY, disable_compression: bool = False) -> Callable: """Create a WSGI app which serves the metrics from a registry.""" def prometheus_app(environ, start_response): # Prepare parameters accept_header = environ.get('HTTP_ACCEPT') accept_encoding_header = environ.get('HTTP_ACCEPT_ENCODING') params = parse_qs(environ.get('QUERY_STRING', '')) if environ['PATH_INFO'] == '/favicon.ico': # Serve empty response for browsers status = '200 OK' headers = [('', '')] output = b'' else: # Bake output status, headers, output = _bake_output(registry, accept_header, accept_encoding_header, params, disable_compression) # Return output start_response(status, headers) return [output] return prometheus_app class _SilentHandler(WSGIRequestHandler): """WSGI handler that does not log requests.""" def log_message(self, format, *args): """Log nothing.""" class ThreadingWSGIServer(ThreadingMixIn, WSGIServer): """Thread per request HTTP server.""" # Make worker threads "fire and forget". Beginning with Python 3.7 this # prevents a memory leak because ``ThreadingMixIn`` starts to gather all # non-daemon threads in a list in order to join on them at server close. daemon_threads = True def _get_best_family(address, port): """Automatically select address family depending on address""" # HTTPServer defaults to AF_INET, which will not start properly if # binding an ipv6 address is requested. # This function is based on what upstream python did for http.server # in https://github.com/python/cpython/pull/11767 infos = socket.getaddrinfo(address, port) family, _, _, _, sockaddr = next(iter(infos)) return family, sockaddr[0] from .asgi import make_asgi_app class CollectorRegistry(Collector): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. 
""" def __init__(self, auto_describe: bool = False, target_info: Optional[Dict[str, str]] = None): self._collector_to_names: Dict[Collector, List[str]] = {} self._names_to_collectors: Dict[str, Collector] = {} self._auto_describe = auto_describe self._lock = Lock() self._target_info: Optional[Dict[str, str]] = {} self.set_target_info(target_info) def register(self, collector: Collector) -> None: """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector: Collector) -> None: """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces and clashes with.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): result.append(metric.name) for suffix in type_suffixes.get(metric.type, []): result.append(metric.name + suffix) return result def collect(self) -> Iterable[Metric]: """Yields metrics from the collectors in the registry.""" collectors = None ti = None with self._lock: collectors = copy.copy(self._collector_to_names) if self._target_info: ti = self._target_info_metric() if ti: yield ti for collector in collectors: yield from collector.collect() def restricted_registry(self, names: Iterable[str]) -> "RestrictedRegistry": """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) return RestrictedRegistry(names, self) def set_target_info(self, labels: Optional[Dict[str, str]]) -> None: with self._lock: if labels: if not self._target_info and 'target_info' in self._names_to_collectors: raise ValueError('CollectorRegistry already contains a target_info metric') self._names_to_collectors['target_info'] = _EmptyCollector() elif self._target_info: self._names_to_collectors.pop('target_info', None) self._target_info = labels def get_target_info(self) -> Optional[Dict[str, str]]: with self._lock: return self._target_info def _target_info_metric(self): m = Metric('target', 'Target metadata', 'info') m.add_sample('target_info', self._target_info, 1) return m def get_sample_value(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[float]: """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. 
""" if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None REGISTRY = CollectorRegistry(auto_describe=True) The provided code snippet includes necessary dependencies for implementing the `start_wsgi_server` function. Write a Python function `def start_wsgi_server(port: int, addr: str = '0.0.0.0', registry: CollectorRegistry = REGISTRY) -> None` to solve the following problem: Starts a WSGI server for prometheus metrics as a daemon thread. Here is the function: def start_wsgi_server(port: int, addr: str = '0.0.0.0', registry: CollectorRegistry = REGISTRY) -> None: """Starts a WSGI server for prometheus metrics as a daemon thread.""" class TmpServer(ThreadingWSGIServer): """Copy of ThreadingWSGIServer to update address_family locally""" TmpServer.address_family, addr = _get_best_family(addr, port) app = make_wsgi_app(registry) httpd = make_server(addr, port, app, TmpServer, handler_class=_SilentHandler) t = threading.Thread(target=httpd.serve_forever) t.daemon = True t.start()
Starts a WSGI server for prometheus metrics as a daemon thread.
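Typical use is fire-and-forget at process startup; a sketch (the port is arbitrary):

start_wsgi_server(8000)  # returns immediately; a daemon thread serves REGISTRY
# ... run the application; scrape http://localhost:8000/metrics ...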
174,534
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def generate_latest(registry: CollectorRegistry = REGISTRY) -> bytes: """Returns the metrics from the registry in latest text format as a string.""" def sample_line(line): if line.labels: labelstr = '{{{0}}}'.format(','.join( ['{}="{}"'.format( k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) for k, v in sorted(line.labels.items())])) else: labelstr = '' timestamp = '' if line.timestamp is not None: # Convert to milliseconds. timestamp = f' {int(float(line.timestamp) * 1000):d}' return f'{line.name}{labelstr} {floatToGoString(line.value)}{timestamp}\n' output = [] for metric in registry.collect(): try: mname = metric.name mtype = metric.type # Munging from OpenMetrics into Prometheus format. if mtype == 'counter': mname = mname + '_total' elif mtype == 'info': mname = mname + '_info' mtype = 'gauge' elif mtype == 'stateset': mtype = 'gauge' elif mtype == 'gaugehistogram': # A gauge histogram is really a gauge, # but this captures the structure better. mtype = 'histogram' elif mtype == 'unknown': mtype = 'untyped' output.append('# HELP {} {}\n'.format( mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))) output.append(f'# TYPE {mname} {mtype}\n') om_samples: Dict[str, List[str]] = {} for s in metric.samples: for suffix in ['_created', '_gsum', '_gcount']: if s.name == metric.name + suffix: # OpenMetrics specific sample, put in a gauge at the end. om_samples.setdefault(suffix, []).append(sample_line(s)) break else: output.append(sample_line(s)) except Exception as exception: exception.args = (exception.args or ('',)) + (metric,) raise for suffix, lines in sorted(om_samples.items()): output.append('# HELP {}{} {}\n'.format(metric.name, suffix, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))) output.append(f'# TYPE {metric.name}{suffix} gauge\n') output.extend(lines) return ''.join(output).encode('utf-8') from .asgi import make_asgi_app class CollectorRegistry(Collector): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. 
""" def __init__(self, auto_describe: bool = False, target_info: Optional[Dict[str, str]] = None): self._collector_to_names: Dict[Collector, List[str]] = {} self._names_to_collectors: Dict[str, Collector] = {} self._auto_describe = auto_describe self._lock = Lock() self._target_info: Optional[Dict[str, str]] = {} self.set_target_info(target_info) def register(self, collector: Collector) -> None: """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector: Collector) -> None: """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces and clashes with.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): result.append(metric.name) for suffix in type_suffixes.get(metric.type, []): result.append(metric.name + suffix) return result def collect(self) -> Iterable[Metric]: """Yields metrics from the collectors in the registry.""" collectors = None ti = None with self._lock: collectors = copy.copy(self._collector_to_names) if self._target_info: ti = self._target_info_metric() if ti: yield ti for collector in collectors: yield from collector.collect() def restricted_registry(self, names: Iterable[str]) -> "RestrictedRegistry": """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) return RestrictedRegistry(names, self) def set_target_info(self, labels: Optional[Dict[str, str]]) -> None: with self._lock: if labels: if not self._target_info and 'target_info' in self._names_to_collectors: raise ValueError('CollectorRegistry already contains a target_info metric') self._names_to_collectors['target_info'] = _EmptyCollector() elif self._target_info: self._names_to_collectors.pop('target_info', None) self._target_info = labels def get_target_info(self) -> Optional[Dict[str, str]]: with self._lock: return self._target_info def _target_info_metric(self): m = Metric('target', 'Target metadata', 'info') m.add_sample('target_info', self._target_info, 1) return m def get_sample_value(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[float]: """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. 
""" if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None The provided code snippet includes necessary dependencies for implementing the `write_to_textfile` function. Write a Python function `def write_to_textfile(path: str, registry: CollectorRegistry) -> None` to solve the following problem: Write metrics to the given path. This is intended for use with the Node exporter textfile collector. The path must end in .prom for the textfile collector to process it. Here is the function: def write_to_textfile(path: str, registry: CollectorRegistry) -> None: """Write metrics to the given path. This is intended for use with the Node exporter textfile collector. The path must end in .prom for the textfile collector to process it.""" tmppath = f'{path}.{os.getpid()}.{threading.current_thread().ident}' with open(tmppath, 'wb') as f: f.write(generate_latest(registry)) # rename(2) is atomic but fails on Windows if the destination file exists if os.name == 'nt': os.replace(tmppath, path) else: os.rename(tmppath, path)
Write metrics to the given path. This is intended for use with the Node exporter textfile collector. The path must end in .prom for the textfile collector to process it.
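A usage sketch with an illustrative metric and output path; Gauge is the standard prometheus_client metric type, assumed importable alongside CollectorRegistry.

from prometheus_client import CollectorRegistry, Gauge

registry = CollectorRegistry()
g = Gauge('batch_last_success_unixtime',
          'Last time the batch job succeeded.', registry=registry)
g.set_to_current_time()
# Written atomically via a temp file, then renamed; path is illustrative.
write_to_textfile('/var/lib/node_exporter/textfile/batch.prom', registry)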
174,535
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString class _PrometheusRedirectHandler(HTTPRedirectHandler): """ Allow additional methods (e.g. PUT) and data forwarding in redirects. Use of this class constitute a user's explicit agreement to the redirect responses the Prometheus client will receive when using it. You should only use this class if you control or otherwise trust the redirect behavior involved and are certain it is safe to full transfer the original request (method and data) to the redirected URL. For example, if you know there is a cosmetic URL redirect in front of a local deployment of a Prometheus server, and all redirects are safe, this is the class to use to handle redirects in that case. The standard HTTPRedirectHandler does not forward request data nor does it allow redirected PUT requests (which Prometheus uses for some operations, for example `push_to_gateway`) because these cannot generically guarantee no violations of HTTP RFC 2616 requirements for the user to explicitly confirm redirects that could have unexpected side effects (such as rendering a PUT request non-idempotent or creating multiple resources not named in the original request). """ def redirect_request(self, req, fp, code, msg, headers, newurl): """ Apply redirect logic to a request. See parent HTTPRedirectHandler.redirect_request for parameter info. If the redirect is disallowed, this raises the corresponding HTTP error. If the redirect can't be determined, return None to allow other handlers to try. If the redirect is allowed, return the new request. This method specialized for the case when (a) the user knows that the redirect will not cause unacceptable side effects for any request method, and (b) the user knows that any request data should be passed through to the redirect. If either condition is not met, this should not be used. """ # note that requests being provided by a handler will use get_method to # indicate the method, by monkeypatching this, instead of setting the # Request object's method attribute. m = getattr(req, "method", req.get_method()) if not (code in (301, 302, 303, 307) and m in ("GET", "HEAD") or code in (301, 302, 303) and m in ("POST", "PUT")): raise HTTPError(req.full_url, code, msg, headers, fp) new_request = Request( newurl.replace(' ', '%20'), # space escaping in new url if needed. 
headers=req.headers, origin_req_host=req.origin_req_host, unverifiable=True, data=req.data, ) new_request.method = m return new_request def _make_handler( url: str, method: str, timeout: Optional[float], headers: Sequence[Tuple[str, str]], data: bytes, base_handler: Union[BaseHandler, type], ) -> Callable[[], None]: def handle() -> None: request = Request(url, data=data) request.get_method = lambda: method # type: ignore for k, v in headers: request.add_header(k, v) resp = build_opener(base_handler).open(request, timeout=timeout) if resp.code >= 400: raise OSError(f"error talking to pushgateway: {resp.code} {resp.msg}") return handle from .asgi import make_asgi_app class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Tuple(BaseTypingInstance): def _is_homogenous(self): # To specify a variable-length tuple of homogeneous type, Tuple[T, ...] # is used. return self._generics_manager.is_homogenous_tuple() def py__simple_getitem__(self, index): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) else: if isinstance(index, int): return self._generics_manager.get_index_and_execute(index) debug.dbg('The getitem type on Tuple was %s' % index) return NO_VALUES def py__iter__(self, contextualized_node=None): if self._is_homogenous(): yield LazyKnownValues(self._generics_manager.get_index_and_execute(0)) else: for v in self._generics_manager.to_tuple(): yield LazyKnownValues(v.execute_annotation()) def py__getitem__(self, index_value_set, contextualized_node): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) return ValueSet.from_sets( self._generics_manager.to_tuple() ).execute_annotation() def _get_wrapped_value(self): tuple_, = self.inference_state.builtins_module \ .py__getattribute__('tuple').execute_annotation() return tuple_ def name(self): return self._wrapped_value.name def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts value_set = value_set.filter( lambda x: x.py__name__().lower() == 'tuple', ) if self._is_homogenous(): # The parameter annotation is of the form `Tuple[T, ...]`, # so we treat the incoming tuple like a iterable sequence # rather than a positional container of elements. return self._class_value.get_generics()[0].infer_type_vars( value_set.merge_types_of_iterate(), ) else: # The parameter annotation has only explicit type parameters # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we # treat the incoming values as needing to match the annotation # exactly, just as we would for non-tuple annotations. type_var_dict = {} for element in value_set: try: method = element.get_annotated_class_object except AttributeError: # This might still happen, because the tuple name matching # above is not 100% correct, so just catch the remaining # cases here. continue py_class = method() merge_type_var_dicts( type_var_dict, merge_pairwise_generics(self._class_value, py_class), ) return type_var_dict Optional: _SpecialForm = ... 
List = _Alias() The provided code snippet includes necessary dependencies for implementing the `passthrough_redirect_handler` function. Write a Python function `def passthrough_redirect_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]` to solve the following problem: Handler that automatically trusts redirect responses for all HTTP methods. Augments standard HTTPRedirectHandler capability by permitting PUT requests, preserving the method upon redirect, and passing through all headers and data from the original request. Only use this handler if you control or trust the source of redirect responses you encounter when making requests via the Prometheus client. This handler will simply repeat the identical request, including same method and data, to the new redirect URL. Here is the function: def passthrough_redirect_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]: """ Handler that automatically trusts redirect responses for all HTTP methods. Augments standard HTTPRedirectHandler capability by permitting PUT requests, preserving the method upon redirect, and passing through all headers and data from the original request. Only use this handler if you control or trust the source of redirect responses you encounter when making requests via the Prometheus client. This handler will simply repeat the identical request, including same method and data, to the new redirect URL.""" return _make_handler(url, method, timeout, headers, data, _PrometheusRedirectHandler)
Handler that automatically trusts redirect responses for all HTTP methods. Augments standard HTTPRedirectHandler capability by permitting PUT requests, preserving the method upon redirect, and passing through all headers and data from the original request. Only use this handler if you control or trust the source of redirect responses you encounter when making requests via the Prometheus client. This handler will simply repeat the identical request, including same method and data, to the new redirect URL.
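A minimal usage sketch, assuming the module is packaged as prometheus_client.exposition and a Pushgateway is reachable at the placeholder address pushgateway.local:9091; the handler is simply passed to a push function via its handler argument.

    from prometheus_client import CollectorRegistry, push_to_gateway
    from prometheus_client.exposition import passthrough_redirect_handler

    registry = CollectorRegistry()
    # Any redirect returned by the gateway is replayed with the same
    # method, headers, and body -- only do this for trusted gateways.
    push_to_gateway('pushgateway.local:9091', job='batchA',
                    registry=registry, handler=passthrough_redirect_handler)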
174,536
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def default_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]: """Default handler that implements HTTP/HTTPS connections. Used by the push_to_gateway functions. Can be re-used by other handlers.""" return _make_handler(url, method, timeout, headers, data, HTTPHandler) from .asgi import make_asgi_app class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Tuple(BaseTypingInstance): def _is_homogenous(self): # To specify a variable-length tuple of homogeneous type, Tuple[T, ...] # is used. return self._generics_manager.is_homogenous_tuple() def py__simple_getitem__(self, index): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) else: if isinstance(index, int): return self._generics_manager.get_index_and_execute(index) debug.dbg('The getitem type on Tuple was %s' % index) return NO_VALUES def py__iter__(self, contextualized_node=None): if self._is_homogenous(): yield LazyKnownValues(self._generics_manager.get_index_and_execute(0)) else: for v in self._generics_manager.to_tuple(): yield LazyKnownValues(v.execute_annotation()) def py__getitem__(self, index_value_set, contextualized_node): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) return ValueSet.from_sets( self._generics_manager.to_tuple() ).execute_annotation() def _get_wrapped_value(self): tuple_, = self.inference_state.builtins_module \ .py__getattribute__('tuple').execute_annotation() return tuple_ def name(self): return self._wrapped_value.name def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts value_set = value_set.filter( lambda x: x.py__name__().lower() == 'tuple', ) if self._is_homogenous(): # The parameter annotation is of the form `Tuple[T, ...]`, # so we treat the incoming tuple like a iterable sequence # rather than a positional container of elements. return self._class_value.get_generics()[0].infer_type_vars( value_set.merge_types_of_iterate(), ) else: # The parameter annotation has only explicit type parameters # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we # treat the incoming values as needing to match the annotation # exactly, just as we would for non-tuple annotations. 
type_var_dict = {} for element in value_set: try: method = element.get_annotated_class_object except AttributeError: # This might still happen, because the tuple name matching # above is not 100% correct, so just catch the remaining # cases here. continue py_class = method() merge_type_var_dicts( type_var_dict, merge_pairwise_generics(self._class_value, py_class), ) return type_var_dict Optional: _SpecialForm = ... List = _Alias() The provided code snippet includes necessary dependencies for implementing the `basic_auth_handler` function. Write a Python function `def basic_auth_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, username: Optional[str] = None, password: Optional[str] = None, ) -> Callable[[], None]` to solve the following problem: Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers. Here is the function: def basic_auth_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, username: Optional[str] = None, password: Optional[str] = None, ) -> Callable[[], None]: """Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers.""" def handle(): """Handler that implements HTTP Basic Auth. """ if username is not None and password is not None: auth_value = f'{username}:{password}'.encode() auth_token = base64.b64encode(auth_value) auth_header = b'Basic ' + auth_token headers.append(('Authorization', auth_header)) default_handler(url, method, timeout, headers, data)() return handle
Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers.
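A hedged usage sketch: the push functions call the handler with exactly five arguments, so credentials are usually bound in a small wrapper. The host, job name, and credentials below are placeholders.

    from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
    from prometheus_client.exposition import basic_auth_handler

    def my_auth_handler(url, method, timeout, headers, data):
        # Bind the extra username/password arguments, then delegate.
        return basic_auth_handler(url, method, timeout, headers, data,
                                  'username', 'password')

    registry = CollectorRegistry()
    g = Gauge('job_last_success_unixtime',
              'Last time a batch job succeeded', registry=registry)
    g.set_to_current_time()
    push_to_gateway('localhost:9091', job='batchA',
                    registry=registry, handler=my_auth_handler)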
174,537
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def _make_handler( url: str, method: str, timeout: Optional[float], headers: Sequence[Tuple[str, str]], data: bytes, base_handler: Union[BaseHandler, type], ) -> Callable[[], None]: def handle() -> None: request = Request(url, data=data) request.get_method = lambda: method # type: ignore for k, v in headers: request.add_header(k, v) resp = build_opener(base_handler).open(request, timeout=timeout) if resp.code >= 400: raise OSError(f"error talking to pushgateway: {resp.code} {resp.msg}") return handle from .asgi import make_asgi_app class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Tuple(BaseTypingInstance): def _is_homogenous(self): # To specify a variable-length tuple of homogeneous type, Tuple[T, ...] # is used. return self._generics_manager.is_homogenous_tuple() def py__simple_getitem__(self, index): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) else: if isinstance(index, int): return self._generics_manager.get_index_and_execute(index) debug.dbg('The getitem type on Tuple was %s' % index) return NO_VALUES def py__iter__(self, contextualized_node=None): if self._is_homogenous(): yield LazyKnownValues(self._generics_manager.get_index_and_execute(0)) else: for v in self._generics_manager.to_tuple(): yield LazyKnownValues(v.execute_annotation()) def py__getitem__(self, index_value_set, contextualized_node): if self._is_homogenous(): return self._generics_manager.get_index_and_execute(0) return ValueSet.from_sets( self._generics_manager.to_tuple() ).execute_annotation() def _get_wrapped_value(self): tuple_, = self.inference_state.builtins_module \ .py__getattribute__('tuple').execute_annotation() return tuple_ def name(self): return self._wrapped_value.name def infer_type_vars(self, value_set): # Circular from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts value_set = value_set.filter( lambda x: x.py__name__().lower() == 'tuple', ) if self._is_homogenous(): # The parameter annotation is of the form `Tuple[T, ...]`, # so we treat the incoming tuple like a iterable sequence # rather than a positional container of elements. 
return self._class_value.get_generics()[0].infer_type_vars( value_set.merge_types_of_iterate(), ) else: # The parameter annotation has only explicit type parameters # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we # treat the incoming values as needing to match the annotation # exactly, just as we would for non-tuple annotations. type_var_dict = {} for element in value_set: try: method = element.get_annotated_class_object except AttributeError: # This might still happen, because the tuple name matching # above is not 100% correct, so just catch the remaining # cases here. continue py_class = method() merge_type_var_dicts( type_var_dict, merge_pairwise_generics(self._class_value, py_class), ) return type_var_dict Optional: _SpecialForm = ... List = _Alias() The provided code snippet includes necessary dependencies for implementing the `tls_auth_handler` function. Write a Python function `def tls_auth_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, certfile: str, keyfile: str, cafile: Optional[str] = None, protocol: int = ssl.PROTOCOL_TLS_CLIENT, insecure_skip_verify: bool = False, ) -> Callable[[], None]` to solve the following problem: Handler that implements an HTTPS connection with TLS Auth. The default protocol (ssl.PROTOCOL_TLS_CLIENT) also enables ssl.CERT_REQUIRED and SSLContext.check_hostname; both can be disabled by setting insecure_skip_verify to True. Both this handler and the TLS feature on pushgateway are experimental. Here is the function: def tls_auth_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, certfile: str, keyfile: str, cafile: Optional[str] = None, protocol: int = ssl.PROTOCOL_TLS_CLIENT, insecure_skip_verify: bool = False, ) -> Callable[[], None]: """Handler that implements an HTTPS connection with TLS Auth. The default protocol (ssl.PROTOCOL_TLS_CLIENT) also enables ssl.CERT_REQUIRED and SSLContext.check_hostname; both can be disabled by setting insecure_skip_verify to True. Both this handler and the TLS feature on pushgateway are experimental.""" context = ssl.SSLContext(protocol=protocol) if cafile is not None: context.load_verify_locations(cafile) else: context.load_default_certs() if insecure_skip_verify: context.check_hostname = False context.verify_mode = ssl.CERT_NONE context.load_cert_chain(certfile=certfile, keyfile=keyfile) handler = HTTPSHandler(context=context) return _make_handler(url, method, timeout, headers, data, handler)
Handler that implements an HTTPS connection with TLS Auth. The default protocol (ssl.PROTOCOL_TLS_CLIENT) also enables ssl.CERT_REQUIRED and SSLContext.check_hostname; both can be disabled by setting insecure_skip_verify to True. Both this handler and the TLS feature on pushgateway are experimental.
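The same wrapper pattern used for Basic Auth works here; a sketch assuming client.crt and client.key are placeholder paths to the client certificate and private key:

    from prometheus_client import CollectorRegistry, push_to_gateway
    from prometheus_client.exposition import tls_auth_handler

    def my_tls_handler(url, method, timeout, headers, data):
        # Present the client certificate during the TLS handshake.
        return tls_auth_handler(url, method, timeout, headers, data,
                                'client.crt', 'client.key')

    push_to_gateway('https://pushgateway.local:9091', job='batchA',
                    registry=CollectorRegistry(), handler=my_tls_handler)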
174,538
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def default_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]: """Default handler that implements HTTP/HTTPS connections. Used by the push_to_gateway functions. Can be re-used by other handlers.""" return _make_handler(url, method, timeout, headers, data, HTTPHandler) def _use_gateway( method: str, gateway: str, job: str, registry: Optional[CollectorRegistry], grouping_key: Optional[Dict[str, Any]], timeout: Optional[float], handler: Callable, ) -> None: gateway_url = urlparse(gateway) # See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6. if not gateway_url.scheme or ( PYTHON376_OR_NEWER and gateway_url.scheme not in ['http', 'https'] ): gateway = f'http://{gateway}' gateway = gateway.rstrip('/') url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job)) data = b'' if method != 'DELETE': if registry is None: registry = REGISTRY data = generate_latest(registry) if grouping_key is None: grouping_key = {} url += ''.join( '/{}/{}'.format(*_escape_grouping_key(str(k), str(v))) for k, v in sorted(grouping_key.items())) handler( url=url, method=method, timeout=timeout, headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data, )() from .asgi import make_asgi_app Any = object() Optional: _SpecialForm = ... Dict = _Alias() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class CollectorRegistry(Collector): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. 
""" def __init__(self, auto_describe: bool = False, target_info: Optional[Dict[str, str]] = None): self._collector_to_names: Dict[Collector, List[str]] = {} self._names_to_collectors: Dict[str, Collector] = {} self._auto_describe = auto_describe self._lock = Lock() self._target_info: Optional[Dict[str, str]] = {} self.set_target_info(target_info) def register(self, collector: Collector) -> None: """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector: Collector) -> None: """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces and clashes with.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): result.append(metric.name) for suffix in type_suffixes.get(metric.type, []): result.append(metric.name + suffix) return result def collect(self) -> Iterable[Metric]: """Yields metrics from the collectors in the registry.""" collectors = None ti = None with self._lock: collectors = copy.copy(self._collector_to_names) if self._target_info: ti = self._target_info_metric() if ti: yield ti for collector in collectors: yield from collector.collect() def restricted_registry(self, names: Iterable[str]) -> "RestrictedRegistry": """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) return RestrictedRegistry(names, self) def set_target_info(self, labels: Optional[Dict[str, str]]) -> None: with self._lock: if labels: if not self._target_info and 'target_info' in self._names_to_collectors: raise ValueError('CollectorRegistry already contains a target_info metric') self._names_to_collectors['target_info'] = _EmptyCollector() elif self._target_info: self._names_to_collectors.pop('target_info', None) self._target_info = labels def get_target_info(self) -> Optional[Dict[str, str]]: with self._lock: return self._target_info def _target_info_metric(self): m = Metric('target', 'Target metadata', 'info') m.add_sample('target_info', self._target_info, 1) return m def get_sample_value(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[float]: """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. 
""" if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None The provided code snippet includes necessary dependencies for implementing the `push_to_gateway` function. Write a Python function `def push_to_gateway( gateway: str, job: str, registry: CollectorRegistry, grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None` to solve the following problem: Push metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. If not None, the argument must be a function which accepts the following arguments: url, method, timeout, headers, and content May be used to implement additional functionality not supported by the built-in default handler (such as SSL client certicates, and HTTP authentication mechanisms). 'url' is the URL for the request, the 'gateway' argument described earlier will form the basis of this URL. 'method' is the HTTP method which should be used when carrying out the request. 'timeout' requests not successfully completed after this many seconds should be aborted. If timeout is None, then the handler should not set a timeout. 'headers' is a list of ("header-name","header-value") tuples which must be passed to the pushgateway in the form of HTTP request headers. The function should raise an exception (e.g. IOError) on failure. 'content' is the data which should be used to form the HTTP Message Body. This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method. Here is the function: def push_to_gateway( gateway: str, job: str, registry: CollectorRegistry, grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None: """Push metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. If not None, the argument must be a function which accepts the following arguments: url, method, timeout, headers, and content May be used to implement additional functionality not supported by the built-in default handler (such as SSL client certicates, and HTTP authentication mechanisms). 
'url' is the URL for the request, the 'gateway' argument described earlier will form the basis of this URL. 'method' is the HTTP method which should be used when carrying out the request. 'timeout' requests not successfully completed after this many seconds should be aborted. If timeout is None, then the handler should not set a timeout. 'headers' is a list of ("header-name","header-value") tuples which must be passed to the pushgateway in the form of HTTP request headers. The function should raise an exception (e.g. IOError) on failure. 'content' is the data which should be used to form the HTTP Message Body. This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method.""" _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
Push metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. If not None, the argument must be a function which accepts the following arguments: url, method, timeout, headers, and content. May be used to implement additional functionality not supported by the built-in default handler (such as SSL client certificates, and HTTP authentication mechanisms). 'url' is the URL for the request, the 'gateway' argument described earlier will form the basis of this URL. 'method' is the HTTP method which should be used when carrying out the request. 'timeout' requests not successfully completed after this many seconds should be aborted. If timeout is None, then the handler should not set a timeout. 'headers' is a list of ("header-name","header-value") tuples which must be passed to the pushgateway in the form of HTTP request headers. The function should raise an exception (e.g. IOError) on failure. 'content' is the data which should be used to form the HTTP Message Body. This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method.
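A minimal end-to-end sketch (gateway address and metric name are placeholders):

    from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

    registry = CollectorRegistry()
    g = Gauge('job_last_success_unixtime',
              'Last time a batch job successfully finished', registry=registry)
    g.set_to_current_time()
    # PUT semantics: replaces everything previously pushed for job="batchA".
    push_to_gateway('localhost:9091', job='batchA', registry=registry)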
174,539
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def default_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]: """Default handler that implements HTTP/HTTPS connections. Used by the push_to_gateway functions. Can be re-used by other handlers.""" return _make_handler(url, method, timeout, headers, data, HTTPHandler) def _use_gateway( method: str, gateway: str, job: str, registry: Optional[CollectorRegistry], grouping_key: Optional[Dict[str, Any]], timeout: Optional[float], handler: Callable, ) -> None: gateway_url = urlparse(gateway) # See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6. if not gateway_url.scheme or ( PYTHON376_OR_NEWER and gateway_url.scheme not in ['http', 'https'] ): gateway = f'http://{gateway}' gateway = gateway.rstrip('/') url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job)) data = b'' if method != 'DELETE': if registry is None: registry = REGISTRY data = generate_latest(registry) if grouping_key is None: grouping_key = {} url += ''.join( '/{}/{}'.format(*_escape_grouping_key(str(k), str(v))) for k, v in sorted(grouping_key.items())) handler( url=url, method=method, timeout=timeout, headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data, )() from .asgi import make_asgi_app Any = object() Optional: _SpecialForm = ... Dict = _Alias() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class CollectorRegistry(Collector): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. 
""" def __init__(self, auto_describe: bool = False, target_info: Optional[Dict[str, str]] = None): self._collector_to_names: Dict[Collector, List[str]] = {} self._names_to_collectors: Dict[str, Collector] = {} self._auto_describe = auto_describe self._lock = Lock() self._target_info: Optional[Dict[str, str]] = {} self.set_target_info(target_info) def register(self, collector: Collector) -> None: """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector: Collector) -> None: """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces and clashes with.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): result.append(metric.name) for suffix in type_suffixes.get(metric.type, []): result.append(metric.name + suffix) return result def collect(self) -> Iterable[Metric]: """Yields metrics from the collectors in the registry.""" collectors = None ti = None with self._lock: collectors = copy.copy(self._collector_to_names) if self._target_info: ti = self._target_info_metric() if ti: yield ti for collector in collectors: yield from collector.collect() def restricted_registry(self, names: Iterable[str]) -> "RestrictedRegistry": """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) return RestrictedRegistry(names, self) def set_target_info(self, labels: Optional[Dict[str, str]]) -> None: with self._lock: if labels: if not self._target_info and 'target_info' in self._names_to_collectors: raise ValueError('CollectorRegistry already contains a target_info metric') self._names_to_collectors['target_info'] = _EmptyCollector() elif self._target_info: self._names_to_collectors.pop('target_info', None) self._target_info = labels def get_target_info(self) -> Optional[Dict[str, str]]: with self._lock: return self._target_info def _target_info_metric(self): m = Metric('target', 'Target metadata', 'info') m.add_sample('target_info', self._target_info, 1) return m def get_sample_value(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[float]: """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. 
""" if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None The provided code snippet includes necessary dependencies for implementing the `pushadd_to_gateway` function. Write a Python function `def pushadd_to_gateway( gateway: str, job: str, registry: Optional[CollectorRegistry], grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None` to solve the following problem: PushAdd metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This replaces metrics with the same name, job and grouping_key. This uses the POST HTTP method. Here is the function: def pushadd_to_gateway( gateway: str, job: str, registry: Optional[CollectorRegistry], grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None: """PushAdd metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This replaces metrics with the same name, job and grouping_key. This uses the POST HTTP method.""" _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
PushAdd metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This replaces metrics with the same name, job and grouping_key. This uses the POST HTTP method.
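For contrast with push_to_gateway, a sketch (placeholder names) showing the additive POST semantics:

    from prometheus_client import CollectorRegistry, Counter, pushadd_to_gateway

    registry = CollectorRegistry()
    c = Counter('records_processed', 'Records processed by this run',
                registry=registry)
    c.inc(42)
    # POST semantics: only metrics pushed here are replaced; other metrics
    # already stored under job="batchA" are left untouched.
    pushadd_to_gateway('localhost:9091', job='batchA', registry=registry)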
174,540
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString def default_handler( url: str, method: str, timeout: Optional[float], headers: List[Tuple[str, str]], data: bytes, ) -> Callable[[], None]: """Default handler that implements HTTP/HTTPS connections. Used by the push_to_gateway functions. Can be re-used by other handlers.""" return _make_handler(url, method, timeout, headers, data, HTTPHandler) def _use_gateway( method: str, gateway: str, job: str, registry: Optional[CollectorRegistry], grouping_key: Optional[Dict[str, Any]], timeout: Optional[float], handler: Callable, ) -> None: gateway_url = urlparse(gateway) # See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6. if not gateway_url.scheme or ( PYTHON376_OR_NEWER and gateway_url.scheme not in ['http', 'https'] ): gateway = f'http://{gateway}' gateway = gateway.rstrip('/') url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job)) data = b'' if method != 'DELETE': if registry is None: registry = REGISTRY data = generate_latest(registry) if grouping_key is None: grouping_key = {} url += ''.join( '/{}/{}'.format(*_escape_grouping_key(str(k), str(v))) for k, v in sorted(grouping_key.items())) handler( url=url, method=method, timeout=timeout, headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data, )() from .asgi import make_asgi_app Any = object() Optional: _SpecialForm = ... Dict = _Alias() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) The provided code snippet includes necessary dependencies for implementing the `delete_from_gateway` function. Write a Python function `def delete_from_gateway( gateway: str, job: str, grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None` to solve the following problem: Delete metrics from the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long delete will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. 
Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This deletes metrics with the given job and grouping_key. This uses the DELETE HTTP method. Here is the function: def delete_from_gateway( gateway: str, job: str, grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, ) -> None: """Delete metrics from the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long delete will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This deletes metrics with the given job and grouping_key. This uses the DELETE HTTP method.""" _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
Delete metrics from the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long delete will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This deletes metrics with the given job and grouping_key. This uses the DELETE HTTP method.
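A short sketch (placeholder address): delete everything grouped under a job, or a narrower group when grouping_key is given.

    from prometheus_client import delete_from_gateway

    delete_from_gateway('localhost:9091', job='batchA')
    # Or only the metrics for one instance within the job:
    delete_from_gateway('localhost:9091', job='batchA',
                        grouping_key={'instance': '10.0.0.5'})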
174,541
import base64 from contextlib import closing import gzip from http.server import BaseHTTPRequestHandler import os import socket from socketserver import ThreadingMixIn import ssl import sys import threading from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union from urllib.error import HTTPError from urllib.parse import parse_qs, quote_plus, urlparse from urllib.request import ( BaseHandler, build_opener, HTTPHandler, HTTPRedirectHandler, HTTPSHandler, Request, ) from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer from .openmetrics import exposition as openmetrics from .registry import CollectorRegistry, REGISTRY from .utils import floatToGoString from .asgi import make_asgi_app class closing(ContextManager[_T]): def __init__(self, thing: _T) -> None: ... Any = object() Dict = _Alias() The provided code snippet includes necessary dependencies for implementing the `instance_ip_grouping_key` function. Write a Python function `def instance_ip_grouping_key() -> Dict[str, Any]` to solve the following problem: Grouping key with instance set to the IP address of this host. Here is the function: def instance_ip_grouping_key() -> Dict[str, Any]: """Grouping key with instance set to the IP address of this host.""" with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s: if sys.platform == 'darwin': # On macOS, connecting the UDP socket to localhost does not # reveal the host's address, so connect to an unroutable # address instead. Adapted from this StackOverflow answer: # https://stackoverflow.com/a/28950776 s.connect(('10.255.255.255', 1)) else: s.connect(('localhost', 0)) return {'instance': s.getsockname()[0]}
Grouping key with instance set to the IP address of this host.
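Typical use is as the grouping_key of a push, so each host gets its own metric group; a sketch with a placeholder gateway address:

    from prometheus_client import (
        CollectorRegistry, instance_ip_grouping_key, push_to_gateway,
    )

    # Produces something like {'instance': '10.0.0.5'} for this host.
    push_to_gateway('localhost:9091', job='batchA',
                    registry=CollectorRegistry(),
                    grouping_key=instance_ip_grouping_key())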
174,542
import json import mmap import os import struct from typing import List _pack_double_func = struct.Struct(b'd').pack def _pack_double(data, pos, value): data[pos:pos + 8] = _pack_double_func(value)
null
174,543
import json import mmap import os import struct from typing import List _pack_integer_func = struct.Struct(b'i').pack def _pack_integer(data, pos, value): data[pos:pos + 4] = _pack_integer_func(value)
null
174,544
import json import mmap import os import struct from typing import List _unpack_integer = struct.Struct(b'i').unpack_from _unpack_double = struct.Struct(b'd').unpack_from The provided code snippet includes necessary dependencies for implementing the `_read_all_values` function. Write a Python function `def _read_all_values(data, used=0)` to solve the following problem: Yield (key, value, pos). No locking is performed. Here is the function: def _read_all_values(data, used=0): """Yield (key, value, pos). No locking is performed.""" if used <= 0: # If no valid `used` value is passed in, read it from the file. used = _unpack_integer(data, 0)[0] pos = 8 while pos < used: encoded_len = _unpack_integer(data, pos)[0] # Check we are not reading beyond the bounds of the file. if encoded_len + pos > used: raise RuntimeError('Read beyond file size detected, file is corrupted.') pos += 4 encoded_key = data[pos:pos + encoded_len] padded_len = encoded_len + (8 - (encoded_len + 4) % 8) pos += padded_len value = _unpack_double(data, pos)[0] yield encoded_key.decode('utf-8'), value, pos pos += 8
Yield (key, value, pos). No locking is performed.
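To make the layout concrete, a round-trip sketch; _pack_entry is a hypothetical helper, not part of the module. The file holds a 4-byte used counter at offset 0 and entries from offset 8; each entry is a 4-byte key length, the UTF-8 key zero-padded so the length prefix plus key reaches an 8-byte boundary, then an 8-byte double.

    import struct

    def _pack_entry(key: str, value: float) -> bytes:
        # Hypothetical writer mirroring the layout _read_all_values expects.
        encoded = key.encode('utf-8')
        # Pad so the 4-byte length prefix plus the key is a multiple of 8,
        # leaving the trailing double 8-byte aligned.
        padded_len = len(encoded) + (8 - (len(encoded) + 4) % 8)
        return (struct.pack('i', len(encoded))
                + encoded.ljust(padded_len, b'\x00')
                + struct.pack('d', value))

    entry = _pack_entry('{"metric": "x"}', 1.5)
    data = struct.pack('i', 8 + len(entry)) + b'\x00' * 4 + entry
    # The yielded pos is the offset of the 8-byte value within the buffer.
    assert list(_read_all_values(data)) == [('{"metric": "x"}', 1.5, 32)]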
174,545
from collections import defaultdict import glob import json import os import warnings from .metrics import Gauge from .metrics_core import Metric from .mmap_dict import MmapedDict from .samples import Sample from .utils import floatToGoString _LIVE_GAUGE_MULTIPROCESS_MODES = {m for m in Gauge._MULTIPROC_MODES if m.startswith('live')} The provided code snippet includes necessary dependencies for implementing the `mark_process_dead` function. Write a Python function `def mark_process_dead(pid, path=None)` to solve the following problem: Do bookkeeping for when one process dies in a multi-process setup. Here is the function: def mark_process_dead(pid, path=None): """Do bookkeeping for when one process dies in a multi-process setup.""" if path is None: path = os.environ.get('PROMETHEUS_MULTIPROC_DIR', os.environ.get('prometheus_multiproc_dir')) for mode in _LIVE_GAUGE_MULTIPROCESS_MODES: for f in glob.glob(os.path.join(path, f'gauge_{mode}_{pid}.db')): os.remove(f)
Do bookkeeping for when one process dies in a multi-process setup.
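In the documented multiprocess setup this is wired into the process manager; a sketch of a gunicorn config file (the file name is hypothetical):

    # gunicorn_conf.py -- gunicorn calls child_exit when a worker dies,
    # letting us drop the worker's live-gauge files so its stale values
    # stop being exported.
    from prometheus_client import multiprocess

    def child_exit(server, worker):
        multiprocess.mark_process_dead(worker.pid)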
174,546
import os from threading import Lock import time import types from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from . import values from .context_managers import ExceptionCounter, InprogressTracker, Timer from .metrics_core import ( Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, RESERVED_METRIC_LABEL_NAME_RE, ) from .registry import Collector, CollectorRegistry, REGISTRY from .samples import Exemplar, Sample from .utils import floatToGoString, INF def _build_full_name(metric_type, name, namespace, subsystem, unit): full_name = '' if namespace: full_name += namespace + '_' if subsystem: full_name += subsystem + '_' full_name += name if metric_type == 'counter' and full_name.endswith('_total'): full_name = full_name[:-6] # Munge to OpenMetrics. if unit and not full_name.endswith("_" + unit): full_name += "_" + unit if unit and metric_type in ('info', 'stateset'): raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name) return full_name
null
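A couple of illustrative checks of the naming rules above (inputs are made up):

    # Counters drop a trailing '_total'; exposition re-adds it per sample.
    assert _build_full_name('counter', 'requests_total', 'http', 'server', '') == 'http_server_requests'
    # A unit is appended once, after namespace and subsystem.
    assert _build_full_name('gauge', 'disk_usage', '', '', 'bytes') == 'disk_usage_bytes'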
174,547
import os from threading import Lock import time import types from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from . import values from .context_managers import ExceptionCounter, InprogressTracker, Timer from .metrics_core import ( Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, RESERVED_METRIC_LABEL_NAME_RE, ) from .registry import Collector, CollectorRegistry, REGISTRY from .samples import Exemplar, Sample from .utils import floatToGoString, INF def _validate_labelname(l): if not METRIC_LABEL_NAME_RE.match(l): raise ValueError('Invalid label metric name: ' + l) if RESERVED_METRIC_LABEL_NAME_RE.match(l): raise ValueError('Reserved label metric name: ' + l) def _validate_labelnames(cls, labelnames): labelnames = tuple(labelnames) for l in labelnames: _validate_labelname(l) if l in cls._reserved_labelnames: raise ValueError('Reserved label metric name: ' + l) return labelnames
null
174,548
import os from threading import Lock import time import types from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from . import values from .context_managers import ExceptionCounter, InprogressTracker, Timer from .metrics_core import ( Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, RESERVED_METRIC_LABEL_NAME_RE, ) from .registry import Collector, CollectorRegistry, REGISTRY from .samples import Exemplar, Sample from .utils import floatToGoString, INF def _validate_labelname(l): if not METRIC_LABEL_NAME_RE.match(l): raise ValueError('Invalid label metric name: ' + l) if RESERVED_METRIC_LABEL_NAME_RE.match(l): raise ValueError('Reserved label metric name: ' + l) def _validate_exemplar(exemplar): runes = 0 for k, v in exemplar.items(): _validate_labelname(k) runes += len(k) runes += len(v) if runes > 128: raise ValueError('Exemplar labels have %d UTF-8 characters, exceeding the limit of 128' % runes)
null
174,549
import os from threading import Lock import time import types from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) from . import values from .context_managers import ExceptionCounter, InprogressTracker, Timer from .metrics_core import ( Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, RESERVED_METRIC_LABEL_NAME_RE, ) from .registry import Collector, CollectorRegistry, REGISTRY from .samples import Exemplar, Sample from .utils import floatToGoString, INF def _get_use_created() -> bool: return os.environ.get("PROMETHEUS_DISABLE_CREATED_SERIES", 'False').lower() not in ('true', '1', 't')
null
174,550
import sys import re import os import importlib import warnings is_pypy = '__pypy__' in sys.builtin_module_names def warn_distutils_present(): if 'distutils' not in sys.modules: return if is_pypy and sys.version_info < (3, 7): # PyPy for 3.6 unconditionally imports distutils, so bypass the warning # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 return warnings.warn( "Distutils was imported before Setuptools. This usage is discouraged " "and may exhibit undesirable behaviors or errors. Please use " "Setuptools' objects directly or at least import Setuptools first.")
null
174,551
import sys import re import os import importlib import warnings The provided code snippet includes necessary dependencies for implementing the `enabled` function. Write a Python function `def enabled()` to solve the following problem: Allow selection of distutils by environment variable. Here is the function: def enabled(): """ Allow selection of distutils by environment variable. """ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib') return which == 'local'
Allow selection of distutils by environment variable.
174,552
import sys import re import os import importlib import warnings def clear_distutils(): if 'distutils' not in sys.modules: return warnings.warn("Setuptools is replacing distutils.") mods = [name for name in sys.modules if re.match(r'distutils\b', name)] for name in mods: del sys.modules[name] def ensure_local_distutils(): clear_distutils() distutils = importlib.import_module('setuptools._distutils') distutils.__name__ = 'distutils' sys.modules['distutils'] = distutils # sanity check that submodules load as expected core = importlib.import_module('distutils.core') assert '_distutils' in core.__file__, core.__file__
null
174,553
import os import importlib.util import importlib.machinery from .py34compat import module_from_spec PY_SOURCE = 1 PY_COMPILED = 2 C_EXTENSION = 3 C_BUILTIN = 6 PY_FROZEN = 7 def find_spec(module, paths): finder = ( importlib.machinery.PathFinder().find_spec if isinstance(paths, list) else importlib.util.find_spec ) return finder(module, paths) The provided code snippet includes necessary dependencies for implementing the `find_module` function. Write a Python function `def find_module(module, paths=None)` to solve the following problem: Just like 'imp.find_module()', but with package support Here is the function: def find_module(module, paths=None): """Just like 'imp.find_module()', but with package support""" spec = find_spec(module, paths) if spec is None: raise ImportError("Can't find %s" % module) if not spec.has_location and hasattr(spec, 'submodule_search_locations'): spec = importlib.util.spec_from_loader('__init__.py', spec.loader) kind = -1 file = None static = isinstance(spec.loader, type) if spec.origin == 'frozen' or static and issubclass( spec.loader, importlib.machinery.FrozenImporter): kind = PY_FROZEN path = None # imp compatibility suffix = mode = '' # imp compatibility elif spec.origin == 'built-in' or static and issubclass( spec.loader, importlib.machinery.BuiltinImporter): kind = C_BUILTIN path = None # imp compatibility suffix = mode = '' # imp compatibility elif spec.has_location: path = spec.origin suffix = os.path.splitext(path)[1] mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb' if suffix in importlib.machinery.SOURCE_SUFFIXES: kind = PY_SOURCE elif suffix in importlib.machinery.BYTECODE_SUFFIXES: kind = PY_COMPILED elif suffix in importlib.machinery.EXTENSION_SUFFIXES: kind = C_EXTENSION if kind in {PY_SOURCE, PY_COMPILED}: file = open(path, mode) else: path = None suffix = mode = '' return file, path, (suffix, mode, kind)
Just like 'imp.find_module()', but with package support
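A quick sanity check on a standard CPython install (illustrative; the exact path will vary):

    # json is a pure-Python stdlib package, so we get an open file on its
    # __init__.py and a PY_SOURCE kind, much as imp.find_module gave.
    file, path, (suffix, mode, kind) = find_module('json')
    assert kind == PY_SOURCE and path.endswith('__init__.py')
    if file:
        file.close()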
174,554
import os import importlib.util import importlib.machinery from .py34compat import module_from_spec def find_spec(module, paths): finder = ( importlib.machinery.PathFinder().find_spec if isinstance(paths, list) else importlib.util.find_spec ) return finder(module, paths) def get_frozen_object(module, paths=None): spec = find_spec(module, paths) if not spec: raise ImportError("Can't find %s" % module) return spec.loader.get_code(module)
null
174,555
import os import importlib.util import importlib.machinery from .py34compat import module_from_spec def find_spec(module, paths): finder = ( importlib.machinery.PathFinder().find_spec if isinstance(paths, list) else importlib.util.find_spec ) return finder(module, paths) try: module_from_spec = importlib.util.module_from_spec except AttributeError: # Fallback for Python 3.4, which lacks importlib.util.module_from_spec. def module_from_spec(spec): return spec.loader.load_module(spec.name) def get_module(module, paths, info): spec = find_spec(module, paths) if not spec: raise ImportError("Can't find %s" % module) return module_from_spec(spec)
null
174,556
import unicodedata import sys from setuptools.extern import six def decompose(path): if isinstance(path, six.text_type): return unicodedata.normalize('NFD', path) try: path = path.decode('utf-8') path = unicodedata.normalize('NFD', path) path = path.encode('utf-8') except UnicodeError: pass # Not UTF-8 return path
null
174,557
import unicodedata import sys from setuptools.extern import six The provided code snippet includes necessary dependencies for implementing the `filesys_decode` function. Write a Python function `def filesys_decode(path)` to solve the following problem: Ensure that the given path is decoded, returning None when no expected encoding works Here is the function: def filesys_decode(path): """ Ensure that the given path is decoded, returning None when no expected encoding works """ if isinstance(path, six.text_type): return path fs_enc = sys.getfilesystemencoding() or 'utf-8' candidates = fs_enc, 'utf-8' for enc in candidates: try: return path.decode(enc) except UnicodeDecodeError: continue
Ensure that the given path is decoded, returning None when no expected encoding works
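A small illustration, assuming a UTF-8 filesystem encoding:

    assert filesys_decode('café') == 'café'          # text passes through
    assert filesys_decode(b'caf\xc3\xa9') == 'café'  # bytes are decoded
    # Bytes no candidate encoding can decode fall off the loop, yielding None.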
174,558
import unicodedata import sys from setuptools.extern import six The provided code snippet includes necessary dependencies for implementing the `try_encode` function. Write a Python function `def try_encode(string, enc)` to solve the following problem: turn unicode encoding into a functional routine Here is the function: def try_encode(string, enc): "turn unicode encoding into a functional routine" try: return string.encode(enc) except UnicodeEncodeError: return None
turn unicode encoding into a functional routine
174,559
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def _get_purelib(): return get_path("purelib")
null
174,560
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def _get_purelib(): return get_python_lib(False)
null
174,561
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def strip_module(filename): if '.' in filename: filename = os.path.splitext(filename)[0] if filename.endswith('module'): filename = filename[:-6] return filename
null
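strip_module copied from the record above and run on some invented filenames: first the extension goes, then a trailing 'module' suffix (the naming convention for extension-module stub loaders).

import os

def strip_module(filename):
    if '.' in filename:
        filename = os.path.splitext(filename)[0]
    if filename.endswith('module'):
        filename = filename[:-6]
    return filename

print(strip_module('foomodule.so'))  # 'foo'   (extension and 'module' stripped)
print(strip_module('bar.abi3'))      # 'bar'   (only the extension stripped)
print(strip_module('plain'))         # 'plain' (unchanged)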
174,562
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def write_stub(resource, pyfile): _stub_template = textwrap.dedent(""" def __bootstrap__(): global __bootstrap__, __loader__, __file__ import sys, pkg_resources from importlib.machinery import ExtensionFileLoader __file__ = pkg_resources.resource_filename(__name__, %r) __loader__ = None; del __bootstrap__, __loader__ ExtensionFileLoader(__name__,__file__).load_module() __bootstrap__() """).lstrip() with open(pyfile, 'w') as f: f.write(_stub_template % resource)
null
174,563
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def walk_egg(egg_dir): """Walk an unpacked egg's contents, skipping the metadata directory""" walker = sorted_walk(egg_dir) base, dirs, files = next(walker) if 'EGG-INFO' in dirs: dirs.remove('EGG-INFO') yield base, dirs, files for bdf in walker: yield bdf safety_flags = { True: 'zip-safe', False: 'not-zip-safe', } def scan_module(egg_dir, base, name, stubs): """Check whether module possibly uses unsafe-for-zipfile stuff""" filename = os.path.join(base, name) if filename[:-1] in stubs: return True # Extension module pkg = base[len(egg_dir) + 1:].replace(os.sep, '.') module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0] if six.PY2: skip = 8 # skip magic & date elif sys.version_info < (3, 7): skip = 12 # skip magic & date & file size else: skip = 16 # skip magic & reserved? & date & file size f = open(filename, 'rb') f.read(skip) code = marshal.load(f) f.close() safe = True symbols = dict.fromkeys(iter_symbols(code)) for bad in ['__file__', '__path__']: if bad in symbols: log.warn("%s: module references %s", module, bad) safe = False if 'inspect' in symbols: for bad in [ 'getsource', 'getabsfile', 'getsourcefile', 'getfile', 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', 'getinnerframes', 'getouterframes', 'stack', 'trace' ]: if bad in symbols: log.warn("%s: module MAY be using inspect.%s", module, bad) safe = False return safe def can_scan(): if not sys.platform.startswith('java') and sys.platform != 'cli': # CPython, PyPy, etc. return True log.warn("Unable to analyze compiled code on this platform.") log.warn("Please ask the author to include a 'zip_safe'" " setting (either True or False) in the package's setup.py") def analyze_egg(egg_dir, stubs): # check for existing flag in EGG-INFO for flag, fn in safety_flags.items(): if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)): return flag if not can_scan(): return False safe = True for base, dirs, files in walk_egg(egg_dir): for name in files: if name.endswith('.py') or name.endswith('.pyw'): continue elif name.endswith('.pyc') or name.endswith('.pyo'): # always scan, even if we already know we're not safe safe = scan_module(egg_dir, base, name, stubs) and safe return safe
null
174,564
from distutils.errors import DistutilsSetupError from distutils.dir_util import remove_tree, mkpath from distutils import log from types import CodeType import sys import os import re import textwrap import marshal import warnings from setuptools.extern import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint from setuptools.extension import Library from setuptools import Command, SetuptoolsDeprecationWarning def sorted_walk(dir): """Do os.walk in a reproducible way, independent of indeterministic filesystem readdir order """ for base, dirs, files in os.walk(dir): dirs.sort() files.sort() yield base, dirs, files The provided code snippet includes necessary dependencies for implementing the `make_zipfile` function. Write a Python function `def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, mode='w')` to solve the following problem: Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. Here is the function: def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, mode='w'): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ import zipfile mkpath(os.path.dirname(zip_filename), dry_run=dry_run) log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit(z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): p = path[len(base_dir) + 1:] if not dry_run: z.write(path, p) log.debug("adding '%s'", p) compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED if not dry_run: z = zipfile.ZipFile(zip_filename, mode, compression=compression) for dirname, dirs, files in sorted_walk(base_dir): visit(z, dirname, files) z.close() else: for dirname, dirs, files in sorted_walk(base_dir): visit(None, dirname, files) return zip_filename
Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file.
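A hypothetical invocation, assuming make_zipfile and sorted_walk from the record above are in scope; the directory layout is fabricated for the demo. Because sorted_walk sorts entries, the archive's member order is reproducible across runs.

import os
import zipfile

os.makedirs('build/demo/pkg', exist_ok=True)
open('build/demo/pkg/__init__.py', 'w').close()

out = make_zipfile('dist/demo.zip', 'build/demo')   # mkpath creates dist/ as needed
print(out)                                          # 'dist/demo.zip'
print(zipfile.ZipFile(out).namelist())              # ['pkg/__init__.py'] on POSIX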
174,565
from distutils.errors import DistutilsOptionError from setuptools.extern.six.moves import map from setuptools.command.setopt import edit_config, option_base, config_file The provided code snippet includes necessary dependencies for implementing the `shquote` function. Write a Python function `def shquote(arg)` to solve the following problem: Quote an argument for later parsing by shlex.split() Here is the function: def shquote(arg): """Quote an argument for later parsing by shlex.split()""" for c in '"', "'", "\\", "#": if c in arg: return repr(arg) if arg.split() != [arg]: return repr(arg) return arg
Quote an argument for later parsing by shlex.split()
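shquote from the record above, checked against its own contract: whatever it returns should round-trip through shlex.split back to the original argument.

import shlex

def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    for c in '"', "'", "\\", "#":
        if c in arg:
            return repr(arg)
    if arg.split() != [arg]:
        return repr(arg)
    return arg

for arg in ['simple', 'two words', "don't", 'a#b']:
    quoted = shquote(arg)
    assert shlex.split(quoted) == [arg]
    print(arg, '->', quoted)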
174,566
from distutils.errors import DistutilsOptionError from setuptools.extern.six.moves import map from setuptools.command.setopt import edit_config, option_base, config_file import os import distutils from distutils.util import convert_path def config_file(kind="local"): """Get the filename of the distutils, local, global, or per-user config `kind` must be one of "local", "global", or "user" """ if kind == 'local': return 'setup.cfg' if kind == 'global': return os.path.join( os.path.dirname(distutils.__file__), 'distutils.cfg' ) if kind == 'user': dot = os.name == 'posix' and '.' or '' return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) raise ValueError( "config_file() type must be 'local', 'global', or 'user'", kind ) def format_alias(name, aliases): source, command = aliases[name] if source == config_file('global'): source = '--global-config ' elif source == config_file('user'): source = '--user-config ' elif source == config_file('local'): source = '' else: source = '--filename=%r' % source return source + name + ' ' + command
null
174,567
import os import sys import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library from setuptools.extern import six from distutils.sysconfig import _config_vars as _CONFIG_VARS if sys.platform == "darwin": use_stubs = True elif os.name != 'nt': try: import dl use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW') except ImportError: pass def _customize_compiler_for_shlib(compiler): if sys.platform == "darwin": # building .dylib requires additional compiler flags on OSX; here we # temporarily substitute the pyconfig.h variables so that distutils' # 'customize_compiler' uses them before we build the shared libraries. tmp = _CONFIG_VARS.copy() try: # XXX Help! I don't have any idea whether these are right... _CONFIG_VARS['LDSHARED'] = ( "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") _CONFIG_VARS['CCSHARED'] = " -dynamiclib" _CONFIG_VARS['SO'] = ".dylib" customize_compiler(compiler) finally: _CONFIG_VARS.clear() _CONFIG_VARS.update(tmp) else: customize_compiler(compiler)
null
174,568
import os import sys import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library from setuptools.extern import six from distutils.sysconfig import _config_vars as _CONFIG_VARS have_rtld = False def if_dl(s): return s if have_rtld else ''
null
174,569
import os import sys import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library from setuptools.extern import six from distutils.sysconfig import _config_vars as _CONFIG_VARS from importlib.machinery import EXTENSION_SUFFIXES The provided code snippet includes necessary dependencies for implementing the `get_abi3_suffix` function. Write a Python function `def get_abi3_suffix()` to solve the following problem: Return the file extension for an abi3-compliant Extension() Here is the function: def get_abi3_suffix(): """Return the file extension for an abi3-compliant Extension()""" for suffix in EXTENSION_SUFFIXES: if '.abi3' in suffix: # Unix return suffix elif suffix == '.pyd': # Windows return suffix
Return the file extension for an abi3-compliant Extension()
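The same lookup made self-contained: EXTENSION_SUFFIXES comes from importlib.machinery in the surrounding module. On a CPython/Linux build this typically prints '.abi3.so'; interpreters without abi3 support yield None.

from importlib.machinery import EXTENSION_SUFFIXES

def get_abi3_suffix():
    """Return the file extension for an abi3-compliant Extension()"""
    for suffix in EXTENSION_SUFFIXES:
        if '.abi3' in suffix:   # Unix
            return suffix
        elif suffix == '.pyd':  # Windows
            return suffix

print(EXTENSION_SUFFIXES)  # e.g. ['.cpython-311-x86_64-linux-gnu.so', '.abi3.so', '.so']
print(get_abi3_suffix())   # e.g. '.abi3.so'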
174,570
import os import sys import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library from setuptools.extern import six from distutils.sysconfig import _config_vars as _CONFIG_VARS def link_shared_object( self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): self.link( self.SHARED_LIBRARY, objects, output_libname, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang )
null
174,571
import os import sys import itertools from distutils.command.build_ext import build_ext as _du_build_ext from distutils.file_util import copy_file from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler, get_config_var from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library from setuptools.extern import six from distutils.sysconfig import _config_vars as _CONFIG_VARS def link_shared_object( self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # XXX we need to either disallow these attrs on Library instances, # or warn/abort here if set, or something... # libraries=None, library_dirs=None, runtime_library_dirs=None, # export_symbols=None, extra_preargs=None, extra_postargs=None, # build_temp=None assert output_dir is None # distutils build_ext doesn't pass this output_dir, filename = os.path.split(output_libname) basename, ext = os.path.splitext(filename) if self.library_filename("x").startswith('lib'): # strip 'lib' prefix; this is kludgy if some platform uses # a different prefix basename = basename[3:] self.create_static_lib( objects, basename, output_dir, debug, target_lang )
null
174,572
from base64 import standard_b64encode from distutils import log from distutils.errors import DistutilsOptionError import os import socket import zipfile import tempfile import shutil import itertools import functools from setuptools.extern import six from setuptools.extern.six.moves import http_client, urllib from pkg_resources import iter_entry_points from .upload import upload def _encode(s): errors = 'strict' if six.PY2 else 'surrogateescape' return s.encode('utf-8', errors)
null
174,573
from glob import glob from distutils.util import convert_path import distutils.command.build_py as orig import os import fnmatch import textwrap import io import distutils.errors import itertools import stat from setuptools.extern import six from setuptools.extern.six.moves import map, filter, filterfalse def make_writable(target): os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
null
174,574
from glob import glob from distutils.util import convert_path import distutils.command.build_py as orig import os import fnmatch import textwrap import io import distutils.errors import itertools import stat from setuptools.extern import six from setuptools.extern.six.moves import map, filter, filterfalse The provided code snippet includes necessary dependencies for implementing the `_unique_everseen` function. Write a Python function `def _unique_everseen(iterable, key=None)` to solve the following problem: List unique elements, preserving order. Remember all elements ever seen. Here is the function: def _unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element
List unique elements, preserving order. Remember all elements ever seen.
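The two doctest-style examples from the comments above, run for real; filterfalse lives in itertools on Python 3 (the record pulls it from six.moves).

from itertools import filterfalse

def _unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    seen = set()
    seen_add = seen.add
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element

print(list(_unique_everseen('AAAABBBCCDAABBB')))     # ['A', 'B', 'C', 'D']
print(list(_unique_everseen('ABBCcAD', str.lower)))  # ['A', 'B', 'C', 'D']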
174,575
from glob import glob from distutils.util import convert_path import distutils.command.build_py as orig import os import fnmatch import textwrap import io import distutils.errors import itertools import stat from setuptools.extern import six from setuptools.extern.six.moves import map, filter, filterfalse def assert_relative(path): if not os.path.isabs(path): return path from distutils.errors import DistutilsSetupError msg = textwrap.dedent(""" Error: setup script specifies an absolute path: %s setup() arguments must *always* be /-separated paths relative to the setup.py directory, *never* absolute paths. """).lstrip() % path raise DistutilsSetupError(msg)
null
174,576
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning The provided code snippet includes necessary dependencies for implementing the `translate_pattern` function. Write a Python function `def translate_pattern(glob)` to solve the following problem: Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories. Here is the function: def translate_pattern(glob): """ Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories. """ pat = '' # This will split on '/' within [character classes]. This is deliberate. chunks = glob.split(os.path.sep) sep = re.escape(os.sep) valid_char = '[^%s]' % (sep,) for c, chunk in enumerate(chunks): last_chunk = c == len(chunks) - 1 # Chunks that are a literal ** are globstars. They match anything. if chunk == '**': if last_chunk: # Match anything if this is the last component pat += '.*' else: # Match '(name/)*' pat += '(?:%s+%s)*' % (valid_char, sep) continue # Break here as the whole path component has been handled # Find any special characters in the remainder i = 0 chunk_len = len(chunk) while i < chunk_len: char = chunk[i] if char == '*': # Match any number of name characters pat += valid_char + '*' elif char == '?': # Match a name character pat += valid_char elif char == '[': # Character class inner_i = i + 1 # Skip initial !/] chars if inner_i < chunk_len and chunk[inner_i] == '!': inner_i = inner_i + 1 if inner_i < chunk_len and chunk[inner_i] == ']': inner_i = inner_i + 1 # Loop till the closing ] is found while inner_i < chunk_len and chunk[inner_i] != ']': inner_i = inner_i + 1 if inner_i >= chunk_len: # Got to the end of the string without finding a closing ] # Do not treat this as a matching group, but as a literal [ pat += re.escape(char) else: # Grab the insides of the [brackets] inner = chunk[i + 1:inner_i] char_class = '' # Class negation if inner[0] == '!': char_class = '^' inner = inner[1:] char_class += re.escape(inner) pat += '[%s]' % (char_class,) # Skip to the end ] i = inner_i else: pat += re.escape(char) i += 1 # Join each chunk with the dir separator if not last_chunk: pat += sep pat += r'\Z' return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
Translate a file path glob like '*.txt' into a regular expression. Unlike fnmatch.translate, whose wildcards may match directory separators, the wildcards here stop at separators. It also understands '**/', which matches any number of directories.
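A few spot checks, assuming translate_pattern from the record above is in scope and a POSIX os.sep; the paths are invented. They show the behaviors the docstring calls out: '*' stops at separators, while '**' spans (or matches zero) directories.

pat = translate_pattern('docs/**/*.txt')
print(bool(pat.match('docs/a/b/notes.txt')))  # True:  ** crosses directories
print(bool(pat.match('docs/notes.txt')))      # True:  ** also matches zero dirs
print(bool(pat.match('src/notes.txt')))       # False: wrong top-level dir

pat = translate_pattern('*.txt')
print(bool(pat.match('a/b.txt')))             # False: * will not cross '/'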
174,577
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning class egg_info(InfoCommon, Command): description = "create a distribution's .egg-info directory" user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" " (default: top of the source tree)"), ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), ('tag-build=', 'b', "Specify explicit tag to add to version number"), ('no-date', 'D', "Don't include date stamp [default]"), ] boolean_options = ['tag-date'] negative_opt = { 'no-date': 'tag-date', } def initialize_options(self): self.egg_base = None self.egg_name = None self.egg_info = None self.egg_version = None self.broken_egg_info = False #################################### # allow the 'tag_svn_revision' to be detected and # set, supporting sdists built on older Setuptools. @property def tag_svn_revision(self): pass @tag_svn_revision.setter def tag_svn_revision(self, value): pass #################################### def save_version_info(self, filename): """ Materialize the value of date into the build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds. """ egg_info = collections.OrderedDict() # follow the order these keys would have been added # when PYTHONHASHSEED=0 egg_info['tag_build'] = self.tags() egg_info['tag_date'] = 0 edit_config(filename, dict(egg_info=egg_info)) def finalize_options(self): # Note: we need to capture the current value returned # by `self.tagged_version()`, so we can later update # `self.distribution.metadata.version` without # repercussions. self.egg_name = self.name self.egg_version = self.tagged_version() parsed_version = parse_version(self.egg_version) try: is_version = isinstance(parsed_version, packaging.version.Version) spec = ( "%s==%s" if is_version else "%s===%s" ) list( parse_requirements(spec % (self.egg_name, self.egg_version)) ) except ValueError as e: raise distutils.errors.DistutilsOptionError( "Invalid distribution name or version syntax: %s-%s" % (self.egg_name, self.egg_version) ) from e if self.egg_base is None: dirs = self.distribution.package_dir self.egg_base = (dirs or {}).get('', os.curdir) self.ensure_dirname('egg_base') self.egg_info = to_filename(self.egg_name) + '.egg-info' if self.egg_base != os.curdir: self.egg_info = os.path.join(self.egg_base, self.egg_info) if '-' in self.egg_name: self.check_broken_egg_info() # Set package version for the benefit of dumber commands # (e.g. sdist, bdist_wininst, etc.)
self.distribution.metadata.version = self.egg_version # If we bootstrapped around the lack of a PKG-INFO, as might be the # case in a fresh checkout, make sure that any special tags get added # to the version info pd = self.distribution._patched_dist if pd is not None and pd.key == self.egg_name.lower(): pd._version = self.egg_version pd._parsed_version = parse_version(self.egg_version) self.distribution._patched_dist = None def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)``. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true). """ if data: self.write_file(what, filename, data) elif os.path.exists(filename): if data is None and not force: log.warn( "%s not set in setup(), but %s exists", what, filename ) return else: self.delete_file(filename) def write_file(self, what, filename, data): """Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file. """ log.info("writing %s to %s", what, filename) if not six.PY2: data = data.encode("utf-8") if not self.dry_run: f = open(filename, 'wb') f.write(data) f.close() def delete_file(self, filename): """Delete `filename` (if not a dry run) after announcing it""" log.info("deleting %s", filename) if not self.dry_run: os.unlink(filename) def run(self): self.mkpath(self.egg_info) os.utime(self.egg_info, None) installer = self.distribution.fetch_build_egg for ep in iter_entry_points('egg_info.writers'): ep.require(installer=installer) writer = ep.resolve() writer(self, ep.name, os.path.join(self.egg_info, ep.name)) # Get rid of native_libs.txt if it was put there by older bdist_egg nl = os.path.join(self.egg_info, "native_libs.txt") if os.path.exists(nl): self.delete_file(nl) self.find_sources() def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = manifest_maker(self.distribution) mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist def check_broken_egg_info(self): bei = self.egg_name + '.egg-info' if self.egg_base != os.curdir: bei = os.path.join(self.egg_base, bei) if os.path.exists(bei): log.warn( "-" * 78 + '\n' "Note: Your current .egg-info directory has a '-' in its name;" '\nthis will not work correctly with "setup.py develop".\n\n' 'Please rename %s to %s to correct this problem.\n' + '-' * 78, bei, self.egg_info ) self.broken_egg_info = self.egg_info self.egg_info = bei # make it work for now class bdist_egg(Command): description = "create an \"egg\" distribution" user_options = [ ('bdist-dir=', 'b', "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_build_platform()), ('exclude-source-files', None, "remove all .py files from the generated egg"), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ] boolean_options = [ 'keep-temp', 'skip-build', 'exclude-source-files' ] def initialize_options(self): self.bdist_dir = None
self.plat_name = None self.keep_temp = 0 self.dist_dir = None self.skip_build = 0 self.egg_output = None self.exclude_source_files = None def finalize_options(self): ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") self.egg_info = ei_cmd.egg_info if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'egg') if self.plat_name is None: self.plat_name = get_build_platform() self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) if self.egg_output is None: # Compute filename of the output egg basename = Distribution( None, None, ei_cmd.egg_name, ei_cmd.egg_version, get_python_version(), self.distribution.has_ext_modules() and self.plat_name ).egg_name() self.egg_output = os.path.join(self.dist_dir, basename + '.egg') def do_install_data(self): # Hack for packages that install data to install's --install-lib self.get_finalized_command('install').install_lib = self.bdist_dir site_packages = os.path.normcase(os.path.realpath(_get_purelib())) old, self.distribution.data_files = self.distribution.data_files, [] for item in old: if isinstance(item, tuple) and len(item) == 2: if os.path.isabs(item[0]): realpath = os.path.realpath(item[0]) normalized = os.path.normcase(realpath) if normalized == site_packages or normalized.startswith( site_packages + os.sep ): item = realpath[len(site_packages) + 1:], item[1] # XXX else: raise ??? self.distribution.data_files.append(item) try: log.info("installing package data to %s", self.bdist_dir) self.call_command('install_data', force=0, root=None) finally: self.distribution.data_files = old def get_outputs(self): return [self.egg_output] def call_command(self, cmdname, **kw): """Invoke reinitialized command `cmdname` with keyword args""" for dirname in INSTALL_DIRECTORY_ATTRS: kw.setdefault(dirname, self.bdist_dir) kw.setdefault('skip_build', self.skip_build) kw.setdefault('dry_run', self.dry_run) cmd = self.reinitialize_command(cmdname, **kw) self.run_command(cmdname) return cmd def run(self): # Generate metadata first self.run_command("egg_info") # We run install_lib before install_data, because some data hacks # pull their data path from the install_lib command. 
log.info("installing library code to %s", self.bdist_dir) instcmd = self.get_finalized_command('install') old_root = instcmd.root instcmd.root = None if self.distribution.has_c_libraries() and not self.skip_build: self.run_command('build_clib') cmd = self.call_command('install_lib', warn_dir=0) instcmd.root = old_root all_outputs, ext_outputs = self.get_ext_outputs() self.stubs = [] to_compile = [] for (p, ext_name) in enumerate(ext_outputs): filename, ext = os.path.splitext(ext_name) pyfile = os.path.join(self.bdist_dir, strip_module(filename) + '.py') self.stubs.append(pyfile) log.info("creating stub loader for %s", ext_name) if not self.dry_run: write_stub(os.path.basename(ext_name), pyfile) to_compile.append(pyfile) ext_outputs[p] = ext_name.replace(os.sep, '/') if to_compile: cmd.byte_compile(to_compile) if self.distribution.data_files: self.do_install_data() # Make the EGG-INFO directory archive_root = self.bdist_dir egg_info = os.path.join(archive_root, 'EGG-INFO') self.mkpath(egg_info) if self.distribution.scripts: script_dir = os.path.join(egg_info, 'scripts') log.info("installing scripts to %s", script_dir) self.call_command('install_scripts', install_dir=script_dir, no_ep=1) self.copy_metadata_to(egg_info) native_libs = os.path.join(egg_info, "native_libs.txt") if all_outputs: log.info("writing %s", native_libs) if not self.dry_run: ensure_directory(native_libs) libs_file = open(native_libs, 'wt') libs_file.write('\n'.join(all_outputs)) libs_file.write('\n') libs_file.close() elif os.path.isfile(native_libs): log.info("removing %s", native_libs) if not self.dry_run: os.unlink(native_libs) write_safety_flag( os.path.join(archive_root, 'EGG-INFO'), self.zip_safe() ) if os.path.exists(os.path.join(self.egg_info, 'depends.txt')): log.warn( "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." ) if self.exclude_source_files: self.zap_pyfiles() # Make the archive make_zipfile(self.egg_output, archive_root, verbose=self.verbose, dry_run=self.dry_run, mode=self.gen_header()) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) # Add to 'Distribution.dist_files' so that the "upload" command works getattr(self.distribution, 'dist_files', []).append( ('bdist_egg', get_python_version(), self.egg_output)) def zap_pyfiles(self): log.info("Removing .py files from temporary directory") for base, dirs, files in walk_egg(self.bdist_dir): for name in files: path = os.path.join(base, name) if name.endswith('.py'): log.debug("Deleting %s", path) os.unlink(path) if base.endswith('__pycache__'): path_old = path pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc' m = re.match(pattern, name) path_new = os.path.join( base, os.pardir, m.group('name') + '.pyc') log.info( "Renaming file from [%s] to [%s]" % (path_old, path_new)) try: os.remove(path_new) except OSError: pass os.rename(path_old, path_new) def zip_safe(self): safe = getattr(self.distribution, 'zip_safe', None) if safe is not None: return safe log.warn("zip_safe flag not set; analyzing archive contents...") return analyze_egg(self.bdist_dir, self.stubs) def gen_header(self): epm = EntryPoint.parse_map(self.distribution.entry_points or '') ep = epm.get('setuptools.installation', {}).get('eggsecutable') if ep is None: return 'w' # not an eggsecutable, do it the usual way. 
warnings.warn( "Eggsecutables are deprecated and will be removed in a future " "version.", SetuptoolsDeprecationWarning ) if not ep.attrs or ep.extras: raise DistutilsSetupError( "eggsecutable entry point (%r) cannot have 'extras' " "or refer to a module" % (ep,) ) pyver = '{}.{}'.format(*sys.version_info) pkg = ep.module_name full = '.'.join(ep.attrs) base = ep.attrs[0] basename = os.path.basename(self.egg_output) header = ( "#!/bin/sh\n" 'if [ `basename $0` = "%(basename)s" ]\n' 'then exec python%(pyver)s -c "' "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " "from %(pkg)s import %(base)s; sys.exit(%(full)s())" '" "$@"\n' 'else\n' ' echo $0 is not the correct name for this egg file.\n' ' echo Please rename it back to %(basename)s and try again.\n' ' exec false\n' 'fi\n' ) % locals() if not self.dry_run: mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) f = open(self.egg_output, 'w') f.write(header) f.close() return 'a' def copy_metadata_to(self, target_dir): "Copy metadata (egg info) to the target_dir" # normalize the path (so that a forward-slash in egg_info will # match using startswith below) norm_egg_info = os.path.normpath(self.egg_info) prefix = os.path.join(norm_egg_info, '') for path in self.ei_cmd.filelist.files: if path.startswith(prefix): target = os.path.join(target_dir, path[len(prefix):]) ensure_directory(target) self.copy_file(path, target) def get_ext_outputs(self): """Get a list of relative paths to C extensions in the output distro""" all_outputs = [] ext_outputs = [] paths = {self.bdist_dir: ''} for base, dirs, files in sorted_walk(self.bdist_dir): for filename in files: if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: all_outputs.append(paths[base] + filename) for filename in dirs: paths[os.path.join(base, filename)] = (paths[base] + filename + '/') if self.distribution.has_ext_modules(): build_cmd = self.get_finalized_command('build_ext') for ext in build_cmd.extensions: if isinstance(ext, Library): continue fullname = build_cmd.get_ext_fullname(ext.name) filename = build_cmd.get_ext_filename(fullname) if not os.path.basename(filename).startswith('dl-'): if os.path.exists(os.path.join(self.bdist_dir, filename)): ext_outputs.append(filename) return all_outputs, ext_outputs def write_pkg_info(cmd, basename, filename): log.info("writing %s", filename) if not cmd.dry_run: metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it metadata.write_pkg_info(cmd.egg_info) finally: metadata.name, metadata.version = oldname, oldver safe = getattr(cmd.distribution, 'zip_safe', None) bdist_egg.write_safety_flag(cmd.egg_info, safe)
null
174,578
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning def warn_depends_obsolete(cmd, basename, filename): if os.path.exists(filename): log.warn( "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" "Use the install_requires/extras_require setup() args instead." )
null
174,579
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) def append_cr(line): return line + '\n' lines = map(append_cr, lines) stream.writelines(lines) def write_requirements(cmd, basename, filename): dist = cmd.distribution data = six.StringIO() _write_requirements(data, dist.install_requires) extras_require = dist.extras_require or {} for extra in sorted(extras_require): data.write('\n[{extra}]\n'.format(**vars())) _write_requirements(data, extras_require[extra]) cmd.write_or_delete_file("requirements", filename, data.getvalue())
null
174,580
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) def append_cr(line): return line + '\n' lines = map(append_cr, lines) stream.writelines(lines) def write_setup_requirements(cmd, basename, filename): data = io.StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
null
174,581
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it.""" contents = "\n".join(contents) contents = contents.encode("utf-8") with open(filename, "wb") as f: f.write(contents) def write_toplevel_names(cmd, basename, filename): pkgs = dict.fromkeys( [ k.split('.', 1)[0] for k in cmd.distribution.iter_distribution_names() ] ) cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
null
174,582
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning def write_arg(cmd, basename, filename, force=False): argname = os.path.splitext(basename)[0] value = getattr(cmd.distribution, argname, None) if value is not None: value = '\n'.join(value) + '\n' cmd.write_or_delete_file(argname, filename, value, force) def overwrite_arg(cmd, basename, filename): write_arg(cmd, basename, filename, True)
null
174,583
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning class EntryPoint: """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = tuple(extras) self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. Call .resolve and " ".require separately.", PkgResourcesDeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) from exc def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) # Get the requirements for this entry point with all its extras and # then resolve them. We have to pass `extras` along when resolving so # that the working set knows what extras we want. Otherwise, for # dist-info distributions, the working set will assume that the # requirements for that extra are purely optional and skip over them. 
reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer, extras=self.extras) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name] = ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def write_entries(cmd, basename, filename): ep = cmd.distribution.entry_points if isinstance(ep, six.string_types) or ep is None: data = ep elif ep is not None: data = [] for section, contents in sorted(ep.items()): if not isinstance(contents, six.string_types): contents = EntryPoint.parse_group(section, contents) contents = '\n'.join(sorted(map(str, contents.values()))) data.append('[%s]\n%s\n\n' % (section, contents)) data = ''.join(data) cmd.write_or_delete_file('entry points', filename, data, True)
null
174,584
from distutils.filelist import FileList as _FileList from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors import distutils.filelist import os import re import sys import io import warnings import time import collections from setuptools.extern import six from setuptools.extern.six.moves import map from setuptools import Command from setuptools.command.sdist import sdist from setuptools.command.sdist import walk_revctrl from setuptools.command.setopt import edit_config from setuptools.command import bdist_egg from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob from setuptools.extern import packaging from setuptools import SetuptoolsDeprecationWarning class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning): """Deprecated behavior warning for EggInfo, bypassing suppression.""" The provided code snippet includes necessary dependencies for implementing the `get_pkg_info_revision` function. Write a Python function `def get_pkg_info_revision()` to solve the following problem: Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision. Here is the function: def get_pkg_info_revision(): """ Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision. """ warnings.warn( "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning) if os.path.exists('PKG-INFO'): with io.open('PKG-INFO') as f: for line in f: match = re.match(r"Version:.*-r(\d+)\s*$", line) if match: return int(match.group(1)) return 0
Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision.
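The Version-line regex in isolation, matched against a made-up PKG-INFO line of the kind an old Subversion-based sdist would carry.

import re

line = 'Version: 0.6c9-r66101'
match = re.match(r"Version:.*-r(\d+)\s*$", line)
print(int(match.group(1)))                                  # 66101
print(re.match(r"Version:.*-r(\d+)\s*$", 'Version: 1.0'))   # None: no -r suffix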
174,585
from distutils import log import distutils.command.sdist as orig import os import sys import io import contextlib from setuptools.extern import six, ordered_set from .py36compat import sdist_add_defaults import pkg_resources The provided code snippet includes necessary dependencies for implementing the `walk_revctrl` function. Write a Python function `def walk_revctrl(dirname='')` to solve the following problem: Find all files under revision control Here is the function: def walk_revctrl(dirname=''): """Find all files under revision control""" for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): for item in ep.load()(dirname): yield item
Find all files under revision control
174,586
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions try: from os import chmod as _chmod except ImportError: def _chmod(*args): pass The provided code snippet includes necessary dependencies for implementing the `samefile` function. Write a Python function `def samefile(p1, p2)` to solve the following problem: Determine if two paths reference the same file. Augments os.path.samefile to work on Windows and suppresses errors if the path doesn't exist. Here is the function: def samefile(p1, p2): """ Determine if two paths reference the same file. Augments os.path.samefile to work on Windows and suppresses errors if the path doesn't exist. """ both_exist = os.path.exists(p1) and os.path.exists(p2) use_samefile = hasattr(os.path, 'samefile') and both_exist if use_samefile: return os.path.samefile(p1, p2) norm_p1 = os.path.normpath(os.path.normcase(p1)) norm_p2 = os.path.normpath(os.path.normcase(p2)) return norm_p1 == norm_p2
Determine if two paths reference the same file. Augments os.path.samefile to work on Windows and suppresses errors if the path doesn't exist.
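samefile from the record above against a throwaway temp directory: identical existing paths hit os.path.samefile, while a nonexistent path drops to the normalized string comparison.

import os
import tempfile

d = tempfile.mkdtemp()
p = os.path.join(d, 'x.txt')
open(p, 'w').close()

print(samefile(p, p))                               # True (via os.path.samefile)
print(samefile(p, os.path.join(d, '.', 'x.txt')))   # True either way
print(samefile(p, os.path.join(d, 'missing.txt')))  # False (string fallback)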
174,587
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if six.PY2: def isascii(s): try: six.text_type(s, 'ascii') return True except UnicodeError: return False else: def isascii(s): try: s.encode('ascii') return True except UnicodeError: return False
null
174,588
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def _to_bytes(s): return s.encode('utf8')
null
174,589
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def isascii(s): try: s.encode('ascii') return True except UnicodeError: return False
null
174,590
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def _one_liner(text): return textwrap.dedent(text).strip().replace('\n', '; ')
null
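`_one_liner` collapses an indented multi-line snippet into a single '; '-joined statement, which is useful for building `python -c` commands. A sketch under that assumption:

src = '''
    import sys
    print(sys.prefix)
    '''
# dedent -> strip -> join lines with '; '
assert _one_liner(src) == 'import sys; print(sys.prefix)'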
174,591
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def _pythonpath(): items = os.environ.get('PYTHONPATH', '').split(os.pathsep) return filter(None, items) if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions if '__pypy__' in sys.builtin_module_names: _replace_zip_directory_cache_data = \ _remove_and_clear_zip_directory_cache_data else: try: from os import chmod as _chmod except ImportError: def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath( _cygwin_patch(filename)))) The provided code snippet includes necessary dependencies for implementing the `get_site_dirs` function. Write a Python function `def get_site_dirs()` to solve the following problem: Return a list of 'site' dirs Here is the function: def get_site_dirs(): """ Return a list of 'site' dirs """ sitedirs = [] # start with PYTHONPATH sitedirs.extend(_pythonpath()) prefixes = [sys.prefix] if sys.exec_prefix != sys.prefix: prefixes.append(sys.exec_prefix) for prefix in prefixes: if prefix: if sys.platform in ('os2emx', 'riscos'): sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) elif os.sep == '/': sitedirs.extend([ os.path.join( prefix, "lib", "python{}.{}".format(*sys.version_info), "site-packages", ), os.path.join(prefix, "lib", "site-python"), ]) else: sitedirs.extend([ prefix, os.path.join(prefix, "lib", "site-packages"), ]) if sys.platform == 'darwin': # for framework builds *only* we add the standard Apple # locations. 
# Currently only per-user, but /Library and # /Network/Library could be added too if 'Python.framework' in prefix: home = os.environ.get('HOME') if home: home_sp = os.path.join( home, 'Library', 'Python', '{}.{}'.format(*sys.version_info), 'site-packages', ) sitedirs.append(home_sp) lib_paths = get_path('purelib'), get_path('platlib') for site_lib in lib_paths: if site_lib not in sitedirs: sitedirs.append(site_lib) if site.ENABLE_USER_SITE: sitedirs.append(site.USER_SITE) try: sitedirs.extend(site.getsitepackages()) except AttributeError: pass sitedirs = list(map(normalize_path, sitedirs)) return sitedirs
Return a list of 'site' dirs
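A call sketch for `get_site_dirs`; the actual output is entirely host-specific:

for d in get_site_dirs():
    # every entry has already been passed through normalize_path(),
    # so it is safe to compare against other normalized paths
    print(d)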
174,592
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions try: from os import chmod as _chmod except ImportError: def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath( _cygwin_patch(filename)))) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, six.string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s The provided code snippet includes necessary dependencies for implementing the `expand_paths` function. Write a Python function `def expand_paths(inputs)` to solve the following problem: Yield sys.path directories that might contain "old-style" packages Here is the function: def expand_paths(inputs): """Yield sys.path directories that might contain "old-style" packages""" seen = {} for dirname in inputs: dirname = normalize_path(dirname) if dirname in seen: continue seen[dirname] = 1 if not os.path.isdir(dirname): continue files = os.listdir(dirname) yield dirname, files for name in files: if not name.endswith('.pth'): # We only care about the .pth files continue if name in ('easy-install.pth', 'setuptools.pth'): # Ignore .pth files that we control continue # Read the .pth file f = open(os.path.join(dirname, name)) lines = list(yield_lines(f)) f.close() # Yield existing non-dupe, non-import directory lines from it for line in lines: if not line.startswith("import"): line = normalize_path(line.rstrip()) if line not in seen: seen[line] = 1 if not os.path.isdir(line): continue yield line, os.listdir(line)
Yield sys.path directories that might contain "old-style" packages
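`expand_paths` is a generator of (directory, filenames) pairs. The sketch below feeds it the result of `get_site_dirs` from the previous row, which is an assumption on my part since the dataset rows are independent:

for dirname, files in expand_paths(get_site_dirs()):
    legacy = [name for name in files if name.endswith('.egg-info')]
    if legacy:
        print(dirname, legacy)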
174,593
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if six.PY2: else: if '__pypy__' in sys.builtin_module_names: _replace_zip_directory_cache_data = \ _remove_and_clear_zip_directory_cache_data else: The provided code snippet includes necessary dependencies for implementing the `extract_wininst_cfg` function. Write a Python function `def extract_wininst_cfg(dist_filename)` to solve the following problem: Extract configuration data from a bdist_wininst .exe Returns a configparser.RawConfigParser, or None Here is the function: def extract_wininst_cfg(dist_filename): """Extract configuration data from a bdist_wininst .exe Returns a configparser.RawConfigParser, or None """ f = open(dist_filename, 'rb') try: endrec = zipfile._EndRecData(f) if endrec is None: return None prepended = (endrec[9] - endrec[5]) - endrec[6] if prepended < 12: # no wininst data here return None f.seek(prepended - 12) tag, cfglen, bmlen = struct.unpack("<iii", f.read(12)) if tag not in (0x1234567A, 0x1234567B): return None # not a valid tag f.seek(prepended - (12 + cfglen)) init = {'version': '', 'target_version': ''} cfg = configparser.RawConfigParser(init) try: part = f.read(cfglen) # Read up to the first null byte. config = part.split(b'\0', 1)[0] # Now the config is in bytes, but for RawConfigParser, it should # be text, so decode it. config = config.decode(sys.getfilesystemencoding()) cfg.readfp(six.StringIO(config)) except configparser.Error: return None if not cfg.has_section('metadata') or not cfg.has_section('Setup'): return None return cfg finally: f.close()
Extract configuration data from a bdist_wininst .exe Returns a configparser.RawConfigParser, or None
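A hedged call sketch for `extract_wininst_cfg`; 'dist.exe' is a hypothetical bdist_wininst installer, and a None result means the file carries no wininst metadata:

cfg = extract_wininst_cfg('dist.exe')  # hypothetical file name
if cfg is None:
    print('not a bdist_wininst installer')
else:
    print(cfg.sections())  # expected to include 'metadata' and 'Setup'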
174,594
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if six.PY2: else: def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, six.string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s The provided code snippet includes necessary dependencies for implementing the `get_exe_prefixes` function. Write a Python function `def get_exe_prefixes(exe_filename)` to solve the following problem: Get exe->egg path translations for a given .exe file Here is the function: def get_exe_prefixes(exe_filename): """Get exe->egg path translations for a given .exe file""" prefixes = [ ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), ('PLATLIB/', ''), ('SCRIPTS/', 'EGG-INFO/scripts/'), ('DATA/lib/site-packages', ''), ] z = zipfile.ZipFile(exe_filename) try: for info in z.infolist(): name = info.filename parts = name.split('/') if len(parts) == 3 and parts[2] == 'PKG-INFO': if parts[1].endswith('.egg-info'): prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/')) break if len(parts) != 2 or not name.endswith('.pth'): continue if name.endswith('-nspkg.pth'): continue if parts[0].upper() in ('PURELIB', 'PLATLIB'): contents = z.read(name) if not six.PY2: contents = contents.decode() for pth in yield_lines(contents): pth = pth.strip().replace('\\', '/') if not pth.startswith('import'): prefixes.append((('%s/%s/' % (parts[0], pth)), '')) finally: z.close() prefixes = [(x.lower(), y) for x, y in prefixes] prefixes.sort() prefixes.reverse() return prefixes
Get exe->egg path translations for a given .exe file
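The pairs returned by `get_exe_prefixes` map lowercase archive-path prefixes to egg-relative prefixes, most specific first. A translation sketch, again with a hypothetical installer name:

prefixes = get_exe_prefixes('dist.exe')  # hypothetical file name
member = 'PURELIB/pkg/__init__.py'.lower()
for old, new in prefixes:
    if member.startswith(old):
        print(new + member[len(old):])  # -> 'pkg/__init__.py'
        break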
174,595
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources The provided code snippet includes necessary dependencies for implementing the `_first_line_re` function. Write a Python function `def _first_line_re()` to solve the following problem: Return a regular expression based on first_line_re suitable for matching strings. Here is the function: def _first_line_re(): """ Return a regular expression based on first_line_re suitable for matching strings. """ if isinstance(first_line_re.pattern, str): return first_line_re # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. return re.compile(first_line_re.pattern.decode())
Return a regular expression based on first_line_re suitable for matching strings.
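A small check of `_first_line_re`: the returned pattern is a str pattern, so it matches directly against decoded script text:

first_line = '#!/usr/bin/env python\n'
assert _first_line_re().match(first_line) is not None  # shebang line naming python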
174,596
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def _uncache(normalized_path, cache): _update_zipimporter_cache(normalized_path, cache) def _remove_and_clear_zip_directory_cache_data(normalized_path): def clear_and_remove_cached_zip_archive_directory_data(path, old_entry): old_entry.clear() _update_zipimporter_cache( normalized_path, zipimport._zip_directory_cache, updater=clear_and_remove_cached_zip_archive_directory_data) if '__pypy__' in sys.builtin_module_names: _replace_zip_directory_cache_data = \ _remove_and_clear_zip_directory_cache_data else: def _replace_zip_directory_cache_data(normalized_path): def replace_cached_zip_archive_directory_data(path, old_entry): # N.B. In theory, we could load the zip directory information just # once for all updated path spellings, and then copy it locally and # update its contained path strings to contain the correct # spelling, but that seems like a way too invasive move (this cache # structure is not officially documented anywhere and could in # theory change with new Python releases) for no significant # benefit. old_entry.clear() zipimport.zipimporter(path) old_entry.update(zipimport._zip_directory_cache[path]) return old_entry _update_zipimporter_cache( normalized_path, zipimport._zip_directory_cache, updater=replace_cached_zip_archive_directory_data) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(os.path.normpath( _cygwin_patch(filename)))) The provided code snippet includes necessary dependencies for implementing the `update_dist_caches` function. Write a Python function `def update_dist_caches(dist_path, fix_zipimporter_caches)` to solve the following problem: Fix any globally cached `dist_path` related data `dist_path` should be a path of a newly installed egg distribution (zipped or unzipped). sys.path_importer_cache contains finder objects that have been cached when importing data from the original distribution. 
Any such finders need to be cleared since the replacement distribution might be packaged differently, e.g. a zipped egg distribution might get replaced with an unzipped egg folder or vice versa. Having the old finders cached may then cause Python to attempt loading modules from the replacement distribution using an incorrect loader. zipimport.zipimporter objects are Python loaders charged with importing data packaged inside zip archives. If stale loaders referencing the original distribution, are left behind, they can fail to load modules from the replacement distribution. E.g. if an old zipimport.zipimporter instance is used to load data from a new zipped egg archive, it may cause the operation to attempt to locate the requested data in the wrong location - one indicated by the original distribution's zip archive directory information. Such an operation may then fail outright, e.g. report having read a 'bad local file header', or even worse, it may fail silently & return invalid data. zipimport._zip_directory_cache contains cached zip archive directory information for all existing zipimport.zipimporter instances and all such instances connected to the same archive share the same cached directory information. If asked, and the underlying Python implementation allows it, we can fix all existing zipimport.zipimporter instances instead of having to track them down and remove them one by one, by updating their shared cached zip archive directory information. This, of course, assumes that the replacement distribution is packaged as a zipped egg. If not asked to fix existing zipimport.zipimporter instances, we still do our best to clear any remaining zipimport.zipimporter related cached data that might somehow later get used when attempting to load data from the new distribution and thus cause such load operations to fail. Note that when tracking down such remaining stale data, we can not catch every conceivable usage from here, and we clear only those that we know of and have found to cause problems if left alive. Any remaining caches should be updated by whomever is in charge of maintaining them, i.e. they should be ready to handle us replacing their zip archives with new distributions at runtime. Here is the function: def update_dist_caches(dist_path, fix_zipimporter_caches): """ Fix any globally cached `dist_path` related data `dist_path` should be a path of a newly installed egg distribution (zipped or unzipped). sys.path_importer_cache contains finder objects that have been cached when importing data from the original distribution. Any such finders need to be cleared since the replacement distribution might be packaged differently, e.g. a zipped egg distribution might get replaced with an unzipped egg folder or vice versa. Having the old finders cached may then cause Python to attempt loading modules from the replacement distribution using an incorrect loader. zipimport.zipimporter objects are Python loaders charged with importing data packaged inside zip archives. If stale loaders referencing the original distribution, are left behind, they can fail to load modules from the replacement distribution. E.g. if an old zipimport.zipimporter instance is used to load data from a new zipped egg archive, it may cause the operation to attempt to locate the requested data in the wrong location - one indicated by the original distribution's zip archive directory information. Such an operation may then fail outright, e.g. 
report having read a 'bad local file header', or even worse, it may fail silently & return invalid data. zipimport._zip_directory_cache contains cached zip archive directory information for all existing zipimport.zipimporter instances and all such instances connected to the same archive share the same cached directory information. If asked, and the underlying Python implementation allows it, we can fix all existing zipimport.zipimporter instances instead of having to track them down and remove them one by one, by updating their shared cached zip archive directory information. This, of course, assumes that the replacement distribution is packaged as a zipped egg. If not asked to fix existing zipimport.zipimporter instances, we still do our best to clear any remaining zipimport.zipimporter related cached data that might somehow later get used when attempting to load data from the new distribution and thus cause such load operations to fail. Note that when tracking down such remaining stale data, we can not catch every conceivable usage from here, and we clear only those that we know of and have found to cause problems if left alive. Any remaining caches should be updated by whomever is in charge of maintaining them, i.e. they should be ready to handle us replacing their zip archives with new distributions at runtime. """ # There are several other known sources of stale zipimport.zipimporter # instances that we do not clear here, but might if ever given a reason to # do so: # * Global setuptools pkg_resources.working_set (a.k.a. 'master working # set') may contain distributions which may in turn contain their # zipimport.zipimporter loaders. # * Several zipimport.zipimporter loaders held by local variables further # up the function call stack when running the setuptools installation. # * Already loaded modules may have their __loader__ attribute set to the # exact loader instance used when importing them. Python 3.4 docs state # that this information is intended mostly for introspection and so is # not expected to cause us problems. normalized_path = normalize_path(dist_path) _uncache(normalized_path, sys.path_importer_cache) if fix_zipimporter_caches: _replace_zip_directory_cache_data(normalized_path) else: # Here, even though we do not want to fix existing and now stale # zipimporter cache information, we still want to remove it. Related to # Python's zip archive directory information cache, we clear each of # its stale entries in two phases: # 1. Clear the entry so attempting to access zip archive information # via any existing stale zipimport.zipimporter instances fails. # 2. Remove the entry from the cache so any newly constructed # zipimport.zipimporter instances do not end up using old stale # zip archive directory information. # This whole stale data removal step does not seem strictly necessary, # but has been left in because it was done before we started replacing # the zip archive directory information cache content if possible, and # there are no relevant unit tests that we can depend on to tell us if # this is really needed. _remove_and_clear_zip_directory_cache_data(normalized_path)
Fix any globally cached `dist_path` related data `dist_path` should be a path of a newly installed egg distribution (zipped or unzipped). sys.path_importer_cache contains finder objects that have been cached when importing data from the original distribution. Any such finders need to be cleared since the replacement distribution might be packaged differently, e.g. a zipped egg distribution might get replaced with an unzipped egg folder or vice versa. Having the old finders cached may then cause Python to attempt loading modules from the replacement distribution using an incorrect loader. zipimport.zipimporter objects are Python loaders charged with importing data packaged inside zip archives. If stale loaders referencing the original distribution, are left behind, they can fail to load modules from the replacement distribution. E.g. if an old zipimport.zipimporter instance is used to load data from a new zipped egg archive, it may cause the operation to attempt to locate the requested data in the wrong location - one indicated by the original distribution's zip archive directory information. Such an operation may then fail outright, e.g. report having read a 'bad local file header', or even worse, it may fail silently & return invalid data. zipimport._zip_directory_cache contains cached zip archive directory information for all existing zipimport.zipimporter instances and all such instances connected to the same archive share the same cached directory information. If asked, and the underlying Python implementation allows it, we can fix all existing zipimport.zipimporter instances instead of having to track them down and remove them one by one, by updating their shared cached zip archive directory information. This, of course, assumes that the replacement distribution is packaged as a zipped egg. If not asked to fix existing zipimport.zipimporter instances, we still do our best to clear any remaining zipimport.zipimporter related cached data that might somehow later get used when attempting to load data from the new distribution and thus cause such load operations to fail. Note that when tracking down such remaining stale data, we can not catch every conceivable usage from here, and we clear only those that we know of and have found to cause problems if left alive. Any remaining caches should be updated by whomever is in charge of maintaining them, i.e. they should be ready to handle us replacing their zip archives with new distributions at runtime.
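A minimal call sketch for `update_dist_caches`; the egg path below is hypothetical:

# after replacing foo-1.0-py3.8.egg on disk, refresh stale import caches
update_dist_caches('/tmp/site/foo-1.0-py3.8.egg', fix_zipimporter_caches=True)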
174,597
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources The provided code snippet includes necessary dependencies for implementing the `is_sh` function. Write a Python function `def is_sh(executable)` to solve the following problem: Determine if the specified executable is a .sh (contains a #! line) Here is the function: def is_sh(executable): """Determine if the specified executable is a .sh (contains a #! line)""" try: with io.open(executable, encoding='latin-1') as fp: magic = fp.read(2) except (OSError, IOError): return executable return magic == '#!'
Determine if the specified executable is a .sh (contains a #! line)
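Note that on an unreadable file `is_sh` returns the executable path itself (a truthy string) rather than a boolean, so callers should be careful with the return value. A sketch:

import os, tempfile
fd, path = tempfile.mkstemp()
os.write(fd, b'#!/bin/sh\necho hi\n')
os.close(fd)
assert is_sh(path)  # first two bytes are '#!'
os.unlink(path)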
174,598
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources The provided code snippet includes necessary dependencies for implementing the `nt_quote_arg` function. Write a Python function `def nt_quote_arg(arg)` to solve the following problem: Quote a command line argument according to Windows parsing rules Here is the function: def nt_quote_arg(arg): """Quote a command line argument according to Windows parsing rules""" return subprocess.list2cmdline([arg])
Quote a command line argument according to Windows parsing rules
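A few illustrative cases for `nt_quote_arg`, which simply defers to `subprocess.list2cmdline` and its MSVCRT quoting rules:

assert nt_quote_arg('simple') == 'simple'
assert nt_quote_arg('with space') == '"with space"'
assert nt_quote_arg('a"b') == 'a\\"b'  # embedded quote gets backslash-escaped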
174,599
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def is_python(text, filename='<string>'): "Is this string a valid Python script?" try: compile(text, filename, 'exec') except (SyntaxError, TypeError): return False else: return True The provided code snippet includes necessary dependencies for implementing the `is_python_script` function. Write a Python function `def is_python_script(script_text, filename)` to solve the following problem: Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. Here is the function: def is_python_script(script_text, filename): """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. """ if filename.endswith('.py') or filename.endswith('.pyw'): return True # extension says it's Python if is_python(script_text, filename): return True # it's syntactically valid Python if script_text.startswith('#!'): # It begins with a '#!' line, so check if 'python' is in it somewhere return 'python' in script_text.splitlines()[0].lower() return False # Not any Python I can recognize
Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.)
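Representative inputs for `is_python_script`, as a sketch:

assert is_python_script('print("hi")', 'tool')                 # valid Python syntax
assert is_python_script('#!/usr/bin/python\nexec $0', 'tool')  # shebang names python
assert not is_python_script('echo hi', 'run.sh')               # none of the checks match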
174,600
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def is_64bit(): return struct.calcsize("P") == 8 resource_string = None The provided code snippet includes necessary dependencies for implementing the `get_win_launcher` function. Write a Python function `def get_win_launcher(type)` to solve the following problem: Load the Windows launcher (executable) suitable for launching a script. `type` should be either 'cli' or 'gui' Returns the executable as a byte string. Here is the function: def get_win_launcher(type): """ Load the Windows launcher (executable) suitable for launching a script. `type` should be either 'cli' or 'gui' Returns the executable as a byte string. """ launcher_fn = '%s.exe' % type if is_64bit(): launcher_fn = launcher_fn.replace(".", "-64.") else: launcher_fn = launcher_fn.replace(".", "-32.") return resource_string('setuptools', launcher_fn)
Load the Windows launcher (executable) suitable for launching a script. `type` should be either 'cli' or 'gui' Returns the executable as a byte string.
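A sketch of using `get_win_launcher` to materialize a console launcher; this assumes the real `pkg_resources.resource_string` is in scope (the row above stubs it to None) and that the setuptools launcher resources are installed:

launcher = get_win_launcher('cli')     # PE image bytes, -32 or -64 per the host
with open('myscript.exe', 'wb') as f:  # hypothetical output name
    f.write(launcher)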
174,601
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if six.PY2: else: resource_string = None def load_launcher_manifest(name): manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml') if six.PY2: return manifest % vars() else: return manifest.decode('utf-8') % vars()
null
174,602
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources def auto_chmod(func, arg, exc): if func in [os.unlink, os.remove] and os.name == 'nt': chmod(arg, stat.S_IWRITE) return func(arg) et, ev, _ = sys.exc_info() six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg)))) def rmtree(path, ignore_errors=False, onerror=auto_chmod): return shutil.rmtree(path, ignore_errors, onerror)
null
174,603
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions try: from os import chmod as _chmod except ImportError: def current_umask(): tmp = os.umask(0o022) os.umask(tmp) return tmp
null
174,604
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions if '__pypy__' in sys.builtin_module_names: _replace_zip_directory_cache_data = \ _remove_and_clear_zip_directory_cache_data else: try: from os import chmod as _chmod except ImportError: def main(argv=None, **kw): def bootstrap(): # This function is called when setuptools*.egg is run using /bin/sh import setuptools argv0 = os.path.dirname(setuptools.__path__[0]) sys.argv[0] = argv0 sys.argv.append(argv0) main()
null
174,605
from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars from distutils.errors import ( DistutilsArgError, DistutilsOptionError, DistutilsError, DistutilsPlatformError, ) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re from distutils.spawn import find_executable import sys import os import zipimport import shutil import tempfile import zipfile import re import stat import random import textwrap import warnings import site import struct import contextlib import subprocess import shlex import io from sysconfig import get_config_vars, get_path from setuptools import SetuptoolsDeprecationWarning from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive from setuptools.package_index import ( PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) import pkg_resources if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions try: from os import chmod as _chmod except ImportError: def _patch_usage(): import distutils.core USAGE = textwrap.dedent(""" usage: %(script)s [options] requirement_or_url ... or: %(script)s --help """).lstrip() def gen_usage(script_name): return USAGE % dict( script=os.path.basename(script_name), ) saved = distutils.core.gen_usage distutils.core.gen_usage = gen_usage try: yield finally: distutils.core.gen_usage = saved
null
174,606
from distutils.util import convert_path from distutils import log from distutils.errors import DistutilsOptionError import distutils import os from setuptools.extern.six.moves import configparser from setuptools import Command The provided code snippet includes necessary dependencies for implementing the `edit_config` function. Write a Python function `def edit_config(filename, settings, dry_run=False)` to solve the following problem: Edit a configuration file to include `settings` `settings` is a dictionary of dictionaries or ``None`` values, keyed by command/section name. A ``None`` value means to delete the entire section, while a dictionary lists settings to be changed or deleted in that section. A setting of ``None`` means to delete that setting. Here is the function: def edit_config(filename, settings, dry_run=False): """Edit a configuration file to include `settings` `settings` is a dictionary of dictionaries or ``None`` values, keyed by command/section name. A ``None`` value means to delete the entire section, while a dictionary lists settings to be changed or deleted in that section. A setting of ``None`` means to delete that setting. """ log.debug("Reading configuration from %s", filename) opts = configparser.RawConfigParser() opts.read([filename]) for section, options in settings.items(): if options is None: log.info("Deleting section [%s] from %s", section, filename) opts.remove_section(section) else: if not opts.has_section(section): log.debug("Adding new section [%s] to %s", section, filename) opts.add_section(section) for option, value in options.items(): if value is None: log.debug( "Deleting %s.%s from %s", section, option, filename ) opts.remove_option(section, option) if not opts.options(section): log.info("Deleting empty [%s] section from %s", section, filename) opts.remove_section(section) else: log.debug( "Setting %s.%s to %r in %s", section, option, value, filename ) opts.set(section, option, value) log.info("Writing %s", filename) if not dry_run: with open(filename, 'w') as f: opts.write(f)
Edit a configuration file to include `settings` `settings` is a dictionary of dictionaries or ``None`` values, keyed by command/section name. A ``None`` value means to delete the entire section, while a dictionary lists settings to be changed or deleted in that section. A setting of ``None`` means to delete that setting.
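A usage sketch for `edit_config`; the file name and settings below are illustrative only:

edit_config(
    'setup.cfg',  # hypothetical config file
    {
        'easy_install': {'index-url': 'https://example.org/simple'},
        'bdist_wheel': None,  # drop the whole [bdist_wheel] section
    },
    dry_run=True,  # log the changes without writing them
)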
174,607
import json from io import open from os import listdir, pathsep from os.path import join, isfile, isdir, dirname import sys import platform import itertools import subprocess import distutils.errors from setuptools.extern.packaging.version import LegacyVersion from setuptools.extern.six.moves import filterfalse from .monkey import get_unpatched def get_unpatched(item): lookup = ( get_unpatched_class if isinstance(item, six.class_types) else get_unpatched_function if isinstance(item, types.FunctionType) else lambda item: None ) return lookup(item) The provided code snippet includes necessary dependencies for implementing the `msvc9_find_vcvarsall` function. Write a Python function `def msvc9_find_vcvarsall(version)` to solve the following problem: Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone compiler build for Python (VCForPython / Microsoft Visual C++ Compiler for Python 2.7). Fall back to original behavior when the standalone compiler is not available. Redirect the path of "vcvarsall.bat". Parameters ---------- version: float Required Microsoft Visual C++ version. Return ------ str vcvarsall.bat path Here is the function: def msvc9_find_vcvarsall(version): """ Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone compiler build for Python (VCForPython / Microsoft Visual C++ Compiler for Python 2.7). Fall back to original behavior when the standalone compiler is not available. Redirect the path of "vcvarsall.bat". Parameters ---------- version: float Required Microsoft Visual C++ version. Return ------ str vcvarsall.bat path """ vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' key = vc_base % ('', version) try: # Per-user installs register the compiler path here productdir = Reg.get_value(key, "installdir") except KeyError: try: # All-user installs on a 64-bit system register here key = vc_base % ('Wow6432Node\\', version) productdir = Reg.get_value(key, "installdir") except KeyError: productdir = None if productdir: vcvarsall = join(productdir, "vcvarsall.bat") if isfile(vcvarsall): return vcvarsall return get_unpatched(msvc9_find_vcvarsall)(version)
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone compiler build for Python (VCForPython / Microsoft Visual C++ Compiler for Python 2.7). Fall back to original behavior when the standalone compiler is not available. Redirect the path of "vcvarsall.bat". Parameters ---------- version: float Required Microsoft Visual C++ version. Return ------ str vcvarsall.bat path
174,608
import json from io import open from os import listdir, pathsep from os.path import join, isfile, isdir, dirname import sys import platform import itertools import subprocess import distutils.errors from setuptools.extern.packaging.version import LegacyVersion from setuptools.extern.six.moves import filterfalse from .monkey import get_unpatched try: from distutils.msvc9compiler import Reg except _msvc9_suppress_errors: pass def _augment_exception(exc, version, arch=''): """ Add details to the exception message to help guide the user as to what action will resolve it. """ # Error if MSVC++ directory not found or environment not set message = exc.args[0] if "vcvarsall" in message.lower() or "visual c" in message.lower(): # Special error message if MSVC++ not installed tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' message = tmpl.format(**locals()) msdownload = 'www.microsoft.com/download/details.aspx?id=%d' if version == 9.0: if arch.lower().find('ia64') > -1: # For VC++ 9.0, if IA64 support is needed, redirect user # to Windows SDK 7.0. # Note: No download link available from Microsoft. message += ' Get it with "Microsoft Windows SDK 7.0"' else: # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : # This redirection link is maintained by Microsoft. # Contact vspython@microsoft.com if it needs updating. message += ' Get it from http://aka.ms/vcpython27' elif version == 10.0: # For VC++ 10.0 Redirect user to Windows SDK 7.1 message += ' Get it with "Microsoft Windows SDK 7.1": ' message += msdownload % 8279 elif version >= 14.0: # For VC++ 14.X Redirect user to latest Visual C++ Build Tools message += (' Get it with "Build Tools for Visual Studio": ' r'https://visualstudio.microsoft.com/downloads/') exc.args = (message, ) class EnvironmentInfo: """ Return environment variables for specified Microsoft Visual C++ version and platform : Lib, Include, Path and libpath. This function is compatible with Microsoft Visual C++ 9.0 to 14.X. Script created by analysing Microsoft environment configuration files like "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... Parameters ---------- arch: str Target architecture. vc_ver: float Required Microsoft Visual C++ version. If not set, autodetect the last version. vc_min_ver: float Minimum Microsoft Visual C++ version. """ # Variables and properties in this class use originals CamelCase variables # names from Microsoft source files for more easy comparison. def __init__(self, arch, vc_ver=None, vc_min_ver=0): self.pi = PlatformInfo(arch) self.ri = RegistryInfo(self.pi) self.si = SystemInfo(self.ri, vc_ver) if self.vc_ver < vc_min_ver: err = 'No suitable Microsoft Visual C++ version found' raise distutils.errors.DistutilsPlatformError(err) def vs_ver(self): """ Microsoft Visual Studio. Return ------ float version """ return self.si.vs_ver def vc_ver(self): """ Microsoft Visual C++ version. Return ------ float version """ return self.si.vc_ver def VSTools(self): """ Microsoft Visual Studio Tools. Return ------ list of str paths """ paths = [r'Common7\IDE', r'Common7\Tools'] if self.vs_ver >= 14.0: arch_subdir = self.pi.current_dir(hidex86=True, x64=True) paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] paths += [r'Team Tools\Performance Tools'] paths += [r'Team Tools\Performance Tools%s' % arch_subdir] return [join(self.si.VSInstallDir, path) for path in paths] def VCIncludes(self): """ Microsoft Visual C++ & Microsoft Foundation Class Includes. 
Return ------ list of str paths """ return [join(self.si.VCInstallDir, 'Include'), join(self.si.VCInstallDir, r'ATLMFC\Include')] def VCLibraries(self): """ Microsoft Visual C++ & Microsoft Foundation Class Libraries. Return ------ list of str paths """ if self.vs_ver >= 15.0: arch_subdir = self.pi.target_dir(x64=True) else: arch_subdir = self.pi.target_dir(hidex86=True) paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] if self.vs_ver >= 14.0: paths += [r'Lib\store%s' % arch_subdir] return [join(self.si.VCInstallDir, path) for path in paths] def VCStoreRefs(self): """ Microsoft Visual C++ store references Libraries. Return ------ list of str paths """ if self.vs_ver < 14.0: return [] return [join(self.si.VCInstallDir, r'Lib\store\references')] def VCTools(self): """ Microsoft Visual C++ Tools. Return ------ list of str paths """ si = self.si tools = [join(si.VCInstallDir, 'VCPackages')] forcex86 = True if self.vs_ver <= 10.0 else False arch_subdir = self.pi.cross_dir(forcex86) if arch_subdir: tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)] if self.vs_ver == 14.0: path = 'Bin%s' % self.pi.current_dir(hidex86=True) tools += [join(si.VCInstallDir, path)] elif self.vs_ver >= 15.0: host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else r'bin\HostX64%s') tools += [join( si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] if self.pi.current_cpu != self.pi.target_cpu: tools += [join( si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] else: tools += [join(si.VCInstallDir, 'Bin')] return tools def OSLibraries(self): """ Microsoft Windows SDK Libraries. Return ------ list of str paths """ if self.vs_ver <= 10.0: arch_subdir = self.pi.target_dir(hidex86=True, x64=True) return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] else: arch_subdir = self.pi.target_dir(x64=True) lib = join(self.si.WindowsSdkDir, 'lib') libver = self._sdk_subdir return [join(lib, '%sum%s' % (libver, arch_subdir))] def OSIncludes(self): """ Microsoft Windows SDK Include. Return ------ list of str paths """ include = join(self.si.WindowsSdkDir, 'include') if self.vs_ver <= 10.0: return [include, join(include, 'gl')] else: if self.vs_ver >= 14.0: sdkver = self._sdk_subdir else: sdkver = '' return [join(include, '%sshared' % sdkver), join(include, '%sum' % sdkver), join(include, '%swinrt' % sdkver)] def OSLibpath(self): """ Microsoft Windows SDK Libraries Paths. Return ------ list of str paths """ ref = join(self.si.WindowsSdkDir, 'References') libpath = [] if self.vs_ver <= 9.0: libpath += self.OSLibraries if self.vs_ver >= 11.0: libpath += [join(ref, r'CommonConfiguration\Neutral')] if self.vs_ver >= 14.0: libpath += [ ref, join(self.si.WindowsSdkDir, 'UnionMetadata'), join( ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'), join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'), join( ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0'), join( self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration', 'neutral'), ] return libpath def SdkTools(self): """ Microsoft Windows SDK Tools. Return ------ list of str paths """ return list(self._sdk_tools()) def _sdk_tools(self): """ Microsoft Windows SDK Tools paths generator. 
Return ------ generator of str paths """ if self.vs_ver < 15.0: bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86' yield join(self.si.WindowsSdkDir, bin_dir) if not self.pi.current_is_x86(): arch_subdir = self.pi.current_dir(x64=True) path = 'Bin%s' % arch_subdir yield join(self.si.WindowsSdkDir, path) if self.vs_ver in (10.0, 11.0): if self.pi.target_is_x86(): arch_subdir = '' else: arch_subdir = self.pi.current_dir(hidex86=True, x64=True) path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir yield join(self.si.WindowsSdkDir, path) elif self.vs_ver >= 15.0: path = join(self.si.WindowsSdkDir, 'Bin') arch_subdir = self.pi.current_dir(x64=True) sdkver = self.si.WindowsSdkLastVersion yield join(path, '%s%s' % (sdkver, arch_subdir)) if self.si.WindowsSDKExecutablePath: yield self.si.WindowsSDKExecutablePath def _sdk_subdir(self): """ Microsoft Windows SDK version subdir. Return ------ str subdir """ ucrtver = self.si.WindowsSdkLastVersion return ('%s\\' % ucrtver) if ucrtver else '' def SdkSetup(self): """ Microsoft Windows SDK Setup. Return ------ list of str paths """ if self.vs_ver > 9.0: return [] return [join(self.si.WindowsSdkDir, 'Setup')] def FxTools(self): """ Microsoft .NET Framework Tools. Return ------ list of str paths """ pi = self.pi si = self.si if self.vs_ver <= 10.0: include32 = True include64 = not pi.target_is_x86() and not pi.current_is_x86() else: include32 = pi.target_is_x86() or pi.current_is_x86() include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' tools = [] if include32: tools += [join(si.FrameworkDir32, ver) for ver in si.FrameworkVersion32] if include64: tools += [join(si.FrameworkDir64, ver) for ver in si.FrameworkVersion64] return tools def NetFxSDKLibraries(self): """ Microsoft .Net Framework SDK Libraries. Return ------ list of str paths """ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: return [] arch_subdir = self.pi.target_dir(x64=True) return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] def NetFxSDKIncludes(self): """ Microsoft .Net Framework SDK Includes. Return ------ list of str paths """ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir: return [] return [join(self.si.NetFxSdkDir, r'include\um')] def VsTDb(self): """ Microsoft Visual Studio Team System Database. Return ------ list of str paths """ return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')] def MSBuild(self): """ Microsoft Build Engine. Return ------ list of str paths """ if self.vs_ver < 12.0: return [] elif self.vs_ver < 15.0: base_path = self.si.ProgramFilesx86 arch_subdir = self.pi.current_dir(hidex86=True) else: base_path = self.si.VSInstallDir arch_subdir = '' path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir) build = [join(base_path, path)] if self.vs_ver >= 15.0: # Add Roslyn C# & Visual Basic Compiler build += [join(base_path, path, 'Roslyn')] return build def HTMLHelpWorkshop(self): """ Microsoft HTML Help Workshop. Return ------ list of str paths """ if self.vs_ver < 11.0: return [] return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')] def UCRTLibraries(self): """ Microsoft Universal C Runtime SDK Libraries. Return ------ list of str paths """ if self.vs_ver < 14.0: return [] arch_subdir = self.pi.target_dir(x64=True) lib = join(self.si.UniversalCRTSdkDir, 'lib') ucrtver = self._ucrt_subdir return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] def UCRTIncludes(self): """ Microsoft Universal C Runtime SDK Include. 
Return ------ list of str paths """ if self.vs_ver < 14.0: return [] include = join(self.si.UniversalCRTSdkDir, 'include') return [join(include, '%sucrt' % self._ucrt_subdir)] def _ucrt_subdir(self): """ Microsoft Universal C Runtime SDK version subdir. Return ------ str subdir """ ucrtver = self.si.UniversalCRTSdkLastVersion return ('%s\\' % ucrtver) if ucrtver else '' def FSharp(self): """ Microsoft Visual F#. Return ------ list of str paths """ if 11.0 > self.vs_ver > 12.0: return [] return [self.si.FSharpInstallDir] def VCRuntimeRedist(self): """ Microsoft Visual C++ runtime redistributable dll. Return ------ str path """ vcruntime = 'vcruntime%d0.dll' % self.vc_ver arch_subdir = self.pi.target_dir(x64=True).strip('\\') # Installation prefixes candidates prefixes = [] tools_path = self.si.VCInstallDir redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist')) if isdir(redist_path): # Redist version may not be exactly the same as tools redist_path = join(redist_path, listdir(redist_path)[-1]) prefixes += [redist_path, join(redist_path, 'onecore')] prefixes += [join(tools_path, 'redist')] # VS14 legacy path # CRT directory crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10), # Sometime store in directory with VS version instead of VC 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10)) # vcruntime path for prefix, crt_dir in itertools.product(prefixes, crt_dirs): path = join(prefix, arch_subdir, crt_dir, vcruntime) if isfile(path): return path def return_env(self, exists=True): """ Return environment dict. Parameters ---------- exists: bool It True, only return existing paths. Return ------ dict environment """ env = dict( include=self._build_paths('include', [self.VCIncludes, self.OSIncludes, self.UCRTIncludes, self.NetFxSDKIncludes], exists), lib=self._build_paths('lib', [self.VCLibraries, self.OSLibraries, self.FxTools, self.UCRTLibraries, self.NetFxSDKLibraries], exists), libpath=self._build_paths('libpath', [self.VCLibraries, self.FxTools, self.VCStoreRefs, self.OSLibpath], exists), path=self._build_paths('path', [self.VCTools, self.VSTools, self.VsTDb, self.SdkTools, self.SdkSetup, self.FxTools, self.MSBuild, self.HTMLHelpWorkshop, self.FSharp], exists), ) if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist): env['py_vcruntime_redist'] = self.VCRuntimeRedist return env def _build_paths(self, name, spec_path_lists, exists): """ Given an environment variable name and specified paths, return a pathsep-separated string of paths containing unique, extant, directories from those paths and from the environment variable. Raise an error if no paths are resolved. Parameters ---------- name: str Environment variable name spec_path_lists: list of str Paths exists: bool It True, only return existing paths. Return ------ str Pathsep-separated paths """ # flatten spec_path_lists spec_paths = itertools.chain.from_iterable(spec_path_lists) env_paths = environ.get(name, '').split(pathsep) paths = itertools.chain(spec_paths, env_paths) extant_paths = list(filter(isdir, paths)) if exists else paths if not extant_paths: msg = "%s environment variable is empty" % name.upper() raise distutils.errors.DistutilsPlatformError(msg) unique_paths = self._unique_everseen(extant_paths) return pathsep.join(unique_paths) # from Python docs def _unique_everseen(iterable, key=None): """ List unique elements, preserving order. Remember all elements ever seen. 
_unique_everseen('AAAABBBCCDAABBB') --> A B C D _unique_everseen('ABBCcAD', str.lower) --> A B C D """ seen = set() seen_add = seen.add if key is None: for element in filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element def get_unpatched(item): lookup = ( get_unpatched_class if isinstance(item, six.class_types) else get_unpatched_function if isinstance(item, types.FunctionType) else lambda item: None ) return lookup(item) The provided code snippet includes necessary dependencies for implementing the `msvc9_query_vcvarsall` function. Write a Python function `def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs)` to solve the following problem: Patched "distutils.msvc9compiler.query_vcvarsall" for support extra Microsoft Visual C++ 9.0 and 10.0 compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- ver: float Required Microsoft Visual C++ version. arch: str Target architecture. Return ------ dict environment Here is the function: def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): """ Patched "distutils.msvc9compiler.query_vcvarsall" for support extra Microsoft Visual C++ 9.0 and 10.0 compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- ver: float Required Microsoft Visual C++ version. arch: str Target architecture. Return ------ dict environment """ # Try to get environment from vcvarsall.bat (Classical way) try: orig = get_unpatched(msvc9_query_vcvarsall) return orig(ver, arch, *args, **kwargs) except distutils.errors.DistutilsPlatformError: # Pass error if Vcvarsall.bat is missing pass except ValueError: # Pass error if environment not set after executing vcvarsall.bat pass # If error, try to set environment directly try: return EnvironmentInfo(arch, ver).return_env() except distutils.errors.DistutilsPlatformError as exc: _augment_exception(exc, ver, arch) raise
Patched "distutils.msvc9compiler.query_vcvarsall" for support extra Microsoft Visual C++ 9.0 and 10.0 compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- ver: float Required Microsoft Visual C++ version. arch: str Target architecture. Return ------ dict environment
174,609
import json from io import open from os import listdir, pathsep from os.path import join, isfile, isdir, dirname import sys import platform import itertools import subprocess import distutils.errors from setuptools.extern.packaging.version import LegacyVersion from setuptools.extern.six.moves import filterfalse from .monkey import get_unpatched try: from distutils.msvc9compiler import Reg except _msvc9_suppress_errors: pass def _msvc14_get_vc_env(plat_spec): """Python 3.8 "distutils/_msvccompiler.py" backport""" if "DISTUTILS_USE_SDK" in environ: return { key.lower(): value for key, value in environ.items() } vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec) if not vcvarsall: raise distutils.errors.DistutilsPlatformError( "Unable to find vcvarsall.bat" ) try: out = subprocess.check_output( 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), stderr=subprocess.STDOUT, ).decode('utf-16le', errors='replace') except subprocess.CalledProcessError as exc: raise distutils.errors.DistutilsPlatformError( "Error executing {}".format(exc.cmd) ) from exc env = { key.lower(): value for key, _, value in (line.partition('=') for line in out.splitlines()) if key and value } if vcruntime: env['py_vcruntime_redist'] = vcruntime return env def _augment_exception(exc, version, arch=''): """ Add details to the exception message to help guide the user as to what action will resolve it. """ # Error if MSVC++ directory not found or environment not set message = exc.args[0] if "vcvarsall" in message.lower() or "visual c" in message.lower(): # Special error message if MSVC++ not installed tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' message = tmpl.format(**locals()) msdownload = 'www.microsoft.com/download/details.aspx?id=%d' if version == 9.0: if arch.lower().find('ia64') > -1: # For VC++ 9.0, if IA64 support is needed, redirect user # to Windows SDK 7.0. # Note: No download link available from Microsoft. message += ' Get it with "Microsoft Windows SDK 7.0"' else: # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : # This redirection link is maintained by Microsoft. # Contact vspython@microsoft.com if it needs updating. message += ' Get it from http://aka.ms/vcpython27' elif version == 10.0: # For VC++ 10.0 Redirect user to Windows SDK 7.1 message += ' Get it with "Microsoft Windows SDK 7.1": ' message += msdownload % 8279 elif version >= 14.0: # For VC++ 14.X Redirect user to latest Visual C++ Build Tools message += (' Get it with "Build Tools for Visual Studio": ' r'https://visualstudio.microsoft.com/downloads/') exc.args = (message, ) The provided code snippet includes necessary dependencies for implementing the `msvc14_get_vc_env` function. Write a Python function `def msvc14_get_vc_env(plat_spec)` to solve the following problem: Patched "distutils._msvccompiler._get_vc_env" for support extra Microsoft Visual C++ 14.X compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- plat_spec: str Target architecture. Return ------ dict environment Here is the function: def msvc14_get_vc_env(plat_spec): """ Patched "distutils._msvccompiler._get_vc_env" for support extra Microsoft Visual C++ 14.X compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- plat_spec: str Target architecture. Return ------ dict environment """ # Always use backport from CPython 3.8 try: return _msvc14_get_vc_env(plat_spec) except distutils.errors.DistutilsPlatformError as exc: _augment_exception(exc, 14.0) raise
Patched "distutils._msvccompiler._get_vc_env" for support extra Microsoft Visual C++ 14.X compilers. Set environment without use of "vcvarsall.bat". Parameters ---------- plat_spec: str Target architecture. Return ------ dict environment
174,610
import json from io import open from os import listdir, pathsep from os.path import join, isfile, isdir, dirname import sys import platform import itertools import subprocess import distutils.errors from setuptools.extern.packaging.version import LegacyVersion from setuptools.extern.six.moves import filterfalse from .monkey import get_unpatched try: from distutils.msvc9compiler import Reg except _msvc9_suppress_errors: pass def get_unpatched(item): lookup = ( get_unpatched_class if isinstance(item, six.class_types) else get_unpatched_function if isinstance(item, types.FunctionType) else lambda item: None ) return lookup(item) The provided code snippet includes necessary dependencies for implementing the `msvc14_gen_lib_options` function. Write a Python function `def msvc14_gen_lib_options(*args, **kwargs)` to solve the following problem: Patched "distutils._msvccompiler.gen_lib_options" for fix compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2) Here is the function: def msvc14_gen_lib_options(*args, **kwargs): """ Patched "distutils._msvccompiler.gen_lib_options" for fix compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2) """ if "numpy.distutils" in sys.modules: import numpy as np if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
Patched "distutils._msvccompiler.gen_lib_options" for fix compatibility between "numpy.distutils" and "distutils._msvccompiler" (for Numpy < 1.11.2)
174,611
import io import os import sys import tokenize import shutil import contextlib import setuptools import distutils from setuptools.py31compat import TemporaryDirectory from pkg_resources import parse_requirements The provided code snippet includes necessary dependencies for implementing the `_to_str` function. Write a Python function `def _to_str(s)` to solve the following problem: Convert a filename to a string (on Python 2, explicitly a byte string, not Unicode) as distutils checks for the exact type str. Here is the function: def _to_str(s): """ Convert a filename to a string (on Python 2, explicitly a byte string, not Unicode) as distutils checks for the exact type str. """ if sys.version_info[0] == 2 and not isinstance(s, str): # Assume it's Unicode, as that's what the PEP says # should be provided. return s.encode(sys.getfilesystemencoding()) return s
Convert a filename to a string (on Python 2, explicitly a byte string, not Unicode) as distutils checks for the exact type str.
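A quick example of the intended behavior; the interesting branch only triggers on Python 2.

print(_to_str('setup.py'))    # Python 3: returned unchanged as str
print(_to_str(u'setup.py'))   # Python 2: encoded to a byte string via the
                              # filesystem encoding, since distutils wants str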
174,612
import io import os import sys import tokenize import shutil import contextlib import setuptools import distutils from setuptools.py31compat import TemporaryDirectory from pkg_resources import parse_requirements def _get_immediate_subdirectories(a_dir): return [name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
null
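A small demonstration of the helper above; tempfile.TemporaryDirectory assumes Python 3 (the module imports its own py31compat shim for older interpreters).

import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.mkdir(os.path.join(root, 'pkg'))
    open(os.path.join(root, 'README'), 'w').close()
    # Only directories are returned; plain files like README are skipped.
    print(_get_immediate_subdirectories(root))   # ['pkg']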
174,613
import io import os import sys import tokenize import shutil import contextlib import setuptools import distutils from setuptools.py31compat import TemporaryDirectory from pkg_resources import parse_requirements def _file_with_extension(directory, extension): matching = ( f for f in os.listdir(directory) if f.endswith(extension) ) file, = matching return file
null
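The single-element unpacking `file, = matching` is the point of this helper: it fails loudly unless exactly one file matches the extension. A sketch:

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'pkg-1.0-py3-none-any.whl'), 'w').close()
    print(_file_with_extension(d, '.whl'))   # pkg-1.0-py3-none-any.whl
    # Adding a second .whl here would make the `file, = matching`
    # unpacking raise ValueError instead of silently picking one.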
174,614
import io import os import sys import tokenize import shutil import contextlib import setuptools import distutils from setuptools.py31compat import TemporaryDirectory from pkg_resources import parse_requirements def _open_setup_script(setup_script): if not os.path.exists(setup_script): # Supply a default setup.py return io.StringIO(u"from setuptools import setup; setup()") return getattr(tokenize, 'open', open)(setup_script)
null
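Behavior sketch: a missing script yields an in-memory default, otherwise the file is opened with tokenize.open (where available) so its coding cookie is honored.

with _open_setup_script('no/such/dir/setup.py') as f:
    print(f.read())   # "from setuptools import setup; setup()"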
174,615
import sys import marshal import contextlib from distutils.version import StrictVersion from .py33compat import Bytecode from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE from . import py27compat def maybe_close(f): def empty(): yield return if not f: return empty() return contextlib.closing(f) def extract_constant(code, symbol, default=-1): """Extract the constant value of 'symbol' from 'code' If the name 'symbol' is bound to a constant value by the Python code object 'code', return that value. If 'symbol' is bound to an expression, return 'default'. Otherwise, return 'None'. Return value is based on the first assignment to 'symbol'. 'symbol' must be a global, or at least a non-"fast" local in the code block. That is, only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' must be present in 'code.co_names'. """ if symbol not in code.co_names: # name's not there, can't possibly be an assignment return None name_idx = list(code.co_names).index(symbol) STORE_NAME = 90 STORE_GLOBAL = 97 LOAD_CONST = 100 const = default for byte_code in Bytecode(code): op = byte_code.opcode arg = byte_code.arg if op == LOAD_CONST: const = code.co_consts[arg] elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): return const else: const = default The provided code snippet includes necessary dependencies for implementing the `get_module_constant` function. Write a Python function `def get_module_constant(module, symbol, default=-1, paths=None)` to solve the following problem: Find 'module' by searching 'paths', and extract 'symbol' Return 'None' if 'module' does not exist on 'paths', or it does not define 'symbol'. If the module defines 'symbol' as a constant, return the constant. Otherwise, return 'default'. Here is the function: def get_module_constant(module, symbol, default=-1, paths=None): """Find 'module' by searching 'paths', and extract 'symbol' Return 'None' if 'module' does not exist on 'paths', or it does not define 'symbol'. If the module defines 'symbol' as a constant, return the constant. Otherwise, return 'default'.""" try: f, path, (suffix, mode, kind) = info = find_module(module, paths) except ImportError: # Module doesn't exist return None with maybe_close(f): if kind == PY_COMPILED: f.read(8) # skip magic & date code = marshal.load(f) elif kind == PY_FROZEN: code = py27compat.get_frozen_object(module, paths) elif kind == PY_SOURCE: code = compile(f.read(), path, 'exec') else: # Not something we can parse; we'll have to import it. :( imported = py27compat.get_module(module, paths, info) return getattr(imported, symbol, None) return extract_constant(code, symbol, default)
Find 'module' by searching 'paths', and extract 'symbol'.

Return 'None' if 'module' does not exist on 'paths', or it does not define 'symbol'. If the module defines 'symbol' as a constant, return the constant. Otherwise, return 'default'.
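Usage sketch. For a plain-source module the code object is compiled but never executed, so the constant can be read without triggering import side effects. The exact value printed depends on the interpreter; platform.__version__ is just a convenient stdlib constant assigned as a string literal.

# platform.py assigns __version__ as a simple literal, so the source is
# compiled (not imported) and the constant is extracted from the bytecode.
print(get_module_constant('platform', '__version__'))   # e.g. '1.0.8'
# A module that cannot be found yields None rather than an exception.
print(get_module_constant('no_such_module_xyz', 'x'))   # None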
174,616
import sys import marshal import contextlib from distutils.version import StrictVersion from .py33compat import Bytecode from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE from . import py27compat __all__ = [ 'Require', 'find_module', 'get_module_constant', 'extract_constant' ] The provided code snippet includes necessary dependencies for implementing the `_update_globals` function. Write a Python function `def _update_globals()` to solve the following problem: Patch the globals to remove the objects not available on some platforms. XXX it'd be better to test assertions about bytecode instead. Here is the function: def _update_globals(): """ Patch the globals to remove the objects not available on some platforms. XXX it'd be better to test assertions about bytecode instead. """ if not sys.platform.startswith('java') and sys.platform != 'cli': return incompatible = 'extract_constant', 'get_module_constant' for name in incompatible: del globals()[name] __all__.remove(name)
Patch the globals to remove the objects not available on some platforms. XXX it'd be better to test assertions about bytecode instead.
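The effect, sketched: on CPython this is a no-op, while on Jython (sys.platform starts with 'java') or IronPython ('cli') the two bytecode-dependent helpers are deleted from the module namespace and from __all__.

_update_globals()
# On CPython both names survive the call:
print('extract_constant' in __all__, 'get_module_constant' in __all__)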
174,617
import zipfile import tarfile import os import shutil import posixpath import contextlib from distutils.errors import DistutilsError from pkg_resources import ensure_directory class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" def default_filter(src, dst): """The default progress/filter callback; returns True for all files""" return dst extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile The provided code snippet includes necessary dependencies for implementing the `unpack_archive` function. Write a Python function `def unpack_archive( filename, extract_dir, progress_filter=default_filter, drivers=None)` to solve the following problem: Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order. Here is the function: def unpack_archive( filename, extract_dir, progress_filter=default_filter, drivers=None): """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order. """ for driver in drivers or extraction_drivers: try: driver(filename, extract_dir, progress_filter) except UnrecognizedFormat: continue else: return else: raise UnrecognizedFormat( "Not a recognized archive type: %s" % filename )
Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` `progress_filter` is a function taking two arguments: a source path internal to the archive ('/'-separated), and a filesystem path where it will be extracted. The callback must return the desired extract path (which may be the same as the one passed in), or else ``None`` to skip that file or directory. The callback can thus be used to report on the progress of the extraction, as well as to filter the items extracted or alter their extraction paths. `drivers`, if supplied, must be a non-empty sequence of functions with the same signature as this function (minus the `drivers` argument), that raise ``UnrecognizedFormat`` if they do not support extracting the designated archive type. The `drivers` are tried in sequence until one is found that does not raise an error, or until all are exhausted (in which case ``UnrecognizedFormat`` is raised). If you do not supply a sequence of drivers, the module's ``extraction_drivers`` constant will be used, which means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that order.
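A round-trip sketch. It assumes the companion drivers (unpack_zipfile and friends) from the same module are in scope, since extraction_drivers references them; the filter drops everything under docs/ by returning None.

import os
import tempfile
import zipfile

def skip_docs(src, dst):
    # Returning None tells the driver to skip this archive member.
    return None if src.startswith('docs/') else dst

with tempfile.TemporaryDirectory() as work:
    archive = os.path.join(work, 'demo.zip')
    with zipfile.ZipFile(archive, 'w') as zf:
        zf.writestr('pkg/__init__.py', '')
        zf.writestr('docs/index.txt', 'not extracted')
    out = os.path.join(work, 'out')
    unpack_archive(archive, out, progress_filter=skip_docs)
    print(os.listdir(out))   # ['pkg']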
174,618
import zipfile import tarfile import os import shutil import posixpath import contextlib from distutils.errors import DistutilsError from pkg_resources import ensure_directory class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" def default_filter(src, dst): """The default progress/filter callback; returns True for all files""" return dst def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) os.makedirs(dirname, exist_ok=True) The provided code snippet includes necessary dependencies for implementing the `unpack_directory` function. Write a Python function `def unpack_directory(filename, extract_dir, progress_filter=default_filter)` to solve the following problem: Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory Here is the function: def unpack_directory(filename, extract_dir, progress_filter=default_filter): """"Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory """ if not os.path.isdir(filename): raise UnrecognizedFormat("%s is not a directory" % filename) paths = { filename: ('', extract_dir), } for base, dirs, files in os.walk(filename): src, dst = paths[base] for d in dirs: paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) for f in files: target = os.path.join(dst, f) target = progress_filter(src + f, target) if not target: # skip non-files continue ensure_directory(target) f = os.path.join(base, f) shutil.copyfile(f, target) shutil.copystat(f, target)
Unpack" a directory, using the same interface as for archives Raises ``UnrecognizedFormat`` if `filename` is not a directory