ZTWHHH committed on
Commit
ab0b11d
·
verified ·
1 Parent(s): 1b8c8b1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. vlmpy310/lib/python3.10/site-packages/bs4/__init__.py +840 -0
  2. vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/css.cpython-310.pyc +0 -0
  3. vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/dammit.cpython-310.pyc +0 -0
  4. vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/diagnose.cpython-310.pyc +0 -0
  5. vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/formatter.cpython-310.pyc +0 -0
  6. vlmpy310/lib/python3.10/site-packages/bs4/builder/__pycache__/__init__.cpython-310.pyc +0 -0
  7. vlmpy310/lib/python3.10/site-packages/bs4/builder/__pycache__/_lxml.cpython-310.pyc +0 -0
  8. vlmpy310/lib/python3.10/site-packages/bs4/css.py +280 -0
  9. vlmpy310/lib/python3.10/site-packages/bs4/dammit.py +1095 -0
  10. vlmpy310/lib/python3.10/site-packages/bs4/diagnose.py +233 -0
  11. vlmpy310/lib/python3.10/site-packages/bs4/element.py +2435 -0
  12. vlmpy310/lib/python3.10/site-packages/bs4/formatter.py +185 -0
  13. vlmpy310/lib/python3.10/site-packages/bs4/tests/__init__.py +1177 -0
  14. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  15. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_dammit.cpython-310.pyc +0 -0
  16. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_element.cpython-310.pyc +0 -0
  17. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_formatter.cpython-310.pyc +0 -0
  18. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_fuzz.cpython-310.pyc +0 -0
  19. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_html5lib.cpython-310.pyc +0 -0
  20. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_lxml.cpython-310.pyc +0 -0
  21. vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_navigablestring.cpython-310.pyc +0 -0
  22. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase +1 -0
  23. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase +1 -0
  24. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase +1 -0
  25. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase +0 -0
  26. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase +0 -0
  27. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase +0 -0
  28. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase +1 -0
  29. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase +0 -0
  30. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase +2 -0
  31. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase +1 -0
  32. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase +1 -0
  33. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase +1 -0
  34. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase +1 -0
  35. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase +0 -0
  36. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase +0 -0
  37. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase +0 -0
  38. vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase +0 -0
  39. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_builder.py +29 -0
  40. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_builder_registry.py +137 -0
  41. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_css.py +487 -0
  42. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_dammit.py +370 -0
  43. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_docs.py +38 -0
  44. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_element.py +74 -0
  45. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_formatter.py +113 -0
  46. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_fuzz.py +176 -0
  47. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_html5lib.py +224 -0
  48. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_htmlparser.py +148 -0
  49. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_lxml.py +203 -0
  50. vlmpy310/lib/python3.10/site-packages/bs4/tests/test_navigablestring.py +144 -0
vlmpy310/lib/python3.10/site-packages/bs4/__init__.py ADDED
@@ -0,0 +1,840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".

http://www.crummy.com/software/BeautifulSoup/

Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.

Beautiful Soup works with Python 3.6 and up. It works better if lxml
and/or html5lib is installed.

For more than you ever wanted to know about Beautiful Soup, see the
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""

__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.12.3"
__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

# Only the BeautifulSoup class is part of this module's public API.
__all__ = ['BeautifulSoup']

from collections import Counter
import os
import re
import sys
import traceback
import warnings

# The very first thing we do is give a useful error if someone is
# running this code under Python 2.
if sys.version_info.major < 3:
    raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')

# The builder package supplies the pluggable parser back ends; the
# names imported here are re-exported for backwards compatibility.
from .builder import (
    builder_registry,
    ParserRejectedMarkup,
    XMLParsedAsHTMLWarning,
    HTMLParserTreeBuilder
)
from .dammit import UnicodeDammit
from .element import (
    CData,
    Comment,
    CSS,
    DEFAULT_OUTPUT_ENCODING,
    Declaration,
    Doctype,
    NavigableString,
    PageElement,
    ProcessingInstruction,
    PYTHON_SPECIFIC_ENCODINGS,
    ResultSet,
    Script,
    Stylesheet,
    SoupStrainer,
    Tag,
    )
62
+
# Define some custom warnings.
class GuessedAtParserWarning(UserWarning):
    """The warning issued when BeautifulSoup has to guess what parser to
    use -- probably because no parser was specified in the constructor.
    """
class MarkupResemblesLocatorWarning(UserWarning):
    """The warning issued when BeautifulSoup is given 'markup' that
    actually looks like a resource locator -- a URL or a path to a file
    on disk.
    """
74
+
75
+
class BeautifulSoup(Tag):
    """A data structure representing a parsed HTML or XML document.

    Most of the methods you'll call on a BeautifulSoup object are inherited from
    PageElement or Tag.

    Internally, this class defines the basic interface called by the
    tree builders when converting an HTML/XML document into a data
    structure. The interface abstracts away the differences between
    parsers. To write a new tree builder, you'll need to understand
    these methods as a whole.

    These methods will be called by the BeautifulSoup constructor:
      * reset()
      * feed(markup)

    The tree builder may call these methods from its feed() implementation:
      * handle_starttag(name, attrs) # See note about return value
      * handle_endtag(name)
      * handle_data(data) # Appends to the current data node
      * endData(containerClass) # Ends the current data node

    No matter how complicated the underlying parser is, you should be
    able to build a tree using 'start tag' events, 'end tag' events,
    'data' events, and "done with data" events.

    If you encounter an empty-element tag (aka a self-closing tag,
    like HTML's <br> tag), call handle_starttag and then
    handle_endtag.
    """

    # Since BeautifulSoup subclasses Tag, it's possible to treat it as
    # a Tag with a .name. This name makes it clear the BeautifulSoup
    # object isn't a real markup tag.
    ROOT_TAG_NAME = '[document]'

    # If the end-user gives no indication which tree builder they
    # want, look for one with these features.
    DEFAULT_BUILDER_FEATURES = ['html', 'fast']

    # A string containing all ASCII whitespace characters, used in
    # endData() to detect data chunks that seem 'empty'.
    ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

    # %-style template for the GuessedAtParserWarning message; filled in
    # with markup_type, parser name, and the caller's file/line in __init__.
    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
121
+
    def __init__(self, markup="", features=None, builder=None,
                 parse_only=None, from_encoding=None, exclude_encodings=None,
                 element_classes=None, **kwargs):
        """Constructor.

        :param markup: A string or a file-like object representing
         markup to be parsed.

        :param features: Desirable features of the parser to be
         used. This may be the name of a specific parser ("lxml",
         "lxml-xml", "html.parser", or "html5lib") or it may be the
         type of markup to be used ("html", "html5", "xml"). It's
         recommended that you name a specific parser, so that
         Beautiful Soup gives you the same results across platforms
         and virtual environments.

        :param builder: A TreeBuilder subclass to instantiate (or
         instance to use) instead of looking one up based on
         `features`. You only need to use this if you've implemented a
         custom TreeBuilder.

        :param parse_only: A SoupStrainer. Only parts of the document
         matching the SoupStrainer will be considered. This is useful
         when parsing part of a document that would otherwise be too
         large to fit into memory.

        :param from_encoding: A string indicating the encoding of the
         document to be parsed. Pass this in if Beautiful Soup is
         guessing wrongly about the document's encoding.

        :param exclude_encodings: A list of strings indicating
         encodings known to be wrong. Pass this in if you don't know
         the document's encoding but you know Beautiful Soup's guess is
         wrong.

        :param element_classes: A dictionary mapping BeautifulSoup
         classes like Tag and NavigableString, to other classes you'd
         like to be instantiated instead as the parse tree is
         built. This is useful for subclassing Tag or NavigableString
         to modify default behavior.

        :param kwargs: For backwards compatibility purposes, the
         constructor accepts certain keyword arguments used in
         Beautiful Soup 3. None of these arguments do anything in
         Beautiful Soup 4; they will result in a warning and then be
         ignored.

         Apart from this, any keyword arguments passed into the
         BeautifulSoup constructor are propagated to the TreeBuilder
         constructor. This makes it possible to configure a
         TreeBuilder by passing in arguments, not just by saying which
         one to use.
        """
        # Each obsolete BS3-era keyword argument is popped and warned
        # about individually, so that whatever remains in **kwargs can
        # still be forwarded to the TreeBuilder constructor below.
        if 'convertEntities' in kwargs:
            del kwargs['convertEntities']
            warnings.warn(
                "BS4 does not respect the convertEntities argument to the "
                "BeautifulSoup constructor. Entities are always converted "
                "to Unicode characters.")

        if 'markupMassage' in kwargs:
            del kwargs['markupMassage']
            warnings.warn(
                "BS4 does not respect the markupMassage argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for any necessary markup massage.")

        if 'smartQuotesTo' in kwargs:
            del kwargs['smartQuotesTo']
            warnings.warn(
                "BS4 does not respect the smartQuotesTo argument to the "
                "BeautifulSoup constructor. Smart quotes are always converted "
                "to Unicode characters.")

        if 'selfClosingTags' in kwargs:
            del kwargs['selfClosingTags']
            warnings.warn(
                "BS4 does not respect the selfClosingTags argument to the "
                "BeautifulSoup constructor. The tree builder is responsible "
                "for understanding self-closing tags.")

        if 'isHTML' in kwargs:
            del kwargs['isHTML']
            warnings.warn(
                "BS4 does not respect the isHTML argument to the "
                "BeautifulSoup constructor. Suggest you use "
                "features='lxml' for HTML and features='lxml-xml' for "
                "XML.")

        def deprecated_argument(old_name, new_name):
            # Accept a renamed BS3 argument under its old name, with a
            # DeprecationWarning pointing at the caller (stacklevel=3:
            # warn -> deprecated_argument -> __init__ -> caller).
            if old_name in kwargs:
                warnings.warn(
                    'The "%s" argument to the BeautifulSoup constructor '
                    'has been renamed to "%s."' % (old_name, new_name),
                    DeprecationWarning, stacklevel=3
                )
                return kwargs.pop(old_name)
            return None

        parse_only = parse_only or deprecated_argument(
            "parseOnlyThese", "parse_only")

        from_encoding = from_encoding or deprecated_argument(
            "fromEncoding", "from_encoding")

        # from_encoding only makes sense for bytes input; Unicode markup
        # has already been decoded.
        if from_encoding and isinstance(markup, str):
            warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
            from_encoding = None

        self.element_classes = element_classes or dict()

        # We need this information to track whether or not the builder
        # was specified well enough that we can omit the 'you need to
        # specify a parser' warning.
        original_builder = builder
        original_features = features

        if isinstance(builder, type):
            # A builder class was passed in; it needs to be instantiated.
            builder_class = builder
            builder = None
        elif builder is None:
            if isinstance(features, str):
                features = [features]
            if features is None or len(features) == 0:
                features = self.DEFAULT_BUILDER_FEATURES
            builder_class = builder_registry.lookup(*features)
            if builder_class is None:
                # NOTE: FeatureNotFound is defined elsewhere in this module.
                raise FeatureNotFound(
                    "Couldn't find a tree builder with the features you "
                    "requested: %s. Do you need to install a parser library?"
                    % ",".join(features))

        # At this point either we have a TreeBuilder instance in
        # builder, or we have a builder_class that we can instantiate
        # with the remaining **kwargs.
        if builder is None:
            builder = builder_class(**kwargs)
            if not original_builder and not (
                    original_features == builder.NAME or
                    original_features in builder.ALTERNATE_NAMES
            ) and markup:
                # The user did not tell us which TreeBuilder to use,
                # and we had to guess. Issue a warning.
                if builder.is_xml:
                    markup_type = "XML"
                else:
                    markup_type = "HTML"

                # This code adapted from warnings.py so that we get the same line
                # of code as our warnings.warn() call gets, even if the answer is wrong
                # (as it may be in a multithreading situation).
                caller = None
                try:
                    caller = sys._getframe(1)
                except ValueError:
                    pass
                if caller:
                    globals = caller.f_globals
                    line_number = caller.f_lineno
                else:
                    globals = sys.__dict__
                    line_number = 1
                filename = globals.get('__file__')
                if filename:
                    fnl = filename.lower()
                    if fnl.endswith((".pyc", ".pyo")):
                        filename = filename[:-1]
                if filename:
                    # If there is no filename at all, the user is most likely in a REPL,
                    # and the warning is not necessary.
                    values = dict(
                        filename=filename,
                        line_number=line_number,
                        parser=builder.NAME,
                        markup_type=markup_type
                    )
                    warnings.warn(
                        self.NO_PARSER_SPECIFIED_WARNING % values,
                        GuessedAtParserWarning, stacklevel=2
                    )
        else:
            if kwargs:
                warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")

        self.builder = builder
        self.is_xml = builder.is_xml
        self.known_xml = self.is_xml
        self._namespaces = dict()
        self.parse_only = parse_only

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        elif len(markup) <= 256 and (
                (isinstance(markup, bytes) and not b'<' in markup)
                or (isinstance(markup, str) and not '<' in markup)
        ):
            # Issue warnings for a couple beginner problems
            # involving passing non-markup to Beautiful Soup.
            # Beautiful Soup will still parse the input as markup,
            # since that is sometimes the intended behavior.
            if not self._markup_is_url(markup):
                self._markup_resembles_filename(markup)

        # prepare_markup() may yield several candidate (markup, encoding)
        # strategies; try each one until a parse succeeds.
        rejections = []
        success = False
        for (self.markup, self.original_encoding, self.declared_html_encoding,
             self.contains_replacement_characters) in (
                 self.builder.prepare_markup(
                     markup, from_encoding, exclude_encodings=exclude_encodings)):
            self.reset()
            self.builder.initialize_soup(self)
            try:
                self._feed()
                success = True
                break
            except ParserRejectedMarkup as e:
                rejections.append(e)
                pass

        if not success:
            other_exceptions = [str(e) for e in rejections]
            raise ParserRejectedMarkup(
                "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
            )

        # Clear out the markup and remove the builder's circular
        # reference to this object.
        self.markup = None
        self.builder.soup = None
352
+
    def _clone(self):
        """Create a new BeautifulSoup object with the same TreeBuilder,
        but not associated with any markup.

        This is the first step of the deepcopy process.

        :return: An empty BeautifulSoup of the same concrete class,
         sharing this object's builder.
        """
        clone = type(self)("", None, self.builder)

        # Keep track of the encoding of the original document,
        # since we won't be parsing it again.
        clone.original_encoding = self.original_encoding
        return clone
365
+
    def __getstate__(self):
        """Return a picklable dict of this object's state.

        Frequently a tree builder can't be pickled, so an unpicklable
        builder instance is replaced by its class; __setstate__
        re-instantiates it.
        """
        d = dict(self.__dict__)
        if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
            d['builder'] = type(self.builder)
        # Store the contents as a Unicode string; the tree will be
        # reparsed from it on unpickling.
        d['contents'] = []
        d['markup'] = self.decode()

        # If _most_recent_element is present, it's a Tag object left
        # over from initial parse. It might not be picklable and we
        # don't need it.
        if '_most_recent_element' in d:
            del d['_most_recent_element']
        return d
381
+
    def __setstate__(self, state):
        """Restore state saved by __getstate__, reparsing the stored markup."""
        # If necessary, restore the TreeBuilder by looking it up.
        self.__dict__ = state
        if isinstance(self.builder, type):
            # __getstate__ stored the builder class; instantiate it.
            self.builder = self.builder()
        elif not self.builder:
            # We don't know which builder was used to build this
            # parse tree, so use a default we know is always available.
            self.builder = HTMLParserTreeBuilder()
        self.builder.soup = self
        self.reset()
        self._feed()
        # NOTE(review): pickle ignores __setstate__'s return value; the
        # return is harmless but not required by the protocol.
        return state
395
+
396
+
    @classmethod
    def _decode_markup(cls, markup):
        """Ensure `markup` is a Unicode string, so it's safe to send into
        warnings.warn.

        Bytes are decoded as UTF-8, replacing undecodable sequences;
        anything else is returned unchanged.

        TODO: warnings.warn had this problem back in 2010 but it might not
        anymore.
        """
        if isinstance(markup, bytes):
            decoded = markup.decode('utf-8', 'replace')
        else:
            decoded = markup
        return decoded
409
+
    @classmethod
    def _markup_is_url(cls, markup):
        """Error-handling method to issue a warning if incoming markup looks
        like a URL.

        A warning fires only when the markup starts with "http:" or
        "https:" and contains no spaces.

        :param markup: A string or bytestring.
        :return: Whether or not the markup resembles a URL
         closely enough to justify a warning.
        """
        # Use prefixes/separator of the same type as the markup.
        if isinstance(markup, bytes):
            space = b' '
            cant_start_with = (b"http:", b"https:")
        elif isinstance(markup, str):
            space = ' '
            cant_start_with = ("http:", "https:")
        else:
            # Not text at all; no warning applies.
            return False

        if any(markup.startswith(prefix) for prefix in cant_start_with):
            if not space in markup:
                warnings.warn(
                    'The input looks more like a URL than markup. You may want to use'
                    ' an HTTP client like requests to get the document behind'
                    ' the URL, and feed that document to Beautiful Soup.',
                    MarkupResemblesLocatorWarning,
                    stacklevel=3
                )
                return True
        return False
439
+
    @classmethod
    def _markup_resembles_filename(cls, markup):
        """Error-handling method to issue a warning if incoming markup
        resembles a filename.

        The markup is considered file-like if it contains a path
        separator, or ends with a well-known markup/text extension.

        :param markup: A bytestring or string.
        :return: Whether or not the markup resembles a filename
         closely enough to justify a warning.
        """
        path_characters = '/\\'
        extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
        if isinstance(markup, bytes):
            # Compare bytes against bytes.
            path_characters = path_characters.encode("utf8")
            extensions = [x.encode('utf8') for x in extensions]
        filelike = False
        if any(x in markup for x in path_characters):
            filelike = True
        else:
            lower = markup.lower()
            if any(lower.endswith(ext) for ext in extensions):
                filelike = True
        if filelike:
            warnings.warn(
                'The input looks more like a filename than markup. You may'
                ' want to open this file and pass the filehandle into'
                ' Beautiful Soup.',
                MarkupResemblesLocatorWarning, stacklevel=3
            )
            return True
        return False
470
+
    def _feed(self):
        """Internal method that parses previously set markup, creating a large
        number of Tag and NavigableString objects.

        Expects self.markup to have been set (see __init__).
        """
        # Convert the document to Unicode.
        self.builder.reset()

        self.builder.feed(self.markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
483
+
    def reset(self):
        """Reset this object to a state as though it had never parsed any
        markup.
        """
        Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
        # The document root itself is never rendered as a tag.
        self.hidden = 1
        self.builder.reset()
        # Buffer of string fragments accumulated since the last endData().
        self.current_data = []
        self.currentTag = None
        self.tagStack = []
        # Counts currently-open tags by name; consulted by _popToTag.
        self.open_tag_counter = Counter()
        # Stacks of open tags with special whitespace/string handling.
        self.preserve_whitespace_tag_stack = []
        self.string_container_stack = []
        self._most_recent_element = None
        # The document root is always the bottom of the tag stack.
        self.pushTag(self)
499
+
    def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
                sourceline=None, sourcepos=None, **kwattrs):
        """Create a new Tag associated with this BeautifulSoup object.

        :param name: The name of the new Tag.
        :param namespace: The URI of the new Tag's XML namespace, if any.
        :param nsprefix: The prefix for the new Tag's XML namespace, if any.
        :param attrs: A dictionary of this Tag's attribute values; can
         be used instead of `kwattrs` for attributes like 'class'
         that are reserved words in Python. (The mutable default is
         safe here: it is only read, never mutated.)
        :param sourceline: The line number where this tag was
         (purportedly) found in its source document.
        :param sourcepos: The character position within `sourceline` where this
         tag was (purportedly) found.
        :param kwattrs: Keyword arguments for the new Tag's attribute values.

        :return: A new Tag (or the element_classes override for Tag).
        """
        # `attrs` entries win over keyword arguments of the same name.
        kwattrs.update(attrs)
        return self.element_classes.get(Tag, Tag)(
            None, self.builder, name, namespace, nsprefix, kwattrs,
            sourceline=sourceline, sourcepos=sourcepos
        )
522
+
    def string_container(self, base_class=None):
        """Determine the class to use when instantiating a string found
        during the parse.

        :param base_class: The class to start from; defaults to
         NavigableString.
        :return: `base_class`, possibly overridden by element_classes,
         and possibly specialized by the tag currently being parsed
         (e.g. the builder may map certain tag names to special
         string containers).
        """
        container = base_class or NavigableString

        # There may be a general override of NavigableString.
        container = self.element_classes.get(
            container, container
        )

        # On top of that, we may be inside a tag that needs a special
        # container class.
        if self.string_container_stack and container is NavigableString:
            container = self.builder.string_containers.get(
                self.string_container_stack[-1].name, container
            )
        return container
538
+
    def new_string(self, s, subclass=None):
        """Create a new NavigableString associated with this BeautifulSoup
        object.

        :param s: The string content.
        :param subclass: Optional NavigableString subclass to prefer;
         passed through string_container() for overrides.
        """
        container = self.string_container(subclass)
        return container(s)
545
+
    def insert_before(self, *args):
        """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
        it because there is nothing before or after it in the parse tree.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
551
+
    def insert_after(self, *args):
        """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
        it because there is nothing before or after it in the parse tree.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
557
+
    def popTag(self):
        """Internal method called by _popToTag when a tag is closed.

        :return: The new current tag (the one now on top of the stack).
        """
        tag = self.tagStack.pop()
        if tag.name in self.open_tag_counter:
            self.open_tag_counter[tag.name] -= 1
        # Leaving a whitespace-preserving or string-container scope?
        if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
            self.preserve_whitespace_tag_stack.pop()
        if self.string_container_stack and tag == self.string_container_stack[-1]:
            self.string_container_stack.pop()
        #print("Pop", tag.name)
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
571
+
    def pushTag(self, tag):
        """Internal method called by handle_starttag when a tag is opened.

        :param tag: The Tag being opened; becomes the new currentTag.
        """
        #print("Push", tag.name)
        if self.currentTag is not None:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]
        # The root pseudo-tag is not counted as an open markup tag.
        if tag.name != self.ROOT_TAG_NAME:
            self.open_tag_counter[tag.name] += 1
        # Entering a whitespace-preserving or string-container scope?
        if tag.name in self.builder.preserve_whitespace_tags:
            self.preserve_whitespace_tag_stack.append(tag)
        if tag.name in self.builder.string_containers:
            self.string_container_stack.append(tag)
585
+
    def endData(self, containerClass=None):
        """Method called by the TreeBuilder when the end of a data segment
        occurs.

        Joins the accumulated string fragments into one NavigableString
        (or `containerClass`) and attaches it to the tree, unless the
        segment is discarded as pure whitespace or filtered out by
        parse_only.

        :param containerClass: Optional string class to use instead of
         NavigableString; passed through string_container().
        """
        if self.current_data:
            current_data = ''.join(self.current_data)
            # If whitespace is not preserved, and this string contains
            # nothing but ASCII spaces, replace it with a single space
            # or newline.
            if not self.preserve_whitespace_tag_stack:
                strippable = True
                for i in current_data:
                    if i not in self.ASCII_SPACES:
                        strippable = False
                        break
                if strippable:
                    if '\n' in current_data:
                        current_data = '\n'
                    else:
                        current_data = ' '

            # Reset the data collector.
            self.current_data = []

            # Should we add this string to the tree at all?
            # (Only top-level text is subject to the SoupStrainer.)
            if self.parse_only and len(self.tagStack) <= 1 and \
                   (not self.parse_only.text or \
                    not self.parse_only.search(current_data)):
                return

            containerClass = self.string_container(containerClass)
            o = containerClass(current_data)
            self.object_was_parsed(o)
619
+
    def object_was_parsed(self, o, parent=None, most_recent_element=None):
        """Method called by the TreeBuilder to integrate an object into the parse tree.

        :param o: The PageElement (Tag or string) to attach.
        :param parent: The element to attach `o` to; defaults to the
         current tag.
        :param most_recent_element: The element parsed immediately
         before `o`, if the caller knows it; otherwise the soup's own
         _most_recent_element is used.
        """
        if parent is None:
            parent = self.currentTag
        if most_recent_element is not None:
            previous_element = most_recent_element
        else:
            previous_element = self._most_recent_element

        next_element = previous_sibling = next_sibling = None
        if isinstance(o, Tag):
            # A Tag may already carry linkage from a prior position;
            # preserve it so setup() can stitch it back in.
            next_element = o.next_element
            next_sibling = o.next_sibling
            previous_sibling = o.previous_sibling
            if previous_element is None:
                previous_element = o.previous_element

        # If the parent already has a next_element, we are inserting
        # into an already-parsed region and must repair linkage after.
        fix = parent.next_element is not None

        o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

        self._most_recent_element = o
        parent.contents.append(o)

        # Check if we are inserting into an already parsed node.
        if fix:
            self._linkage_fixer(parent)
647
+
    def _linkage_fixer(self, el):
        """Make sure linkage of this fragment is sound.

        Repairs next_element/previous_element/next_sibling/
        previous_sibling pointers after an element is appended as the
        last child of `el` within an already-parsed region.

        :param el: The parent whose last child was just appended.
        """

        first = el.contents[0]
        child = el.contents[-1]
        descendant = child

        if child is first and el.parent is not None:
            # Parent should be linked to first child
            el.next_element = child
            # We are no longer linked to whatever this element is
            prev_el = child.previous_element
            if prev_el is not None and prev_el is not el:
                prev_el.next_element = None
            # First child should be linked to the parent, and no previous siblings.
            child.previous_element = el
            child.previous_sibling = None

        # We have no sibling as we've been appended as the last.
        child.next_sibling = None

        # This index is a tag, dig deeper for a "last descendant"
        if isinstance(child, Tag) and child.contents:
            descendant = child._last_descendant(False)

        # As the final step, link last descendant. It should be linked
        # to the parent's next sibling (if found), else walk up the chain
        # and find a parent with a sibling. It should have no next sibling.
        descendant.next_element = None
        descendant.next_sibling = None
        target = el
        while True:
            if target is None:
                break
            elif target.next_sibling is not None:
                descendant.next_element = target.next_sibling
                target.next_sibling.previous_element = child
                break
            target = target.parent
687
+
688
+ def _popToTag(self, name, nsprefix=None, inclusivePop=True):
689
+ """Pops the tag stack up to and including the most recent
690
+ instance of the given tag.
691
+
692
+ If there are no open tags with the given name, nothing will be
693
+ popped.
694
+
695
+ :param name: Pop up to the most recent tag with this name.
696
+ :param nsprefix: The namespace prefix that goes with `name`.
697
+ :param inclusivePop: It this is false, pops the tag stack up
698
+ to but *not* including the most recent instqance of the
699
+ given tag.
700
+
701
+ """
702
+ #print("Popping to %s" % name)
703
+ if name == self.ROOT_TAG_NAME:
704
+ # The BeautifulSoup object itself can never be popped.
705
+ return
706
+
707
+ most_recently_popped = None
708
+
709
+ stack_size = len(self.tagStack)
710
+ for i in range(stack_size - 1, 0, -1):
711
+ if not self.open_tag_counter.get(name):
712
+ break
713
+ t = self.tagStack[i]
714
+ if (name == t.name and nsprefix == t.prefix):
715
+ if inclusivePop:
716
+ most_recently_popped = self.popTag()
717
+ break
718
+ most_recently_popped = self.popTag()
719
+
720
+ return most_recently_popped
721
+
722
+ def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
723
+ sourcepos=None, namespaces=None):
724
+ """Called by the tree builder when a new tag is encountered.
725
+
726
+ :param name: Name of the tag.
727
+ :param nsprefix: Namespace prefix for the tag.
728
+ :param attrs: A dictionary of attribute values.
729
+ :param sourceline: The line number where this tag was found in its
730
+ source document.
731
+ :param sourcepos: The character position within `sourceline` where this
732
+ tag was found.
733
+ :param namespaces: A dictionary of all namespace prefix mappings
734
+ currently in scope in the document.
735
+
736
+ If this method returns None, the tag was rejected by an active
737
+ SoupStrainer. You should proceed as if the tag had not occurred
738
+ in the document. For instance, if this was a self-closing tag,
739
+ don't call handle_endtag.
740
+ """
741
+ # print("Start tag %s: %s" % (name, attrs))
742
+ self.endData()
743
+
744
+ if (self.parse_only and len(self.tagStack) <= 1
745
+ and (self.parse_only.text
746
+ or not self.parse_only.search_tag(name, attrs))):
747
+ return None
748
+
749
+ tag = self.element_classes.get(Tag, Tag)(
750
+ self, self.builder, name, namespace, nsprefix, attrs,
751
+ self.currentTag, self._most_recent_element,
752
+ sourceline=sourceline, sourcepos=sourcepos,
753
+ namespaces=namespaces
754
+ )
755
+ if tag is None:
756
+ return tag
757
+ if self._most_recent_element is not None:
758
+ self._most_recent_element.next_element = tag
759
+ self._most_recent_element = tag
760
+ self.pushTag(tag)
761
+ return tag
762
+
763
+ def handle_endtag(self, name, nsprefix=None):
764
+ """Called by the tree builder when an ending tag is encountered.
765
+
766
+ :param name: Name of the tag.
767
+ :param nsprefix: Namespace prefix for the tag.
768
+ """
769
+ #print("End tag: " + name)
770
+ self.endData()
771
+ self._popToTag(name, nsprefix)
772
+
773
+ def handle_data(self, data):
774
+ """Called by the tree builder when a chunk of textual data is encountered."""
775
+ self.current_data.append(data)
776
+
777
+ def decode(self, pretty_print=False,
778
+ eventual_encoding=DEFAULT_OUTPUT_ENCODING,
779
+ formatter="minimal", iterator=None):
780
+ """Returns a string or Unicode representation of the parse tree
781
+ as an HTML or XML document.
782
+
783
+ :param pretty_print: If this is True, indentation will be used to
784
+ make the document more readable.
785
+ :param eventual_encoding: The encoding of the final document.
786
+ If this is None, the document will be a Unicode string.
787
+ """
788
+ if self.is_xml:
789
+ # Print the XML declaration
790
+ encoding_part = ''
791
+ if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
792
+ # This is a special Python encoding; it can't actually
793
+ # go into an XML document because it means nothing
794
+ # outside of Python.
795
+ eventual_encoding = None
796
+ if eventual_encoding != None:
797
+ encoding_part = ' encoding="%s"' % eventual_encoding
798
+ prefix = '<?xml version="1.0"%s?>\n' % encoding_part
799
+ else:
800
+ prefix = ''
801
+ if not pretty_print:
802
+ indent_level = None
803
+ else:
804
+ indent_level = 0
805
+ return prefix + super(BeautifulSoup, self).decode(
806
+ indent_level, eventual_encoding, formatter, iterator)
807
+
808
+ # Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
809
+ _s = BeautifulSoup
810
+ _soup = BeautifulSoup
811
+
812
+ class BeautifulStoneSoup(BeautifulSoup):
813
+ """Deprecated interface to an XML parser."""
814
+
815
+ def __init__(self, *args, **kwargs):
816
+ kwargs['features'] = 'xml'
817
+ warnings.warn(
818
+ 'The BeautifulStoneSoup class is deprecated. Instead of using '
819
+ 'it, pass features="xml" into the BeautifulSoup constructor.',
820
+ DeprecationWarning, stacklevel=2
821
+ )
822
+ super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
823
+
824
+
825
+ class StopParsing(Exception):
826
+ """Exception raised by a TreeBuilder if it's unable to continue parsing."""
827
+ pass
828
+
829
+ class FeatureNotFound(ValueError):
830
+ """Exception raised by the BeautifulSoup constructor if no parser with the
831
+ requested features is found.
832
+ """
833
+ pass
834
+
835
+
836
+ #If this file is run as a script, act as an HTML pretty-printer.
837
+ if __name__ == '__main__':
838
+ import sys
839
+ soup = BeautifulSoup(sys.stdin)
840
+ print((soup.prettify()))
vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/css.cpython-310.pyc ADDED
Binary file (9.94 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/dammit.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/diagnose.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/__pycache__/formatter.cpython-310.pyc ADDED
Binary file (6.75 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/builder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/builder/__pycache__/_lxml.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/css.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Integration code for CSS selectors using Soup Sieve (pypi: soupsieve)."""
2
+
3
+ import warnings
4
+ try:
5
+ import soupsieve
6
+ except ImportError as e:
7
+ soupsieve = None
8
+ warnings.warn(
9
+ 'The soupsieve package is not installed. CSS selectors cannot be used.'
10
+ )
11
+
12
+
13
+ class CSS(object):
14
+ """A proxy object against the soupsieve library, to simplify its
15
+ CSS selector API.
16
+
17
+ Acquire this object through the .css attribute on the
18
+ BeautifulSoup object, or on the Tag you want to use as the
19
+ starting point for a CSS selector.
20
+
21
+ The main advantage of doing this is that the tag to be selected
22
+ against doesn't need to be explicitly specified in the function
23
+ calls, since it's already scoped to a tag.
24
+ """
25
+
26
+ def __init__(self, tag, api=soupsieve):
27
+ """Constructor.
28
+
29
+ You don't need to instantiate this class yourself; instead,
30
+ access the .css attribute on the BeautifulSoup object, or on
31
+ the Tag you want to use as the starting point for your CSS
32
+ selector.
33
+
34
+ :param tag: All CSS selectors will use this as their starting
35
+ point.
36
+
37
+ :param api: A plug-in replacement for the soupsieve module,
38
+ designed mainly for use in tests.
39
+ """
40
+ if api is None:
41
+ raise NotImplementedError(
42
+ "Cannot execute CSS selectors because the soupsieve package is not installed."
43
+ )
44
+ self.api = api
45
+ self.tag = tag
46
+
47
+ def escape(self, ident):
48
+ """Escape a CSS identifier.
49
+
50
+ This is a simple wrapper around soupselect.escape(). See the
51
+ documentation for that function for more information.
52
+ """
53
+ if soupsieve is None:
54
+ raise NotImplementedError(
55
+ "Cannot escape CSS identifiers because the soupsieve package is not installed."
56
+ )
57
+ return self.api.escape(ident)
58
+
59
+ def _ns(self, ns, select):
60
+ """Normalize a dictionary of namespaces."""
61
+ if not isinstance(select, self.api.SoupSieve) and ns is None:
62
+ # If the selector is a precompiled pattern, it already has
63
+ # a namespace context compiled in, which cannot be
64
+ # replaced.
65
+ ns = self.tag._namespaces
66
+ return ns
67
+
68
+ def _rs(self, results):
69
+ """Normalize a list of results to a Resultset.
70
+
71
+ A ResultSet is more consistent with the rest of Beautiful
72
+ Soup's API, and ResultSet.__getattr__ has a helpful error
73
+ message if you try to treat a list of results as a single
74
+ result (a common mistake).
75
+ """
76
+ # Import here to avoid circular import
77
+ from bs4.element import ResultSet
78
+ return ResultSet(None, results)
79
+
80
+ def compile(self, select, namespaces=None, flags=0, **kwargs):
81
+ """Pre-compile a selector and return the compiled object.
82
+
83
+ :param selector: A CSS selector.
84
+
85
+ :param namespaces: A dictionary mapping namespace prefixes
86
+ used in the CSS selector to namespace URIs. By default,
87
+ Beautiful Soup will use the prefixes it encountered while
88
+ parsing the document.
89
+
90
+ :param flags: Flags to be passed into Soup Sieve's
91
+ soupsieve.compile() method.
92
+
93
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
94
+ soupsieve.compile() method.
95
+
96
+ :return: A precompiled selector object.
97
+ :rtype: soupsieve.SoupSieve
98
+ """
99
+ return self.api.compile(
100
+ select, self._ns(namespaces, select), flags, **kwargs
101
+ )
102
+
103
+ def select_one(self, select, namespaces=None, flags=0, **kwargs):
104
+ """Perform a CSS selection operation on the current Tag and return the
105
+ first result.
106
+
107
+ This uses the Soup Sieve library. For more information, see
108
+ that library's documentation for the soupsieve.select_one()
109
+ method.
110
+
111
+ :param selector: A CSS selector.
112
+
113
+ :param namespaces: A dictionary mapping namespace prefixes
114
+ used in the CSS selector to namespace URIs. By default,
115
+ Beautiful Soup will use the prefixes it encountered while
116
+ parsing the document.
117
+
118
+ :param flags: Flags to be passed into Soup Sieve's
119
+ soupsieve.select_one() method.
120
+
121
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
122
+ soupsieve.select_one() method.
123
+
124
+ :return: A Tag, or None if the selector has no match.
125
+ :rtype: bs4.element.Tag
126
+
127
+ """
128
+ return self.api.select_one(
129
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
130
+ )
131
+
132
+ def select(self, select, namespaces=None, limit=0, flags=0, **kwargs):
133
+ """Perform a CSS selection operation on the current Tag.
134
+
135
+ This uses the Soup Sieve library. For more information, see
136
+ that library's documentation for the soupsieve.select()
137
+ method.
138
+
139
+ :param selector: A string containing a CSS selector.
140
+
141
+ :param namespaces: A dictionary mapping namespace prefixes
142
+ used in the CSS selector to namespace URIs. By default,
143
+ Beautiful Soup will pass in the prefixes it encountered while
144
+ parsing the document.
145
+
146
+ :param limit: After finding this number of results, stop looking.
147
+
148
+ :param flags: Flags to be passed into Soup Sieve's
149
+ soupsieve.select() method.
150
+
151
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
152
+ soupsieve.select() method.
153
+
154
+ :return: A ResultSet of Tag objects.
155
+ :rtype: bs4.element.ResultSet
156
+
157
+ """
158
+ if limit is None:
159
+ limit = 0
160
+
161
+ return self._rs(
162
+ self.api.select(
163
+ select, self.tag, self._ns(namespaces, select), limit, flags,
164
+ **kwargs
165
+ )
166
+ )
167
+
168
+ def iselect(self, select, namespaces=None, limit=0, flags=0, **kwargs):
169
+ """Perform a CSS selection operation on the current Tag.
170
+
171
+ This uses the Soup Sieve library. For more information, see
172
+ that library's documentation for the soupsieve.iselect()
173
+ method. It is the same as select(), but it returns a generator
174
+ instead of a list.
175
+
176
+ :param selector: A string containing a CSS selector.
177
+
178
+ :param namespaces: A dictionary mapping namespace prefixes
179
+ used in the CSS selector to namespace URIs. By default,
180
+ Beautiful Soup will pass in the prefixes it encountered while
181
+ parsing the document.
182
+
183
+ :param limit: After finding this number of results, stop looking.
184
+
185
+ :param flags: Flags to be passed into Soup Sieve's
186
+ soupsieve.iselect() method.
187
+
188
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
189
+ soupsieve.iselect() method.
190
+
191
+ :return: A generator
192
+ :rtype: types.GeneratorType
193
+ """
194
+ return self.api.iselect(
195
+ select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
196
+ )
197
+
198
+ def closest(self, select, namespaces=None, flags=0, **kwargs):
199
+ """Find the Tag closest to this one that matches the given selector.
200
+
201
+ This uses the Soup Sieve library. For more information, see
202
+ that library's documentation for the soupsieve.closest()
203
+ method.
204
+
205
+ :param selector: A string containing a CSS selector.
206
+
207
+ :param namespaces: A dictionary mapping namespace prefixes
208
+ used in the CSS selector to namespace URIs. By default,
209
+ Beautiful Soup will pass in the prefixes it encountered while
210
+ parsing the document.
211
+
212
+ :param flags: Flags to be passed into Soup Sieve's
213
+ soupsieve.closest() method.
214
+
215
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
216
+ soupsieve.closest() method.
217
+
218
+ :return: A Tag, or None if there is no match.
219
+ :rtype: bs4.Tag
220
+
221
+ """
222
+ return self.api.closest(
223
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
224
+ )
225
+
226
+ def match(self, select, namespaces=None, flags=0, **kwargs):
227
+ """Check whether this Tag matches the given CSS selector.
228
+
229
+ This uses the Soup Sieve library. For more information, see
230
+ that library's documentation for the soupsieve.match()
231
+ method.
232
+
233
+ :param: a CSS selector.
234
+
235
+ :param namespaces: A dictionary mapping namespace prefixes
236
+ used in the CSS selector to namespace URIs. By default,
237
+ Beautiful Soup will pass in the prefixes it encountered while
238
+ parsing the document.
239
+
240
+ :param flags: Flags to be passed into Soup Sieve's
241
+ soupsieve.match() method.
242
+
243
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
244
+ soupsieve.match() method.
245
+
246
+ :return: True if this Tag matches the selector; False otherwise.
247
+ :rtype: bool
248
+ """
249
+ return self.api.match(
250
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
251
+ )
252
+
253
+ def filter(self, select, namespaces=None, flags=0, **kwargs):
254
+ """Filter this Tag's direct children based on the given CSS selector.
255
+
256
+ This uses the Soup Sieve library. It works the same way as
257
+ passing this Tag into that library's soupsieve.filter()
258
+ method. More information, for more information see the
259
+ documentation for soupsieve.filter().
260
+
261
+ :param namespaces: A dictionary mapping namespace prefixes
262
+ used in the CSS selector to namespace URIs. By default,
263
+ Beautiful Soup will pass in the prefixes it encountered while
264
+ parsing the document.
265
+
266
+ :param flags: Flags to be passed into Soup Sieve's
267
+ soupsieve.filter() method.
268
+
269
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
270
+ soupsieve.filter() method.
271
+
272
+ :return: A ResultSet of Tag objects.
273
+ :rtype: bs4.element.ResultSet
274
+
275
+ """
276
+ return self._rs(
277
+ self.api.filter(
278
+ select, self.tag, self._ns(namespaces, select), flags, **kwargs
279
+ )
280
+ )
vlmpy310/lib/python3.10/site-packages/bs4/dammit.py ADDED
@@ -0,0 +1,1095 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Beautiful Soup bonus library: Unicode, Dammit
3
+
4
+ This library converts a bytestream to Unicode through any means
5
+ necessary. It is heavily based on code from Mark Pilgrim's Universal
6
+ Feed Parser. It works best on XML and HTML, but it does not rewrite the
7
+ XML or HTML to reflect a new encoding; that's the tree builder's job.
8
+ """
9
+ # Use of this source code is governed by the MIT license.
10
+ __license__ = "MIT"
11
+
12
+ from html.entities import codepoint2name
13
+ from collections import defaultdict
14
+ import codecs
15
+ import re
16
+ import logging
17
+ import string
18
+
19
+ # Import a library to autodetect character encodings. We'll support
20
+ # any of a number of libraries that all support the same API:
21
+ #
22
+ # * cchardet
23
+ # * chardet
24
+ # * charset-normalizer
25
+ chardet_module = None
26
+ try:
27
+ # PyPI package: cchardet
28
+ import cchardet as chardet_module
29
+ except ImportError:
30
+ try:
31
+ # Debian package: python-chardet
32
+ # PyPI package: chardet
33
+ import chardet as chardet_module
34
+ except ImportError:
35
+ try:
36
+ # PyPI package: charset-normalizer
37
+ import charset_normalizer as chardet_module
38
+ except ImportError:
39
+ # No chardet available.
40
+ chardet_module = None
41
+
42
+ if chardet_module:
43
+ def chardet_dammit(s):
44
+ if isinstance(s, str):
45
+ return None
46
+ return chardet_module.detect(s)['encoding']
47
+ else:
48
+ def chardet_dammit(s):
49
+ return None
50
+
51
+ # Build bytestring and Unicode versions of regular expressions for finding
52
+ # a declared encoding inside an XML or HTML document.
53
+ xml_encoding = '^\\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>'
54
+ html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]'
55
+ encoding_res = dict()
56
+ encoding_res[bytes] = {
57
+ 'html' : re.compile(html_meta.encode("ascii"), re.I),
58
+ 'xml' : re.compile(xml_encoding.encode("ascii"), re.I),
59
+ }
60
+ encoding_res[str] = {
61
+ 'html' : re.compile(html_meta, re.I),
62
+ 'xml' : re.compile(xml_encoding, re.I)
63
+ }
64
+
65
+ from html.entities import html5
66
+
67
+ class EntitySubstitution(object):
68
+ """The ability to substitute XML or HTML entities for certain characters."""
69
+
70
+ def _populate_class_variables():
71
+ """Initialize variables used by this class to manage the plethora of
72
+ HTML5 named entities.
73
+
74
+ This function returns a 3-tuple containing two dictionaries
75
+ and a regular expression:
76
+
77
+ unicode_to_name - A mapping of Unicode strings like "⦨" to
78
+ entity names like "angmsdaa". When a single Unicode string has
79
+ multiple entity names, we try to choose the most commonly-used
80
+ name.
81
+
82
+ name_to_unicode: A mapping of entity names like "angmsdaa" to
83
+ Unicode strings like "⦨".
84
+
85
+ named_entity_re: A regular expression matching (almost) any
86
+ Unicode string that corresponds to an HTML5 named entity.
87
+ """
88
+ unicode_to_name = {}
89
+ name_to_unicode = {}
90
+
91
+ short_entities = set()
92
+ long_entities_by_first_character = defaultdict(set)
93
+
94
+ for name_with_semicolon, character in sorted(html5.items()):
95
+ # "It is intentional, for legacy compatibility, that many
96
+ # code points have multiple character reference names. For
97
+ # example, some appear both with and without the trailing
98
+ # semicolon, or with different capitalizations."
99
+ # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
100
+ #
101
+ # The parsers are in charge of handling (or not) character
102
+ # references with no trailing semicolon, so we remove the
103
+ # semicolon whenever it appears.
104
+ if name_with_semicolon.endswith(';'):
105
+ name = name_with_semicolon[:-1]
106
+ else:
107
+ name = name_with_semicolon
108
+
109
+ # When parsing HTML, we want to recognize any known named
110
+ # entity and convert it to a sequence of Unicode
111
+ # characters.
112
+ if name not in name_to_unicode:
113
+ name_to_unicode[name] = character
114
+
115
+ # When _generating_ HTML, we want to recognize special
116
+ # character sequences that _could_ be converted to named
117
+ # entities.
118
+ unicode_to_name[character] = name
119
+
120
+ # We also need to build a regular expression that lets us
121
+ # _find_ those characters in output strings so we can
122
+ # replace them.
123
+ #
124
+ # This is tricky, for two reasons.
125
+
126
+ if (len(character) == 1 and ord(character) < 128
127
+ and character not in '<>&'):
128
+ # First, it would be annoying to turn single ASCII
129
+ # characters like | into named entities like
130
+ # &verbar;. The exceptions are <>&, which we _must_
131
+ # turn into named entities to produce valid HTML.
132
+ continue
133
+
134
+ if len(character) > 1 and all(ord(x) < 128 for x in character):
135
+ # We also do not want to turn _combinations_ of ASCII
136
+ # characters like 'fj' into named entities like '&fjlig;',
137
+ # though that's more debateable.
138
+ continue
139
+
140
+ # Second, some named entities have a Unicode value that's
141
+ # a subset of the Unicode value for some _other_ named
142
+ # entity. As an example, \u2267' is &GreaterFullEqual;,
143
+ # but '\u2267\u0338' is &NotGreaterFullEqual;. Our regular
144
+ # expression needs to match the first two characters of
145
+ # "\u2267\u0338foo", but only the first character of
146
+ # "\u2267foo".
147
+ #
148
+ # In this step, we build two sets of characters that
149
+ # _eventually_ need to go into the regular expression. But
150
+ # we won't know exactly what the regular expression needs
151
+ # to look like until we've gone through the entire list of
152
+ # named entities.
153
+ if len(character) == 1:
154
+ short_entities.add(character)
155
+ else:
156
+ long_entities_by_first_character[character[0]].add(character)
157
+
158
+ # Now that we've been through the entire list of entities, we
159
+ # can create a regular expression that matches any of them.
160
+ particles = set()
161
+ for short in short_entities:
162
+ long_versions = long_entities_by_first_character[short]
163
+ if not long_versions:
164
+ particles.add(short)
165
+ else:
166
+ ignore = "".join([x[1] for x in long_versions])
167
+ # This finds, e.g. \u2267 but only if it is _not_
168
+ # followed by \u0338.
169
+ particles.add("%s(?![%s])" % (short, ignore))
170
+
171
+ for long_entities in list(long_entities_by_first_character.values()):
172
+ for long_entity in long_entities:
173
+ particles.add(long_entity)
174
+
175
+ re_definition = "(%s)" % "|".join(particles)
176
+
177
+ # If an entity shows up in both html5 and codepoint2name, it's
178
+ # likely that HTML5 gives it several different names, such as
179
+ # 'rsquo' and 'rsquor'. When converting Unicode characters to
180
+ # named entities, the codepoint2name name should take
181
+ # precedence where possible, since that's the more easily
182
+ # recognizable one.
183
+ for codepoint, name in list(codepoint2name.items()):
184
+ character = chr(codepoint)
185
+ unicode_to_name[character] = name
186
+
187
+ return unicode_to_name, name_to_unicode, re.compile(re_definition)
188
+ (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
189
+ CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
190
+
191
+ CHARACTER_TO_XML_ENTITY = {
192
+ "'": "apos",
193
+ '"': "quot",
194
+ "&": "amp",
195
+ "<": "lt",
196
+ ">": "gt",
197
+ }
198
+
199
+ BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
200
+ "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"
201
+ ")")
202
+
203
+ AMPERSAND_OR_BRACKET = re.compile("([<>&])")
204
+
205
+ @classmethod
206
+ def _substitute_html_entity(cls, matchobj):
207
+ """Used with a regular expression to substitute the
208
+ appropriate HTML entity for a special character string."""
209
+ entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
210
+ return "&%s;" % entity
211
+
212
+ @classmethod
213
+ def _substitute_xml_entity(cls, matchobj):
214
+ """Used with a regular expression to substitute the
215
+ appropriate XML entity for a special character string."""
216
+ entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
217
+ return "&%s;" % entity
218
+
219
+ @classmethod
220
+ def quoted_attribute_value(self, value):
221
+ """Make a value into a quoted XML attribute, possibly escaping it.
222
+
223
+ Most strings will be quoted using double quotes.
224
+
225
+ Bob's Bar -> "Bob's Bar"
226
+
227
+ If a string contains double quotes, it will be quoted using
228
+ single quotes.
229
+
230
+ Welcome to "my bar" -> 'Welcome to "my bar"'
231
+
232
+ If a string contains both single and double quotes, the
233
+ double quotes will be escaped, and the string will be quoted
234
+ using double quotes.
235
+
236
+ Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;
237
+ """
238
+ quote_with = '"'
239
+ if '"' in value:
240
+ if "'" in value:
241
+ # The string contains both single and double
242
+ # quotes. Turn the double quotes into
243
+ # entities. We quote the double quotes rather than
244
+ # the single quotes because the entity name is
245
+ # "&quot;" whether this is HTML or XML. If we
246
+ # quoted the single quotes, we'd have to decide
247
+ # between &apos; and &squot;.
248
+ replace_with = "&quot;"
249
+ value = value.replace('"', replace_with)
250
+ else:
251
+ # There are double quotes but no single quotes.
252
+ # We can use single quotes to quote the attribute.
253
+ quote_with = "'"
254
+ return quote_with + value + quote_with
255
+
256
+ @classmethod
257
+ def substitute_xml(cls, value, make_quoted_attribute=False):
258
+ """Substitute XML entities for special XML characters.
259
+
260
+ :param value: A string to be substituted. The less-than sign
261
+ will become &lt;, the greater-than sign will become &gt;,
262
+ and any ampersands will become &amp;. If you want ampersands
263
+ that appear to be part of an entity definition to be left
264
+ alone, use substitute_xml_containing_entities() instead.
265
+
266
+ :param make_quoted_attribute: If True, then the string will be
267
+ quoted, as befits an attribute value.
268
+ """
269
+ # Escape angle brackets and ampersands.
270
+ value = cls.AMPERSAND_OR_BRACKET.sub(
271
+ cls._substitute_xml_entity, value)
272
+
273
+ if make_quoted_attribute:
274
+ value = cls.quoted_attribute_value(value)
275
+ return value
276
+
277
+ @classmethod
278
+ def substitute_xml_containing_entities(
279
+ cls, value, make_quoted_attribute=False):
280
+ """Substitute XML entities for special XML characters.
281
+
282
+ :param value: A string to be substituted. The less-than sign will
283
+ become &lt;, the greater-than sign will become &gt;, and any
284
+ ampersands that are not part of an entity defition will
285
+ become &amp;.
286
+
287
+ :param make_quoted_attribute: If True, then the string will be
288
+ quoted, as befits an attribute value.
289
+ """
290
+ # Escape angle brackets, and ampersands that aren't part of
291
+ # entities.
292
+ value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
293
+ cls._substitute_xml_entity, value)
294
+
295
+ if make_quoted_attribute:
296
+ value = cls.quoted_attribute_value(value)
297
+ return value
298
+
299
+ @classmethod
300
+ def substitute_html(cls, s):
301
+ """Replace certain Unicode characters with named HTML entities.
302
+
303
+ This differs from data.encode(encoding, 'xmlcharrefreplace')
304
+ in that the goal is to make the result more readable (to those
305
+ with ASCII displays) rather than to recover from
306
+ errors. There's absolutely nothing wrong with a UTF-8 string
307
+ containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
308
+ character with "&eacute;" will make it more readable to some
309
+ people.
310
+
311
+ :param s: A Unicode string.
312
+ """
313
+ return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
314
+ cls._substitute_html_entity, s)
315
+
316
+
317
class EncodingDetector:
    """Suggests a number of possible encodings for a bytestring.

    Order of precedence:

    1. Encodings you specifically tell EncodingDetector to try first
    (the known_definite_encodings argument to the constructor).

    2. An encoding determined by sniffing the document's byte-order mark.

    3. Encodings you specifically tell EncodingDetector to try if
    byte-order mark sniffing fails (the user_encodings argument to the
    constructor).

    4. An encoding declared within the bytestring itself, either in an
    XML declaration (if the bytestring is to be interpreted as an XML
    document), or in a <meta> tag (if the bytestring is to be
    interpreted as an HTML document.)

    5. An encoding detected through textual analysis by chardet,
    cchardet, or a similar external library.

    6. UTF-8.

    7. Windows-1252.
    """
    def __init__(self, markup, known_definite_encodings=None,
                 is_html=False, exclude_encodings=None,
                 user_encodings=None, override_encodings=None):
        """Constructor.

        :param markup: Some markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param is_html: If True, this markup is considered to be
            HTML. Otherwise it's assumed to be XML.

        :param exclude_encodings: These encodings will not be tried,
            even if they otherwise would be.
        """
        self.known_definite_encodings = list(known_definite_encodings or [])
        if override_encodings:
            # Deprecated alias: appended after the preferred list.
            self.known_definite_encodings += override_encodings
        self.user_encodings = user_encodings or []
        exclude_encodings = exclude_encodings or []
        # Stored lowercased so membership tests in _usable are
        # case-insensitive.
        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
        self.chardet_encoding = None
        self.is_html = is_html
        self.declared_encoding = None

        # First order of business: strip a byte-order mark.
        self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)

    def _usable(self, encoding, tried):
        """Should we even bother to try this encoding?

        :param encoding: Name of an encoding (or None).
        :param tried: Encodings that have already been tried. This will be
            modified as a side effect.
        :return: True if the encoding is new and not excluded.
        """
        if encoding is not None:
            encoding = encoding.lower()
            if encoding in self.exclude_encodings:
                return False
            if encoding not in tried:
                tried.add(encoding)
                return True
        return False

    @property
    def encodings(self):
        """Yield a number of encodings that might work for this markup.

        :yield: A sequence of strings.
        """
        tried = set()

        # First, try the known definite encodings
        for e in self.known_definite_encodings:
            if self._usable(e, tried):
                yield e

        # Did the document originally start with a byte-order mark
        # that indicated its encoding?
        if self._usable(self.sniffed_encoding, tried):
            yield self.sniffed_encoding

        # Sniffing the byte-order mark did nothing; try the user
        # encodings.
        for e in self.user_encodings:
            if self._usable(e, tried):
                yield e

        # Look within the document for an XML or HTML encoding
        # declaration. Computed lazily and cached on the instance.
        if self.declared_encoding is None:
            self.declared_encoding = self.find_declared_encoding(
                self.markup, self.is_html)
        if self._usable(self.declared_encoding, tried):
            yield self.declared_encoding

        # Use third-party character set detection to guess at the
        # encoding. Also computed lazily and cached.
        if self.chardet_encoding is None:
            self.chardet_encoding = chardet_dammit(self.markup)
        if self._usable(self.chardet_encoding, tried):
            yield self.chardet_encoding

        # As a last-ditch effort, try utf-8 and windows-1252.
        for e in ('utf-8', 'windows-1252'):
            if self._usable(e, tried):
                yield e

    @classmethod
    def strip_byte_order_mark(cls, data):
        """If a byte-order mark is present, strip it and return the encoding it implies.

        :param data: Some markup.
        :return: A 2-tuple (modified data, implied encoding)
        """
        encoding = None
        if isinstance(data, str):
            # Unicode data cannot have a byte-order mark.
            return data, encoding
        # BUGFIX: these guards previously compared a bytes slice against
        # the *str* literal '\x00\x00', which is never equal in Python 3,
        # so a UTF-32 BOM (e.g. b'\xff\xfe\x00\x00') was misdetected as
        # UTF-16. Compare against bytes instead.
        if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
               and (data[2:4] != b'\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
                 and (data[2:4] != b'\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == b'\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == b'\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == b'\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        return data, encoding

    @classmethod
    def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
        """Given a document, tries to find its declared encoding.

        An XML encoding is declared at the beginning of the document.

        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.

        :param markup: Some markup.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
            it's assumed to be XML.
        :param search_entire_document: Since an encoding is supposed to
            be declared near the beginning of the document, most of the
            time it's only necessary to search a few kilobytes of data.
            Set this to True to force this method to search the entire
            document.
        :return: The declared encoding, lowercased, or None.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))

        # encoding_res holds bytes- and str-flavored compiled patterns;
        # pick the flavor matching the markup so re doesn't raise.
        if isinstance(markup, bytes):
            res = encoding_res[bytes]
        else:
            res = encoding_res[str]

        xml_re = res['xml']
        html_re = res['html']
        declared_encoding = None
        declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0]
        if declared_encoding:
            if isinstance(declared_encoding, bytes):
                declared_encoding = declared_encoding.decode('ascii', 'replace')
            return declared_encoding.lower()
        return None
521
+
522
+ class UnicodeDammit:
523
+ """A class for detecting the encoding of a *ML document and
524
+ converting it to a Unicode string. If the source encoding is
525
+ windows-1252, can replace MS smart quotes with their HTML or XML
526
+ equivalents."""
527
+
528
+ # This dictionary maps commonly seen values for "charset" in HTML
529
+ # meta tags to the corresponding Python codec names. It only covers
530
+ # values that aren't in Python's aliases and can't be determined
531
+ # by the heuristics in find_codec.
532
+ CHARSET_ALIASES = {"macintosh": "mac-roman",
533
+ "x-sjis": "shift-jis"}
534
+
535
+ ENCODINGS_WITH_SMART_QUOTES = [
536
+ "windows-1252",
537
+ "iso-8859-1",
538
+ "iso-8859-2",
539
+ ]
540
+
541
+ def __init__(self, markup, known_definite_encodings=[],
542
+ smart_quotes_to=None, is_html=False, exclude_encodings=[],
543
+ user_encodings=None, override_encodings=None
544
+ ):
545
+ """Constructor.
546
+
547
+ :param markup: A bytestring representing markup in an unknown encoding.
548
+
549
+ :param known_definite_encodings: When determining the encoding
550
+ of `markup`, these encodings will be tried first, in
551
+ order. In HTML terms, this corresponds to the "known
552
+ definite encoding" step defined here:
553
+ https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding
554
+
555
+ :param user_encodings: These encodings will be tried after the
556
+ `known_definite_encodings` have been tried and failed, and
557
+ after an attempt to sniff the encoding by looking at a
558
+ byte order mark has failed. In HTML terms, this
559
+ corresponds to the step "user has explicitly instructed
560
+ the user agent to override the document's character
561
+ encoding", defined here:
562
+ https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding
563
+
564
+ :param override_encodings: A deprecated alias for
565
+ known_definite_encodings. Any encodings here will be tried
566
+ immediately after the encodings in
567
+ known_definite_encodings.
568
+
569
+ :param smart_quotes_to: By default, Microsoft smart quotes will, like all other characters, be converted
570
+ to Unicode characters. Setting this to 'ascii' will convert them to ASCII quotes instead.
571
+ Setting it to 'xml' will convert them to XML entity references, and setting it to 'html'
572
+ will convert them to HTML entity references.
573
+ :param is_html: If True, this markup is considered to be HTML. Otherwise
574
+ it's assumed to be XML.
575
+ :param exclude_encodings: These encodings will not be considered, even
576
+ if the sniffing code thinks they might make sense.
577
+
578
+ """
579
+ self.smart_quotes_to = smart_quotes_to
580
+ self.tried_encodings = []
581
+ self.contains_replacement_characters = False
582
+ self.is_html = is_html
583
+ self.log = logging.getLogger(__name__)
584
+ self.detector = EncodingDetector(
585
+ markup, known_definite_encodings, is_html, exclude_encodings,
586
+ user_encodings, override_encodings
587
+ )
588
+
589
+ # Short-circuit if the data is in Unicode to begin with.
590
+ if isinstance(markup, str) or markup == '':
591
+ self.markup = markup
592
+ self.unicode_markup = str(markup)
593
+ self.original_encoding = None
594
+ return
595
+
596
+ # The encoding detector may have stripped a byte-order mark.
597
+ # Use the stripped markup from this point on.
598
+ self.markup = self.detector.markup
599
+
600
+ u = None
601
+ for encoding in self.detector.encodings:
602
+ markup = self.detector.markup
603
+ u = self._convert_from(encoding)
604
+ if u is not None:
605
+ break
606
+
607
+ if not u:
608
+ # None of the encodings worked. As an absolute last resort,
609
+ # try them again with character replacement.
610
+
611
+ for encoding in self.detector.encodings:
612
+ if encoding != "ascii":
613
+ u = self._convert_from(encoding, "replace")
614
+ if u is not None:
615
+ self.log.warning(
616
+ "Some characters could not be decoded, and were "
617
+ "replaced with REPLACEMENT CHARACTER."
618
+ )
619
+ self.contains_replacement_characters = True
620
+ break
621
+
622
+ # If none of that worked, we could at this point force it to
623
+ # ASCII, but that would destroy so much data that I think
624
+ # giving up is better.
625
+ self.unicode_markup = u
626
+ if not u:
627
+ self.original_encoding = None
628
+
629
+ def _sub_ms_char(self, match):
630
+ """Changes a MS smart quote character to an XML or HTML
631
+ entity, or an ASCII character."""
632
+ orig = match.group(1)
633
+ if self.smart_quotes_to == 'ascii':
634
+ sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
635
+ else:
636
+ sub = self.MS_CHARS.get(orig)
637
+ if type(sub) == tuple:
638
+ if self.smart_quotes_to == 'xml':
639
+ sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
640
+ else:
641
+ sub = '&'.encode() + sub[0].encode() + ';'.encode()
642
+ else:
643
+ sub = sub.encode()
644
+ return sub
645
+
646
+ def _convert_from(self, proposed, errors="strict"):
647
+ """Attempt to convert the markup to the proposed encoding.
648
+
649
+ :param proposed: The name of a character encoding.
650
+ """
651
+ proposed = self.find_codec(proposed)
652
+ if not proposed or (proposed, errors) in self.tried_encodings:
653
+ return None
654
+ self.tried_encodings.append((proposed, errors))
655
+ markup = self.markup
656
+ # Convert smart quotes to HTML if coming from an encoding
657
+ # that might have them.
658
+ if (self.smart_quotes_to is not None
659
+ and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
660
+ smart_quotes_re = b"([\x80-\x9f])"
661
+ smart_quotes_compiled = re.compile(smart_quotes_re)
662
+ markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
663
+
664
+ try:
665
+ #print("Trying to convert document to %s (errors=%s)" % (
666
+ # proposed, errors))
667
+ u = self._to_unicode(markup, proposed, errors)
668
+ self.markup = u
669
+ self.original_encoding = proposed
670
+ except Exception as e:
671
+ #print("That didn't work!")
672
+ #print(e)
673
+ return None
674
+ #print("Correct encoding: %s" % proposed)
675
+ return self.markup
676
+
677
+ def _to_unicode(self, data, encoding, errors="strict"):
678
+ """Given a string and its encoding, decodes the string into Unicode.
679
+
680
+ :param encoding: The name of an encoding.
681
+ """
682
+ return str(data, encoding, errors)
683
+
684
+ @property
685
+ def declared_html_encoding(self):
686
+ """If the markup is an HTML document, returns the encoding declared _within_
687
+ the document.
688
+ """
689
+ if not self.is_html:
690
+ return None
691
+ return self.detector.declared_encoding
692
+
693
+ def find_codec(self, charset):
694
+ """Convert the name of a character set to a codec name.
695
+
696
+ :param charset: The name of a character set.
697
+ :return: The name of a codec.
698
+ """
699
+ value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
700
+ or (charset and self._codec(charset.replace("-", "")))
701
+ or (charset and self._codec(charset.replace("-", "_")))
702
+ or (charset and charset.lower())
703
+ or charset
704
+ )
705
+ if value:
706
+ return value.lower()
707
+ return None
708
+
709
+ def _codec(self, charset):
710
+ if not charset:
711
+ return charset
712
+ codec = None
713
+ try:
714
+ codecs.lookup(charset)
715
+ codec = charset
716
+ except (LookupError, ValueError):
717
+ pass
718
+ return codec
719
+
720
+
721
+ # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
722
+ MS_CHARS = {b'\x80': ('euro', '20AC'),
723
+ b'\x81': ' ',
724
+ b'\x82': ('sbquo', '201A'),
725
+ b'\x83': ('fnof', '192'),
726
+ b'\x84': ('bdquo', '201E'),
727
+ b'\x85': ('hellip', '2026'),
728
+ b'\x86': ('dagger', '2020'),
729
+ b'\x87': ('Dagger', '2021'),
730
+ b'\x88': ('circ', '2C6'),
731
+ b'\x89': ('permil', '2030'),
732
+ b'\x8A': ('Scaron', '160'),
733
+ b'\x8B': ('lsaquo', '2039'),
734
+ b'\x8C': ('OElig', '152'),
735
+ b'\x8D': '?',
736
+ b'\x8E': ('#x17D', '17D'),
737
+ b'\x8F': '?',
738
+ b'\x90': '?',
739
+ b'\x91': ('lsquo', '2018'),
740
+ b'\x92': ('rsquo', '2019'),
741
+ b'\x93': ('ldquo', '201C'),
742
+ b'\x94': ('rdquo', '201D'),
743
+ b'\x95': ('bull', '2022'),
744
+ b'\x96': ('ndash', '2013'),
745
+ b'\x97': ('mdash', '2014'),
746
+ b'\x98': ('tilde', '2DC'),
747
+ b'\x99': ('trade', '2122'),
748
+ b'\x9a': ('scaron', '161'),
749
+ b'\x9b': ('rsaquo', '203A'),
750
+ b'\x9c': ('oelig', '153'),
751
+ b'\x9d': '?',
752
+ b'\x9e': ('#x17E', '17E'),
753
+ b'\x9f': ('Yuml', ''),}
754
+
755
+ # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
756
+ # horrors like stripping diacritical marks to turn á into a, but also
757
+ # contains non-horrors like turning “ into ".
758
+ MS_CHARS_TO_ASCII = {
759
+ b'\x80' : 'EUR',
760
+ b'\x81' : ' ',
761
+ b'\x82' : ',',
762
+ b'\x83' : 'f',
763
+ b'\x84' : ',,',
764
+ b'\x85' : '...',
765
+ b'\x86' : '+',
766
+ b'\x87' : '++',
767
+ b'\x88' : '^',
768
+ b'\x89' : '%',
769
+ b'\x8a' : 'S',
770
+ b'\x8b' : '<',
771
+ b'\x8c' : 'OE',
772
+ b'\x8d' : '?',
773
+ b'\x8e' : 'Z',
774
+ b'\x8f' : '?',
775
+ b'\x90' : '?',
776
+ b'\x91' : "'",
777
+ b'\x92' : "'",
778
+ b'\x93' : '"',
779
+ b'\x94' : '"',
780
+ b'\x95' : '*',
781
+ b'\x96' : '-',
782
+ b'\x97' : '--',
783
+ b'\x98' : '~',
784
+ b'\x99' : '(TM)',
785
+ b'\x9a' : 's',
786
+ b'\x9b' : '>',
787
+ b'\x9c' : 'oe',
788
+ b'\x9d' : '?',
789
+ b'\x9e' : 'z',
790
+ b'\x9f' : 'Y',
791
+ b'\xa0' : ' ',
792
+ b'\xa1' : '!',
793
+ b'\xa2' : 'c',
794
+ b'\xa3' : 'GBP',
795
+ b'\xa4' : '$', #This approximation is especially parochial--this is the
796
+ #generic currency symbol.
797
+ b'\xa5' : 'YEN',
798
+ b'\xa6' : '|',
799
+ b'\xa7' : 'S',
800
+ b'\xa8' : '..',
801
+ b'\xa9' : '',
802
+ b'\xaa' : '(th)',
803
+ b'\xab' : '<<',
804
+ b'\xac' : '!',
805
+ b'\xad' : ' ',
806
+ b'\xae' : '(R)',
807
+ b'\xaf' : '-',
808
+ b'\xb0' : 'o',
809
+ b'\xb1' : '+-',
810
+ b'\xb2' : '2',
811
+ b'\xb3' : '3',
812
+ b'\xb4' : ("'", 'acute'),
813
+ b'\xb5' : 'u',
814
+ b'\xb6' : 'P',
815
+ b'\xb7' : '*',
816
+ b'\xb8' : ',',
817
+ b'\xb9' : '1',
818
+ b'\xba' : '(th)',
819
+ b'\xbb' : '>>',
820
+ b'\xbc' : '1/4',
821
+ b'\xbd' : '1/2',
822
+ b'\xbe' : '3/4',
823
+ b'\xbf' : '?',
824
+ b'\xc0' : 'A',
825
+ b'\xc1' : 'A',
826
+ b'\xc2' : 'A',
827
+ b'\xc3' : 'A',
828
+ b'\xc4' : 'A',
829
+ b'\xc5' : 'A',
830
+ b'\xc6' : 'AE',
831
+ b'\xc7' : 'C',
832
+ b'\xc8' : 'E',
833
+ b'\xc9' : 'E',
834
+ b'\xca' : 'E',
835
+ b'\xcb' : 'E',
836
+ b'\xcc' : 'I',
837
+ b'\xcd' : 'I',
838
+ b'\xce' : 'I',
839
+ b'\xcf' : 'I',
840
+ b'\xd0' : 'D',
841
+ b'\xd1' : 'N',
842
+ b'\xd2' : 'O',
843
+ b'\xd3' : 'O',
844
+ b'\xd4' : 'O',
845
+ b'\xd5' : 'O',
846
+ b'\xd6' : 'O',
847
+ b'\xd7' : '*',
848
+ b'\xd8' : 'O',
849
+ b'\xd9' : 'U',
850
+ b'\xda' : 'U',
851
+ b'\xdb' : 'U',
852
+ b'\xdc' : 'U',
853
+ b'\xdd' : 'Y',
854
+ b'\xde' : 'b',
855
+ b'\xdf' : 'B',
856
+ b'\xe0' : 'a',
857
+ b'\xe1' : 'a',
858
+ b'\xe2' : 'a',
859
+ b'\xe3' : 'a',
860
+ b'\xe4' : 'a',
861
+ b'\xe5' : 'a',
862
+ b'\xe6' : 'ae',
863
+ b'\xe7' : 'c',
864
+ b'\xe8' : 'e',
865
+ b'\xe9' : 'e',
866
+ b'\xea' : 'e',
867
+ b'\xeb' : 'e',
868
+ b'\xec' : 'i',
869
+ b'\xed' : 'i',
870
+ b'\xee' : 'i',
871
+ b'\xef' : 'i',
872
+ b'\xf0' : 'o',
873
+ b'\xf1' : 'n',
874
+ b'\xf2' : 'o',
875
+ b'\xf3' : 'o',
876
+ b'\xf4' : 'o',
877
+ b'\xf5' : 'o',
878
+ b'\xf6' : 'o',
879
+ b'\xf7' : '/',
880
+ b'\xf8' : 'o',
881
+ b'\xf9' : 'u',
882
+ b'\xfa' : 'u',
883
+ b'\xfb' : 'u',
884
+ b'\xfc' : 'u',
885
+ b'\xfd' : 'y',
886
+ b'\xfe' : 'b',
887
+ b'\xff' : 'y',
888
+ }
889
+
890
+ # A map used when removing rogue Windows-1252/ISO-8859-1
891
+ # characters in otherwise UTF-8 documents.
892
+ #
893
+ # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
894
+ # Windows-1252.
895
+ WINDOWS_1252_TO_UTF8 = {
896
+ 0x80 : b'\xe2\x82\xac', # €
897
+ 0x82 : b'\xe2\x80\x9a', # ‚
898
+ 0x83 : b'\xc6\x92', # ƒ
899
+ 0x84 : b'\xe2\x80\x9e', # „
900
+ 0x85 : b'\xe2\x80\xa6', # …
901
+ 0x86 : b'\xe2\x80\xa0', # †
902
+ 0x87 : b'\xe2\x80\xa1', # ‡
903
+ 0x88 : b'\xcb\x86', # ˆ
904
+ 0x89 : b'\xe2\x80\xb0', # ‰
905
+ 0x8a : b'\xc5\xa0', # Š
906
+ 0x8b : b'\xe2\x80\xb9', # ‹
907
+ 0x8c : b'\xc5\x92', # Œ
908
+ 0x8e : b'\xc5\xbd', # Ž
909
+ 0x91 : b'\xe2\x80\x98', # ‘
910
+ 0x92 : b'\xe2\x80\x99', # ’
911
+ 0x93 : b'\xe2\x80\x9c', # “
912
+ 0x94 : b'\xe2\x80\x9d', # ”
913
+ 0x95 : b'\xe2\x80\xa2', # •
914
+ 0x96 : b'\xe2\x80\x93', # –
915
+ 0x97 : b'\xe2\x80\x94', # —
916
+ 0x98 : b'\xcb\x9c', # ˜
917
+ 0x99 : b'\xe2\x84\xa2', # ™
918
+ 0x9a : b'\xc5\xa1', # š
919
+ 0x9b : b'\xe2\x80\xba', # ›
920
+ 0x9c : b'\xc5\x93', # œ
921
+ 0x9e : b'\xc5\xbe', # ž
922
+ 0x9f : b'\xc5\xb8', # Ÿ
923
+ 0xa0 : b'\xc2\xa0', #  
924
+ 0xa1 : b'\xc2\xa1', # ¡
925
+ 0xa2 : b'\xc2\xa2', # ¢
926
+ 0xa3 : b'\xc2\xa3', # £
927
+ 0xa4 : b'\xc2\xa4', # ¤
928
+ 0xa5 : b'\xc2\xa5', # ¥
929
+ 0xa6 : b'\xc2\xa6', # ¦
930
+ 0xa7 : b'\xc2\xa7', # §
931
+ 0xa8 : b'\xc2\xa8', # ¨
932
+ 0xa9 : b'\xc2\xa9', # ©
933
+ 0xaa : b'\xc2\xaa', # ª
934
+ 0xab : b'\xc2\xab', # «
935
+ 0xac : b'\xc2\xac', # ¬
936
+ 0xad : b'\xc2\xad', # ­
937
+ 0xae : b'\xc2\xae', # ®
938
+ 0xaf : b'\xc2\xaf', # ¯
939
+ 0xb0 : b'\xc2\xb0', # °
940
+ 0xb1 : b'\xc2\xb1', # ±
941
+ 0xb2 : b'\xc2\xb2', # ²
942
+ 0xb3 : b'\xc2\xb3', # ³
943
+ 0xb4 : b'\xc2\xb4', # ´
944
+ 0xb5 : b'\xc2\xb5', # µ
945
+ 0xb6 : b'\xc2\xb6', # ¶
946
+ 0xb7 : b'\xc2\xb7', # ·
947
+ 0xb8 : b'\xc2\xb8', # ¸
948
+ 0xb9 : b'\xc2\xb9', # ¹
949
+ 0xba : b'\xc2\xba', # º
950
+ 0xbb : b'\xc2\xbb', # »
951
+ 0xbc : b'\xc2\xbc', # ¼
952
+ 0xbd : b'\xc2\xbd', # ½
953
+ 0xbe : b'\xc2\xbe', # ¾
954
+ 0xbf : b'\xc2\xbf', # ¿
955
+ 0xc0 : b'\xc3\x80', # À
956
+ 0xc1 : b'\xc3\x81', # Á
957
+ 0xc2 : b'\xc3\x82', # Â
958
+ 0xc3 : b'\xc3\x83', # Ã
959
+ 0xc4 : b'\xc3\x84', # Ä
960
+ 0xc5 : b'\xc3\x85', # Å
961
+ 0xc6 : b'\xc3\x86', # Æ
962
+ 0xc7 : b'\xc3\x87', # Ç
963
+ 0xc8 : b'\xc3\x88', # È
964
+ 0xc9 : b'\xc3\x89', # É
965
+ 0xca : b'\xc3\x8a', # Ê
966
+ 0xcb : b'\xc3\x8b', # Ë
967
+ 0xcc : b'\xc3\x8c', # Ì
968
+ 0xcd : b'\xc3\x8d', # Í
969
+ 0xce : b'\xc3\x8e', # Î
970
+ 0xcf : b'\xc3\x8f', # Ï
971
+ 0xd0 : b'\xc3\x90', # Ð
972
+ 0xd1 : b'\xc3\x91', # Ñ
973
+ 0xd2 : b'\xc3\x92', # Ò
974
+ 0xd3 : b'\xc3\x93', # Ó
975
+ 0xd4 : b'\xc3\x94', # Ô
976
+ 0xd5 : b'\xc3\x95', # Õ
977
+ 0xd6 : b'\xc3\x96', # Ö
978
+ 0xd7 : b'\xc3\x97', # ×
979
+ 0xd8 : b'\xc3\x98', # Ø
980
+ 0xd9 : b'\xc3\x99', # Ù
981
+ 0xda : b'\xc3\x9a', # Ú
982
+ 0xdb : b'\xc3\x9b', # Û
983
+ 0xdc : b'\xc3\x9c', # Ü
984
+ 0xdd : b'\xc3\x9d', # Ý
985
+ 0xde : b'\xc3\x9e', # Þ
986
+ 0xdf : b'\xc3\x9f', # ß
987
+ 0xe0 : b'\xc3\xa0', # à
988
+ 0xe1 : b'\xa1', # á
989
+ 0xe2 : b'\xc3\xa2', # â
990
+ 0xe3 : b'\xc3\xa3', # ã
991
+ 0xe4 : b'\xc3\xa4', # ä
992
+ 0xe5 : b'\xc3\xa5', # å
993
+ 0xe6 : b'\xc3\xa6', # æ
994
+ 0xe7 : b'\xc3\xa7', # ç
995
+ 0xe8 : b'\xc3\xa8', # è
996
+ 0xe9 : b'\xc3\xa9', # é
997
+ 0xea : b'\xc3\xaa', # ê
998
+ 0xeb : b'\xc3\xab', # ë
999
+ 0xec : b'\xc3\xac', # ì
1000
+ 0xed : b'\xc3\xad', # í
1001
+ 0xee : b'\xc3\xae', # î
1002
+ 0xef : b'\xc3\xaf', # ï
1003
+ 0xf0 : b'\xc3\xb0', # ð
1004
+ 0xf1 : b'\xc3\xb1', # ñ
1005
+ 0xf2 : b'\xc3\xb2', # ò
1006
+ 0xf3 : b'\xc3\xb3', # ó
1007
+ 0xf4 : b'\xc3\xb4', # ô
1008
+ 0xf5 : b'\xc3\xb5', # õ
1009
+ 0xf6 : b'\xc3\xb6', # ö
1010
+ 0xf7 : b'\xc3\xb7', # ÷
1011
+ 0xf8 : b'\xc3\xb8', # ø
1012
+ 0xf9 : b'\xc3\xb9', # ù
1013
+ 0xfa : b'\xc3\xba', # ú
1014
+ 0xfb : b'\xc3\xbb', # û
1015
+ 0xfc : b'\xc3\xbc', # ü
1016
+ 0xfd : b'\xc3\xbd', # ý
1017
+ 0xfe : b'\xc3\xbe', # þ
1018
+ }
1019
+
1020
+ MULTIBYTE_MARKERS_AND_SIZES = [
1021
+ (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
1022
+ (0xe0, 0xef, 3), # 3-byte characters start with E0-EF
1023
+ (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
1024
+ ]
1025
+
1026
+ FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
1027
+ LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
1028
+
1029
+ @classmethod
1030
+ def detwingle(cls, in_bytes, main_encoding="utf8",
1031
+ embedded_encoding="windows-1252"):
1032
+ """Fix characters from one encoding embedded in some other encoding.
1033
+
1034
+ Currently the only situation supported is Windows-1252 (or its
1035
+ subset ISO-8859-1), embedded in UTF-8.
1036
+
1037
+ :param in_bytes: A bytestring that you suspect contains
1038
+ characters from multiple encodings. Note that this _must_
1039
+ be a bytestring. If you've already converted the document
1040
+ to Unicode, you're too late.
1041
+ :param main_encoding: The primary encoding of `in_bytes`.
1042
+ :param embedded_encoding: The encoding that was used to embed characters
1043
+ in the main document.
1044
+ :return: A bytestring in which `embedded_encoding`
1045
+ characters have been converted to their `main_encoding`
1046
+ equivalents.
1047
+ """
1048
+ if embedded_encoding.replace('_', '-').lower() not in (
1049
+ 'windows-1252', 'windows_1252'):
1050
+ raise NotImplementedError(
1051
+ "Windows-1252 and ISO-8859-1 are the only currently supported "
1052
+ "embedded encodings.")
1053
+
1054
+ if main_encoding.lower() not in ('utf8', 'utf-8'):
1055
+ raise NotImplementedError(
1056
+ "UTF-8 is the only currently supported main encoding.")
1057
+
1058
+ byte_chunks = []
1059
+
1060
+ chunk_start = 0
1061
+ pos = 0
1062
+ while pos < len(in_bytes):
1063
+ byte = in_bytes[pos]
1064
+ if not isinstance(byte, int):
1065
+ # Python 2.x
1066
+ byte = ord(byte)
1067
+ if (byte >= cls.FIRST_MULTIBYTE_MARKER
1068
+ and byte <= cls.LAST_MULTIBYTE_MARKER):
1069
+ # This is the start of a UTF-8 multibyte character. Skip
1070
+ # to the end.
1071
+ for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
1072
+ if byte >= start and byte <= end:
1073
+ pos += size
1074
+ break
1075
+ elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
1076
+ # We found a Windows-1252 character!
1077
+ # Save the string up to this point as a chunk.
1078
+ byte_chunks.append(in_bytes[chunk_start:pos])
1079
+
1080
+ # Now translate the Windows-1252 character into UTF-8
1081
+ # and add it as another, one-byte chunk.
1082
+ byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
1083
+ pos += 1
1084
+ chunk_start = pos
1085
+ else:
1086
+ # Go on to the next character.
1087
+ pos += 1
1088
+ if chunk_start == 0:
1089
+ # The string is unchanged.
1090
+ return in_bytes
1091
+ else:
1092
+ # Store the final chunk.
1093
+ byte_chunks.append(in_bytes[chunk_start:])
1094
+ return b''.join(byte_chunks)
1095
+
vlmpy310/lib/python3.10/site-packages/bs4/diagnose.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Diagnostic functions, mainly for use when doing tech support."""
2
+
3
+ # Use of this source code is governed by the MIT license.
4
+ __license__ = "MIT"
5
+
6
+ import cProfile
7
+ from io import BytesIO
8
+ from html.parser import HTMLParser
9
+ import bs4
10
+ from bs4 import BeautifulSoup, __version__
11
+ from bs4.builder import builder_registry
12
+
13
+ import os
14
+ import pstats
15
+ import random
16
+ import tempfile
17
+ import time
18
+ import traceback
19
+ import sys
20
+ import cProfile
21
+
22
def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string containing markup that needs to be explained.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))

    basic_parsers = ["html.parser", "html5lib", "lxml"]
    # BUGFIX: iterate over a snapshot of the list. The loop removes
    # missing parsers from basic_parsers, and removing from the list
    # being iterated silently skips the element after each removal.
    for name in list(basic_parsers):
        for builder in builder_registry.builders:
            if name in builder.features:
                break
        else:
            basic_parsers.remove(name)
            print((
                "I noticed that %s is not installed. Installing it may help." %
                name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
        except ImportError as e:
            print(
                "lxml is not installed or couldn't be imported.")


    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
        except ImportError as e:
            print(
                "html5lib is not installed or couldn't be imported.")

    # Accept a file-like object as well as a string.
    if hasattr(data, 'read'):
        data = data.read()

    # Try each available parser and show what it makes of the markup.
    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))

        print(("-" * 80))
77
+
78
def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running. You can use this to determine whether
    an lxml-specific problem is in Beautiful Soup's lxml tree builders
    or in lxml itself.

    :param data: Some markup (str or bytes).
    :param html: If True, markup will be parsed with lxml's HTML parser.
        if False, lxml's XML parser will be used.
    :param kwargs: Extra keyword arguments for etree.iterparse;
        'recover' defaults to True.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    events = etree.iterparse(
        BytesIO(data), html=html, recover=recover, **kwargs
    )
    for event, element in events:
        print(("%s, %4s, %s" % (event, element.tag, element.text)))
99
+
100
class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """

    def _p(self, message):
        # Single funnel for all announcements.
        print(message)

    def handle_starttag(self, tag, attrs):
        self._p("%s START" % tag)

    def handle_endtag(self, tag):
        self._p("%s END" % tag)

    def handle_data(self, text):
        self._p("%s DATA" % text)

    def handle_charref(self, ref):
        self._p("%s CHARREF" % ref)

    def handle_entityref(self, ref):
        self._p("%s ENTITYREF" % ref)

    def handle_comment(self, text):
        self._p("%s COMMENT" % text)

    def handle_decl(self, text):
        self._p("%s DECL" % text)

    def unknown_decl(self, text):
        self._p("%s UNKNOWN-DECL" % text)

    def handle_pi(self, text):
        self._p("%s PI" % text)
137
+
138
def htmlparser_trace(data):
    """Print out the HTMLParser events that occur during parsing.

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    AnnouncingParser().feed(data)
148
+
149
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"

def rword(length=5):
    """Generate a random word-like string.

    Letters alternate consonant/vowel, starting with a consonant.
    """
    letters = []
    for position in range(length):
        pool = _consonants if position % 2 == 0 else _vowels
        letters.append(random.choice(pool))
    return ''.join(letters)
162
+
163
def rsentence(length=4):
    """Generate a random sentence-like string of `length` words."""
    words = (rword(random.randint(4, 9)) for _ in range(length))
    return " ".join(words)
166
+
167
def rdoc(num_elements=1000):
    """Randomly generate an invalid HTML document."""
    tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
    pieces = []
    for _ in range(num_elements):
        roll = random.randint(0, 3)
        if roll == 0:
            # Open a new tag.
            pieces.append("<%s>" % random.choice(tag_names))
        elif roll == 1:
            # Some text content.
            pieces.append(rsentence(random.randint(1, 4)))
        elif roll == 2:
            # Close a tag (not necessarily one that was opened).
            pieces.append("</%s>" % random.choice(tag_names))
        # roll == 3 deliberately emits nothing, adding to the invalidity.
    return "<html>" + "\n".join(pieces) + "</html>"
184
+
185
def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark.

    Parses one randomly generated document with each available parser,
    both through Beautiful Soup and (for lxml and html5lib) directly,
    printing the wall-clock time for each.
    """
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))

    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
        try:
            started = time.time()
            soup = BeautifulSoup(data, parser)
            finished = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, finished - started)))

    # Baseline: raw lxml with no Beautiful Soup overhead.
    from lxml import etree
    started = time.time()
    etree.HTML(data)
    finished = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (finished - started)))

    # Baseline: raw html5lib with no Beautiful Soup overhead.
    import html5lib
    parser = html5lib.HTMLParser()
    started = time.time()
    parser.parse(data)
    finished = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (finished - started)))
216
+
217
def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document.

    Profiles BeautifulSoup parsing the document with `parser` and
    prints the cumulative-time stats for bs4/html5lib frames.
    """
    # The temporary file holds the raw profiler output; it is cleaned
    # up automatically when the handle is garbage-collected.
    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name

    data = rdoc(num_elements)
    context = dict(bs4=bs4, data=data, parser=parser)
    cProfile.runctx('bs4.BeautifulSoup(data, parser)', context, context, filename)

    stats = pstats.Stats(filename)
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)
230
+
231
# If this file is run as a script, standard input is diagnosed.
# (All of stdin is read, then diagnose() prints a parser-by-parser report.)
if __name__ == '__main__':
    diagnose(sys.stdin.read())
vlmpy310/lib/python3.10/site-packages/bs4/element.py ADDED
@@ -0,0 +1,2435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use of this source code is governed by the MIT license.
2
+ __license__ = "MIT"
3
+
4
+ try:
5
+ from collections.abc import Callable # Python 3.6
6
+ except ImportError as e:
7
+ from collections import Callable
8
+ import re
9
+ import sys
10
+ import warnings
11
+
12
+ from bs4.css import CSS
13
+ from bs4.formatter import (
14
+ Formatter,
15
+ HTMLFormatter,
16
+ XMLFormatter,
17
+ )
18
+
19
+ DEFAULT_OUTPUT_ENCODING = "utf-8"
20
+
21
+ nonwhitespace_re = re.compile(r"\S+")
22
+
23
+ # NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on
24
+ # the off chance someone imported it for their own use.
25
+ whitespace_re = re.compile(r"\s+")
26
+
27
+ def _alias(attr):
28
+ """Alias one attribute name to another for backward compatibility"""
29
+ @property
30
+ def alias(self):
31
+ return getattr(self, attr)
32
+
33
+ @alias.setter
34
+ def alias(self):
35
+ return setattr(self, attr)
36
+ return alias
37
+
38
+
39
+ # These encodings are recognized by Python (so PageElement.encode
40
+ # could theoretically support them) but XML and HTML don't recognize
41
+ # them (so they should not show up in an XML or HTML document as that
42
+ # document's encoding).
43
+ #
44
+ # If an XML document is encoded in one of these encodings, no encoding
45
+ # will be mentioned in the XML declaration. If an HTML document is
46
+ # encoded in one of these encodings, and the HTML document has a
47
+ # <meta> tag that mentions an encoding, the encoding will be given as
48
+ # the empty string.
49
+ #
50
+ # Source:
51
+ # https://docs.python.org/3/library/codecs.html#python-specific-encodings
52
+ PYTHON_SPECIFIC_ENCODINGS = set([
53
+ "idna",
54
+ "mbcs",
55
+ "oem",
56
+ "palmos",
57
+ "punycode",
58
+ "raw_unicode_escape",
59
+ "undefined",
60
+ "unicode_escape",
61
+ "raw-unicode-escape",
62
+ "unicode-escape",
63
+ "string-escape",
64
+ "string_escape",
65
+ ])
66
+
67
+
68
+ class NamespacedAttribute(str):
69
+ """A namespaced string (e.g. 'xml:lang') that remembers the namespace
70
+ ('xml') and the name ('lang') that were used to create it.
71
+ """
72
+
73
+ def __new__(cls, prefix, name=None, namespace=None):
74
+ if not name:
75
+ # This is the default namespace. Its name "has no value"
76
+ # per https://www.w3.org/TR/xml-names/#defaulting
77
+ name = None
78
+
79
+ if not name:
80
+ obj = str.__new__(cls, prefix)
81
+ elif not prefix:
82
+ # Not really namespaced.
83
+ obj = str.__new__(cls, name)
84
+ else:
85
+ obj = str.__new__(cls, prefix + ":" + name)
86
+ obj.prefix = prefix
87
+ obj.name = name
88
+ obj.namespace = namespace
89
+ return obj
90
+
91
+ class AttributeValueWithCharsetSubstitution(str):
92
+ """A stand-in object for a character encoding specified in HTML."""
93
+
94
+ class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
95
+ """A generic stand-in for the value of a meta tag's 'charset' attribute.
96
+
97
+ When Beautiful Soup parses the markup '<meta charset="utf8">', the
98
+ value of the 'charset' attribute will be one of these objects.
99
+ """
100
+
101
+ def __new__(cls, original_value):
102
+ obj = str.__new__(cls, original_value)
103
+ obj.original_value = original_value
104
+ return obj
105
+
106
+ def encode(self, encoding):
107
+ """When an HTML document is being encoded to a given encoding, the
108
+ value of a meta tag's 'charset' is the name of the encoding.
109
+ """
110
+ if encoding in PYTHON_SPECIFIC_ENCODINGS:
111
+ return ''
112
+ return encoding
113
+
114
+
115
+ class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
116
+ """A generic stand-in for the value of a meta tag's 'content' attribute.
117
+
118
+ When Beautiful Soup parses the markup:
119
+ <meta http-equiv="content-type" content="text/html; charset=utf8">
120
+
121
+ The value of the 'content' attribute will be one of these objects.
122
+ """
123
+
124
+ CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
125
+
126
+ def __new__(cls, original_value):
127
+ match = cls.CHARSET_RE.search(original_value)
128
+ if match is None:
129
+ # No substitution necessary.
130
+ return str.__new__(str, original_value)
131
+
132
+ obj = str.__new__(cls, original_value)
133
+ obj.original_value = original_value
134
+ return obj
135
+
136
+ def encode(self, encoding):
137
+ if encoding in PYTHON_SPECIFIC_ENCODINGS:
138
+ return ''
139
+ def rewrite(match):
140
+ return match.group(1) + encoding
141
+ return self.CHARSET_RE.sub(rewrite, self.original_value)
142
+
143
+
144
+ class PageElement(object):
145
+ """Contains the navigational information for some part of the page:
146
+ that is, its current location in the parse tree.
147
+
148
+ NavigableString, Tag, etc. are all subclasses of PageElement.
149
+ """
150
+
151
+ # In general, we can't tell just by looking at an element whether
152
+ # it's contained in an XML document or an HTML document. But for
153
+ # Tags (q.v.) we can store this information at parse time.
154
+ known_xml = None
155
+
156
+ def setup(self, parent=None, previous_element=None, next_element=None,
157
+ previous_sibling=None, next_sibling=None):
158
+ """Sets up the initial relations between this element and
159
+ other elements.
160
+
161
+ :param parent: The parent of this element.
162
+
163
+ :param previous_element: The element parsed immediately before
164
+ this one.
165
+
166
+ :param next_element: The element parsed immediately before
167
+ this one.
168
+
169
+ :param previous_sibling: The most recently encountered element
170
+ on the same level of the parse tree as this one.
171
+
172
+ :param previous_sibling: The next element to be encountered
173
+ on the same level of the parse tree as this one.
174
+ """
175
+ self.parent = parent
176
+
177
+ self.previous_element = previous_element
178
+ if previous_element is not None:
179
+ self.previous_element.next_element = self
180
+
181
+ self.next_element = next_element
182
+ if self.next_element is not None:
183
+ self.next_element.previous_element = self
184
+
185
+ self.next_sibling = next_sibling
186
+ if self.next_sibling is not None:
187
+ self.next_sibling.previous_sibling = self
188
+
189
+ if (previous_sibling is None
190
+ and self.parent is not None and self.parent.contents):
191
+ previous_sibling = self.parent.contents[-1]
192
+
193
+ self.previous_sibling = previous_sibling
194
+ if previous_sibling is not None:
195
+ self.previous_sibling.next_sibling = self
196
+
197
+ def format_string(self, s, formatter):
198
+ """Format the given string using the given formatter.
199
+
200
+ :param s: A string.
201
+ :param formatter: A Formatter object, or a string naming one of the standard formatters.
202
+ """
203
+ if formatter is None:
204
+ return s
205
+ if not isinstance(formatter, Formatter):
206
+ formatter = self.formatter_for_name(formatter)
207
+ output = formatter.substitute(s)
208
+ return output
209
+
210
+ def formatter_for_name(self, formatter):
211
+ """Look up or create a Formatter for the given identifier,
212
+ if necessary.
213
+
214
+ :param formatter: Can be a Formatter object (used as-is), a
215
+ function (used as the entity substitution hook for an
216
+ XMLFormatter or HTMLFormatter), or a string (used to look
217
+ up an XMLFormatter or HTMLFormatter in the appropriate
218
+ registry.
219
+ """
220
+ if isinstance(formatter, Formatter):
221
+ return formatter
222
+ if self._is_xml:
223
+ c = XMLFormatter
224
+ else:
225
+ c = HTMLFormatter
226
+ if isinstance(formatter, Callable):
227
+ return c(entity_substitution=formatter)
228
+ return c.REGISTRY[formatter]
229
+
230
+ @property
231
+ def _is_xml(self):
232
+ """Is this element part of an XML tree or an HTML tree?
233
+
234
+ This is used in formatter_for_name, when deciding whether an
235
+ XMLFormatter or HTMLFormatter is more appropriate. It can be
236
+ inefficient, but it should be called very rarely.
237
+ """
238
+ if self.known_xml is not None:
239
+ # Most of the time we will have determined this when the
240
+ # document is parsed.
241
+ return self.known_xml
242
+
243
+ # Otherwise, it's likely that this element was created by
244
+ # direct invocation of the constructor from within the user's
245
+ # Python code.
246
+ if self.parent is None:
247
+ # This is the top-level object. It should have .known_xml set
248
+ # from tree creation. If not, take a guess--BS is usually
249
+ # used on HTML markup.
250
+ return getattr(self, 'is_xml', False)
251
+ return self.parent._is_xml
252
+
253
+ nextSibling = _alias("next_sibling") # BS3
254
+ previousSibling = _alias("previous_sibling") # BS3
255
+
256
+ default = object()
257
+ def _all_strings(self, strip=False, types=default):
258
+ """Yield all strings of certain classes, possibly stripping them.
259
+
260
+ This is implemented differently in Tag and NavigableString.
261
+ """
262
+ raise NotImplementedError()
263
+
264
+ @property
265
+ def stripped_strings(self):
266
+ """Yield all strings in this PageElement, stripping them first.
267
+
268
+ :yield: A sequence of stripped strings.
269
+ """
270
+ for string in self._all_strings(True):
271
+ yield string
272
+
273
+ def get_text(self, separator="", strip=False,
274
+ types=default):
275
+ """Get all child strings of this PageElement, concatenated using the
276
+ given separator.
277
+
278
+ :param separator: Strings will be concatenated using this separator.
279
+
280
+ :param strip: If True, strings will be stripped before being
281
+ concatenated.
282
+
283
+ :param types: A tuple of NavigableString subclasses. Any
284
+ strings of a subclass not found in this list will be
285
+ ignored. Although there are exceptions, the default
286
+ behavior in most cases is to consider only NavigableString
287
+ and CData objects. That means no comments, processing
288
+ instructions, etc.
289
+
290
+ :return: A string.
291
+ """
292
+ return separator.join([s for s in self._all_strings(
293
+ strip, types=types)])
294
+ getText = get_text
295
+ text = property(get_text)
296
+
297
+ def replace_with(self, *args):
298
+ """Replace this PageElement with one or more PageElements, keeping the
299
+ rest of the tree the same.
300
+
301
+ :param args: One or more PageElements.
302
+ :return: `self`, no longer part of the tree.
303
+ """
304
+ if self.parent is None:
305
+ raise ValueError(
306
+ "Cannot replace one element with another when the "
307
+ "element to be replaced is not part of a tree.")
308
+ if len(args) == 1 and args[0] is self:
309
+ return
310
+ if any(x is self.parent for x in args):
311
+ raise ValueError("Cannot replace a Tag with its parent.")
312
+ old_parent = self.parent
313
+ my_index = self.parent.index(self)
314
+ self.extract(_self_index=my_index)
315
+ for idx, replace_with in enumerate(args, start=my_index):
316
+ old_parent.insert(idx, replace_with)
317
+ return self
318
+ replaceWith = replace_with # BS3
319
+
320
+ def unwrap(self):
321
+ """Replace this PageElement with its contents.
322
+
323
+ :return: `self`, no longer part of the tree.
324
+ """
325
+ my_parent = self.parent
326
+ if self.parent is None:
327
+ raise ValueError(
328
+ "Cannot replace an element with its contents when that"
329
+ "element is not part of a tree.")
330
+ my_index = self.parent.index(self)
331
+ self.extract(_self_index=my_index)
332
+ for child in reversed(self.contents[:]):
333
+ my_parent.insert(my_index, child)
334
+ return self
335
+ replace_with_children = unwrap
336
+ replaceWithChildren = unwrap # BS3
337
+
338
+ def wrap(self, wrap_inside):
339
+ """Wrap this PageElement inside another one.
340
+
341
+ :param wrap_inside: A PageElement.
342
+ :return: `wrap_inside`, occupying the position in the tree that used
343
+ to be occupied by `self`, and with `self` inside it.
344
+ """
345
+ me = self.replace_with(wrap_inside)
346
+ wrap_inside.append(me)
347
+ return wrap_inside
348
+
349
+ def extract(self, _self_index=None):
350
+ """Destructively rips this element out of the tree.
351
+
352
+ :param _self_index: The location of this element in its parent's
353
+ .contents, if known. Passing this in allows for a performance
354
+ optimization.
355
+
356
+ :return: `self`, no longer part of the tree.
357
+ """
358
+ if self.parent is not None:
359
+ if _self_index is None:
360
+ _self_index = self.parent.index(self)
361
+ del self.parent.contents[_self_index]
362
+
363
+ #Find the two elements that would be next to each other if
364
+ #this element (and any children) hadn't been parsed. Connect
365
+ #the two.
366
+ last_child = self._last_descendant()
367
+ next_element = last_child.next_element
368
+
369
+ if (self.previous_element is not None and
370
+ self.previous_element is not next_element):
371
+ self.previous_element.next_element = next_element
372
+ if next_element is not None and next_element is not self.previous_element:
373
+ next_element.previous_element = self.previous_element
374
+ self.previous_element = None
375
+ last_child.next_element = None
376
+
377
+ self.parent = None
378
+ if (self.previous_sibling is not None
379
+ and self.previous_sibling is not self.next_sibling):
380
+ self.previous_sibling.next_sibling = self.next_sibling
381
+ if (self.next_sibling is not None
382
+ and self.next_sibling is not self.previous_sibling):
383
+ self.next_sibling.previous_sibling = self.previous_sibling
384
+ self.previous_sibling = self.next_sibling = None
385
+ return self
386
+
387
+ def _last_descendant(self, is_initialized=True, accept_self=True):
388
+ """Finds the last element beneath this object to be parsed.
389
+
390
+ :param is_initialized: Has `setup` been called on this PageElement
391
+ yet?
392
+ :param accept_self: Is `self` an acceptable answer to the question?
393
+ """
394
+ if is_initialized and self.next_sibling is not None:
395
+ last_child = self.next_sibling.previous_element
396
+ else:
397
+ last_child = self
398
+ while isinstance(last_child, Tag) and last_child.contents:
399
+ last_child = last_child.contents[-1]
400
+ if not accept_self and last_child is self:
401
+ last_child = None
402
+ return last_child
403
+ # BS3: Not part of the API!
404
+ _lastRecursiveChild = _last_descendant
405
+
406
+ def insert(self, position, new_child):
407
+ """Insert a new PageElement in the list of this PageElement's children.
408
+
409
+ This works the same way as `list.insert`.
410
+
411
+ :param position: The numeric position that should be occupied
412
+ in `self.children` by the new PageElement.
413
+ :param new_child: A PageElement.
414
+ """
415
+ if new_child is None:
416
+ raise ValueError("Cannot insert None into a tag.")
417
+ if new_child is self:
418
+ raise ValueError("Cannot insert a tag into itself.")
419
+ if (isinstance(new_child, str)
420
+ and not isinstance(new_child, NavigableString)):
421
+ new_child = NavigableString(new_child)
422
+
423
+ from bs4 import BeautifulSoup
424
+ if isinstance(new_child, BeautifulSoup):
425
+ # We don't want to end up with a situation where one BeautifulSoup
426
+ # object contains another. Insert the children one at a time.
427
+ for subchild in list(new_child.contents):
428
+ self.insert(position, subchild)
429
+ position += 1
430
+ return
431
+ position = min(position, len(self.contents))
432
+ if hasattr(new_child, 'parent') and new_child.parent is not None:
433
+ # We're 'inserting' an element that's already one
434
+ # of this object's children.
435
+ if new_child.parent is self:
436
+ current_index = self.index(new_child)
437
+ if current_index < position:
438
+ # We're moving this element further down the list
439
+ # of this object's children. That means that when
440
+ # we extract this element, our target index will
441
+ # jump down one.
442
+ position -= 1
443
+ new_child.extract()
444
+
445
+ new_child.parent = self
446
+ previous_child = None
447
+ if position == 0:
448
+ new_child.previous_sibling = None
449
+ new_child.previous_element = self
450
+ else:
451
+ previous_child = self.contents[position - 1]
452
+ new_child.previous_sibling = previous_child
453
+ new_child.previous_sibling.next_sibling = new_child
454
+ new_child.previous_element = previous_child._last_descendant(False)
455
+ if new_child.previous_element is not None:
456
+ new_child.previous_element.next_element = new_child
457
+
458
+ new_childs_last_element = new_child._last_descendant(False)
459
+
460
+ if position >= len(self.contents):
461
+ new_child.next_sibling = None
462
+
463
+ parent = self
464
+ parents_next_sibling = None
465
+ while parents_next_sibling is None and parent is not None:
466
+ parents_next_sibling = parent.next_sibling
467
+ parent = parent.parent
468
+ if parents_next_sibling is not None:
469
+ # We found the element that comes next in the document.
470
+ break
471
+ if parents_next_sibling is not None:
472
+ new_childs_last_element.next_element = parents_next_sibling
473
+ else:
474
+ # The last element of this tag is the last element in
475
+ # the document.
476
+ new_childs_last_element.next_element = None
477
+ else:
478
+ next_child = self.contents[position]
479
+ new_child.next_sibling = next_child
480
+ if new_child.next_sibling is not None:
481
+ new_child.next_sibling.previous_sibling = new_child
482
+ new_childs_last_element.next_element = next_child
483
+
484
+ if new_childs_last_element.next_element is not None:
485
+ new_childs_last_element.next_element.previous_element = new_childs_last_element
486
+ self.contents.insert(position, new_child)
487
+
488
+ def append(self, tag):
489
+ """Appends the given PageElement to the contents of this one.
490
+
491
+ :param tag: A PageElement.
492
+ """
493
+ self.insert(len(self.contents), tag)
494
+
495
+ def extend(self, tags):
496
+ """Appends the given PageElements to this one's contents.
497
+
498
+ :param tags: A list of PageElements. If a single Tag is
499
+ provided instead, this PageElement's contents will be extended
500
+ with that Tag's contents.
501
+ """
502
+ if isinstance(tags, Tag):
503
+ tags = tags.contents
504
+ if isinstance(tags, list):
505
+ # Moving items around the tree may change their position in
506
+ # the original list. Make a list that won't change.
507
+ tags = list(tags)
508
+ for tag in tags:
509
+ self.append(tag)
510
+
511
+ def insert_before(self, *args):
512
+ """Makes the given element(s) the immediate predecessor of this one.
513
+
514
+ All the elements will have the same parent, and the given elements
515
+ will be immediately before this one.
516
+
517
+ :param args: One or more PageElements.
518
+ """
519
+ parent = self.parent
520
+ if parent is None:
521
+ raise ValueError(
522
+ "Element has no parent, so 'before' has no meaning.")
523
+ if any(x is self for x in args):
524
+ raise ValueError("Can't insert an element before itself.")
525
+ for predecessor in args:
526
+ # Extract first so that the index won't be screwed up if they
527
+ # are siblings.
528
+ if isinstance(predecessor, PageElement):
529
+ predecessor.extract()
530
+ index = parent.index(self)
531
+ parent.insert(index, predecessor)
532
+
533
+ def insert_after(self, *args):
534
+ """Makes the given element(s) the immediate successor of this one.
535
+
536
+ The elements will have the same parent, and the given elements
537
+ will be immediately after this one.
538
+
539
+ :param args: One or more PageElements.
540
+ """
541
+ # Do all error checking before modifying the tree.
542
+ parent = self.parent
543
+ if parent is None:
544
+ raise ValueError(
545
+ "Element has no parent, so 'after' has no meaning.")
546
+ if any(x is self for x in args):
547
+ raise ValueError("Can't insert an element after itself.")
548
+
549
+ offset = 0
550
+ for successor in args:
551
+ # Extract first so that the index won't be screwed up if they
552
+ # are siblings.
553
+ if isinstance(successor, PageElement):
554
+ successor.extract()
555
+ index = parent.index(self)
556
+ parent.insert(index+1+offset, successor)
557
+ offset += 1
558
+
559
+ def find_next(self, name=None, attrs={}, string=None, **kwargs):
560
+ """Find the first PageElement that matches the given criteria and
561
+ appears later in the document than this PageElement.
562
+
563
+ All find_* methods take a common set of arguments. See the online
564
+ documentation for detailed explanations.
565
+
566
+ :param name: A filter on tag name.
567
+ :param attrs: A dictionary of filters on attribute values.
568
+ :param string: A filter for a NavigableString with specific text.
569
+ :kwargs: A dictionary of filters on attribute values.
570
+ :return: A PageElement.
571
+ :rtype: bs4.element.Tag | bs4.element.NavigableString
572
+ """
573
+ return self._find_one(self.find_all_next, name, attrs, string, **kwargs)
574
+ findNext = find_next # BS3
575
+
576
+ def find_all_next(self, name=None, attrs={}, string=None, limit=None,
577
+ **kwargs):
578
+ """Find all PageElements that match the given criteria and appear
579
+ later in the document than this PageElement.
580
+
581
+ All find_* methods take a common set of arguments. See the online
582
+ documentation for detailed explanations.
583
+
584
+ :param name: A filter on tag name.
585
+ :param attrs: A dictionary of filters on attribute values.
586
+ :param string: A filter for a NavigableString with specific text.
587
+ :param limit: Stop looking after finding this many results.
588
+ :kwargs: A dictionary of filters on attribute values.
589
+ :return: A ResultSet containing PageElements.
590
+ """
591
+ _stacklevel = kwargs.pop('_stacklevel', 2)
592
+ return self._find_all(name, attrs, string, limit, self.next_elements,
593
+ _stacklevel=_stacklevel+1, **kwargs)
594
+ findAllNext = find_all_next # BS3
595
+
596
+ def find_next_sibling(self, name=None, attrs={}, string=None, **kwargs):
597
+ """Find the closest sibling to this PageElement that matches the
598
+ given criteria and appears later in the document.
599
+
600
+ All find_* methods take a common set of arguments. See the
601
+ online documentation for detailed explanations.
602
+
603
+ :param name: A filter on tag name.
604
+ :param attrs: A dictionary of filters on attribute values.
605
+ :param string: A filter for a NavigableString with specific text.
606
+ :kwargs: A dictionary of filters on attribute values.
607
+ :return: A PageElement.
608
+ :rtype: bs4.element.Tag | bs4.element.NavigableString
609
+ """
610
+ return self._find_one(self.find_next_siblings, name, attrs, string,
611
+ **kwargs)
612
+ findNextSibling = find_next_sibling # BS3
613
+
614
+ def find_next_siblings(self, name=None, attrs={}, string=None, limit=None,
615
+ **kwargs):
616
+ """Find all siblings of this PageElement that match the given criteria
617
+ and appear later in the document.
618
+
619
+ All find_* methods take a common set of arguments. See the online
620
+ documentation for detailed explanations.
621
+
622
+ :param name: A filter on tag name.
623
+ :param attrs: A dictionary of filters on attribute values.
624
+ :param string: A filter for a NavigableString with specific text.
625
+ :param limit: Stop looking after finding this many results.
626
+ :kwargs: A dictionary of filters on attribute values.
627
+ :return: A ResultSet of PageElements.
628
+ :rtype: bs4.element.ResultSet
629
+ """
630
+ _stacklevel = kwargs.pop('_stacklevel', 2)
631
+ return self._find_all(
632
+ name, attrs, string, limit,
633
+ self.next_siblings, _stacklevel=_stacklevel+1, **kwargs
634
+ )
635
+ findNextSiblings = find_next_siblings # BS3
636
+ fetchNextSiblings = find_next_siblings # BS2
637
+
638
+ def find_previous(self, name=None, attrs={}, string=None, **kwargs):
639
+ """Look backwards in the document from this PageElement and find the
640
+ first PageElement that matches the given criteria.
641
+
642
+ All find_* methods take a common set of arguments. See the online
643
+ documentation for detailed explanations.
644
+
645
+ :param name: A filter on tag name.
646
+ :param attrs: A dictionary of filters on attribute values.
647
+ :param string: A filter for a NavigableString with specific text.
648
+ :kwargs: A dictionary of filters on attribute values.
649
+ :return: A PageElement.
650
+ :rtype: bs4.element.Tag | bs4.element.NavigableString
651
+ """
652
+ return self._find_one(
653
+ self.find_all_previous, name, attrs, string, **kwargs)
654
+ findPrevious = find_previous # BS3
655
+
656
+ def find_all_previous(self, name=None, attrs={}, string=None, limit=None,
657
+ **kwargs):
658
+ """Look backwards in the document from this PageElement and find all
659
+ PageElements that match the given criteria.
660
+
661
+ All find_* methods take a common set of arguments. See the online
662
+ documentation for detailed explanations.
663
+
664
+ :param name: A filter on tag name.
665
+ :param attrs: A dictionary of filters on attribute values.
666
+ :param string: A filter for a NavigableString with specific text.
667
+ :param limit: Stop looking after finding this many results.
668
+ :kwargs: A dictionary of filters on attribute values.
669
+ :return: A ResultSet of PageElements.
670
+ :rtype: bs4.element.ResultSet
671
+ """
672
+ _stacklevel = kwargs.pop('_stacklevel', 2)
673
+ return self._find_all(
674
+ name, attrs, string, limit, self.previous_elements,
675
+ _stacklevel=_stacklevel+1, **kwargs
676
+ )
677
+ findAllPrevious = find_all_previous # BS3
678
+ fetchPrevious = find_all_previous # BS2
679
+
680
+ def find_previous_sibling(self, name=None, attrs={}, string=None, **kwargs):
681
+ """Returns the closest sibling to this PageElement that matches the
682
+ given criteria and appears earlier in the document.
683
+
684
+ All find_* methods take a common set of arguments. See the online
685
+ documentation for detailed explanations.
686
+
687
+ :param name: A filter on tag name.
688
+ :param attrs: A dictionary of filters on attribute values.
689
+ :param string: A filter for a NavigableString with specific text.
690
+ :kwargs: A dictionary of filters on attribute values.
691
+ :return: A PageElement.
692
+ :rtype: bs4.element.Tag | bs4.element.NavigableString
693
+ """
694
+ return self._find_one(self.find_previous_siblings, name, attrs, string,
695
+ **kwargs)
696
+ findPreviousSibling = find_previous_sibling # BS3
697
+
698
def find_previous_siblings(self, name=None, attrs={}, string=None,
                           limit=None, **kwargs):
    """Return all siblings of this PageElement that match the given
    criteria and appear earlier in the document.

    All find_* methods take a common set of arguments. See the online
    documentation for detailed explanations.

    :param name: A filter on tag name.
    :param attrs: A dictionary of filters on attribute values.
    :param string: A filter for a NavigableString with specific text.
    :param limit: Stop looking after finding this many results.
    :kwargs: A dictionary of filters on attribute values.
    :return: A ResultSet of PageElements.
    :rtype: bs4.element.ResultSet
    """
    # Bump the stacklevel so deprecation warnings point at the caller.
    stacklevel = kwargs.pop('_stacklevel', 2)
    return self._find_all(
        name, attrs, string, limit, self.previous_siblings,
        _stacklevel=stacklevel + 1, **kwargs
    )
findPreviousSiblings = find_previous_siblings  # BS3
fetchPreviousSiblings = find_previous_siblings  # BS2
721
+
722
def find_parent(self, name=None, attrs={}, **kwargs):
    """Find the closest parent of this PageElement that matches the given
    criteria.

    All find_* methods take a common set of arguments. See the online
    documentation for detailed explanations.

    :param name: A filter on tag name.
    :param attrs: A dictionary of filters on attribute values.
    :kwargs: A dictionary of filters on attribute values.

    :return: A PageElement.
    :rtype: bs4.element.Tag | bs4.element.NavigableString
    """
    # NOTE: _find_one is unusable here because find_parents takes a
    # different set of arguments (no `string` filter).
    matches = self.find_parents(name, attrs, 1, _stacklevel=3, **kwargs)
    return matches[0] if matches else None
findParent = find_parent  # BS3
744
+
745
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
    """Find all parents of this PageElement that match the given criteria.

    All find_* methods take a common set of arguments. See the online
    documentation for detailed explanations.

    :param name: A filter on tag name.
    :param attrs: A dictionary of filters on attribute values.
    :param limit: Stop looking after finding this many results.
    :kwargs: A dictionary of filters on attribute values.

    :return: A ResultSet of PageElements.
    :rtype: bs4.element.ResultSet
    """
    # Bump the stacklevel so deprecation warnings point at the caller.
    stacklevel = kwargs.pop('_stacklevel', 2)
    return self._find_all(
        name, attrs, None, limit, self.parents,
        _stacklevel=stacklevel + 1, **kwargs
    )
findParents = find_parents  # BS3
fetchParents = find_parents  # BS2
764
+
765
@property
def next(self):
    """The PageElement, if any, that was parsed just after this one.

    :return: A PageElement.
    :rtype: bs4.element.Tag | bs4.element.NavigableString
    """
    # Thin alias over the next_element navigation pointer.
    return self.next_element
773
+
774
@property
def previous(self):
    """The PageElement, if any, that was parsed just before this one.

    :return: A PageElement.
    :rtype: bs4.element.Tag | bs4.element.NavigableString
    """
    # Thin alias over the previous_element navigation pointer.
    return self.previous_element
782
+
783
+ #These methods do the real heavy lifting.
784
+
785
def _find_one(self, method, name, attrs, string, **kwargs):
    """Run a plural find_* method with limit=1 and unwrap the result.

    :param method: A bound find_*s method to delegate to.
    :return: The single match, or None if nothing matched.
    """
    results = method(name, attrs, string, 1, _stacklevel=4, **kwargs)
    return results[0] if results else None
791
+
792
def _find_all(self, name, attrs, string, limit, generator, **kwargs):
    """Iterate over a generator looking for things that match.

    :param name: A filter on tag name; may also be a SoupStrainer.
    :param attrs: A dictionary of filters on attribute values.
    :param string: A filter for a NavigableString with specific text.
    :param limit: Stop looking after finding this many results.
    :param generator: A generator of PageElements to search.
    :kwargs: A dictionary of filters on attribute values.
    :return: A ResultSet of PageElements.
    """
    _stacklevel = kwargs.pop('_stacklevel', 3)

    if string is None and 'text' in kwargs:
        # 'text' is the deprecated BS3-era name for the 'string' argument.
        string = kwargs.pop('text')
        warnings.warn(
            "The 'text' argument to find()-type methods is deprecated. Use 'string' instead.",
            DeprecationWarning, stacklevel=_stacklevel
        )

    if isinstance(name, SoupStrainer):
        strainer = name
    else:
        strainer = SoupStrainer(name, attrs, string, **kwargs)

    if string is None and not limit and not attrs and not kwargs:
        if name is True or name is None:
            # Optimization to find all tags.
            result = (element for element in generator
                      if isinstance(element, Tag))
            return ResultSet(strainer, result)
        elif isinstance(name, str):
            # Optimization to find all tags with a given name.
            if name.count(':') == 1:
                # This is a name with a prefix. If this is a
                # namespace-aware document, we need to match the local
                # name against tag.name. If not, we need to match the
                # fully-qualified name against tag.name.
                prefix, local_name = name.split(':', 1)
            else:
                prefix = None
                local_name = name
            # BUG FIX: the previous version relied on operator
            # precedence ('and' binds tighter than 'or'), which let the
            # local-name clause escape the isinstance(element, Tag)
            # guard; it only worked because NavigableString.name is
            # always None. Group the name tests explicitly so only
            # Tags can ever match.
            result = (
                element for element in generator
                if isinstance(element, Tag) and (
                    element.name == name
                    or (
                        element.name == local_name
                        and (prefix is None or element.prefix == prefix)
                    )
                )
            )
            return ResultSet(strainer, result)

    # General case: run every element through the strainer.
    results = ResultSet(strainer)
    for element in generator:
        if element:
            found = strainer.search(element)
            if found:
                results.append(found)
                if limit and len(results) >= limit:
                    break
    return results
847
+
848
+ #These generators can be used to navigate starting from both
849
+ #NavigableStrings and Tags.
850
@property
def next_elements(self):
    """All PageElements that were parsed after this one.

    :yield: A sequence of PageElements.
    """
    # Walk the next_element chain until it runs out.
    current = self.next_element
    while current is not None:
        yield current
        current = current.next_element
860
+
861
@property
def next_siblings(self):
    """All PageElements that are siblings of this one but were parsed
    later.

    :yield: A sequence of PageElements.
    """
    # Walk the next_sibling chain until it runs out.
    current = self.next_sibling
    while current is not None:
        yield current
        current = current.next_sibling
872
+
873
@property
def previous_elements(self):
    """All PageElements that were parsed before this one.

    :yield: A sequence of PageElements.
    """
    # Walk the previous_element chain until it runs out.
    current = self.previous_element
    while current is not None:
        yield current
        current = current.previous_element
883
+
884
@property
def previous_siblings(self):
    """All PageElements that are siblings of this one but were parsed
    earlier.

    :yield: A sequence of PageElements.
    """
    # Walk the previous_sibling chain until it runs out.
    current = self.previous_sibling
    while current is not None:
        yield current
        current = current.previous_sibling
895
+
896
@property
def parents(self):
    """All PageElements that are parents of this PageElement.

    :yield: A sequence of PageElements.
    """
    # Walk up the parent chain to the root.
    ancestor = self.parent
    while ancestor is not None:
        yield ancestor
        ancestor = ancestor.parent
906
+
907
@property
def decomposed(self):
    """Check whether a PageElement has been decomposed.

    :rtype: bool
    """
    # The _decomposed flag is only planted by decompose(); elements
    # that were never decomposed don't carry it at all.
    flag = getattr(self, '_decomposed', False)
    return flag or False
914
+
915
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
    """BS3 compatibility alias for the `next_elements` property."""
    return self.next_elements

def nextSiblingGenerator(self):
    """BS3 compatibility alias for the `next_siblings` property."""
    return self.next_siblings

def previousGenerator(self):
    """BS3 compatibility alias for the `previous_elements` property."""
    return self.previous_elements

def previousSiblingGenerator(self):
    """BS3 compatibility alias for the `previous_siblings` property."""
    return self.previous_siblings

def parentGenerator(self):
    """BS3 compatibility alias for the `parents` property."""
    return self.parents
931
+
932
+
933
class NavigableString(str, PageElement):
    """A Python Unicode string that is part of a parse tree.

    When Beautiful Soup parses the markup <b>penguin</b>, it will
    create a NavigableString for the string "penguin".
    """

    # No prefix/suffix by default; subclasses like Comment and CData
    # override these to add their delimiters.
    PREFIX = ''
    SUFFIX = ''

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, str):
            u = str.__new__(cls, value)
        else:
            # `value` is a bytestring (e.g. from unpickling); decode it.
            u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
        # Initialize the PageElement side (tree-navigation pointers).
        u.setup()
        return u

    def __deepcopy__(self, memo, recursive=False):
        """A copy of a NavigableString has the same contents and class
        as the original, but it is not connected to the parse tree.

        :param recursive: This parameter is ignored; it's only defined
            so that NavigableString.__deepcopy__ implements the same
            signature as Tag.__deepcopy__.
        """
        return type(self)(self)

    def __copy__(self):
        """A copy of a NavigableString can only be a deep copy, because
        only one PageElement can occupy a given place in a parse tree.
        """
        return self.__deepcopy__({})

    def __getnewargs__(self):
        # Pickle support: reconstruct via __new__ with the plain string.
        return (str(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))

    def output_ready(self, formatter="minimal"):
        """Run the string through the provided formatter.

        :param formatter: A Formatter object, or a string naming one of the standard formatters.
        """
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX

    @property
    def name(self):
        """Since a NavigableString is not a Tag, it has no .name.

        This property is implemented so that code like this doesn't crash
        when run on a mixture of Tag and NavigableString objects:
            [x.name for x in tag.children]
        """
        return None

    @name.setter
    def name(self, name):
        """Prevent NavigableString.name from ever being set."""
        raise AttributeError("A NavigableString cannot be given a name.")

    def _all_strings(self, strip=False, types=PageElement.default):
        """Yield all strings of certain classes, possibly stripping them.

        This makes it easy for NavigableString to implement methods
        like get_text() as conveniences, creating a consistent
        text-extraction API across all PageElements.

        :param strip: If True, all strings will be stripped before being
            yielded.

        :param types: A tuple of NavigableString subclasses. If this
            NavigableString isn't one of those subclasses, the
            sequence will be empty. By default, the subclasses
            considered are NavigableString and CData objects. That
            means no comments, processing instructions, etc.

        :yield: A sequence that either contains this string, or is empty.

        """
        if types is self.default:
            # This is kept in Tag because it's full of subclasses of
            # this class, which aren't defined until later in the file.
            types = Tag.DEFAULT_INTERESTING_STRING_TYPES

        # Do nothing if the caller is looking for specific types of
        # string, and we're of a different type.
        #
        # We check specific types instead of using isinstance(self,
        # types) because all of these classes subclass
        # NavigableString. Anyone who's using this feature probably
        # wants generic NavigableStrings but not other stuff.
        my_type = type(self)
        if types is not None:
            if isinstance(types, type):
                # Looking for a single type.
                if my_type is not types:
                    return
            elif my_type not in types:
                # Looking for one of a list of types.
                return

        value = self
        if strip:
            value = value.strip()
        # An all-whitespace string stripped down to nothing is not yielded.
        if len(value) > 0:
            yield value
    strings = property(_all_strings)
1058
+
1059
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    This is an abstract class used for special kinds of strings such
    as comments (the Comment class) and CDATA blocks (the CData
    class).
    """

    PREFIX = ''
    SUFFIX = ''

    def output_ready(self, formatter=None):
        """Make this string ready for output by adding any subclass-specific
        prefix or suffix.

        :param formatter: A Formatter object, or a string naming one
            of the standard formatters. The string will be passed into the
            Formatter, but only to trigger any side effects: the return
            value is ignored.

        :return: The string, with any subclass-specific prefix and
           suffix added on.
        """
        if formatter is not None:
            # The formatter is invoked only for its side effects; its
            # return value is deliberately discarded so the preformatted
            # text is emitted verbatim.
            ignore = self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
1085
+
1086
class CData(PreformattedString):
    """A CDATA block."""
    # Contents are emitted verbatim inside the standard CDATA delimiters.
    PREFIX = '<![CDATA['
    SUFFIX = ']]>'
1090
+
1091
class ProcessingInstruction(PreformattedString):
    """A SGML processing instruction."""

    # SGML PIs close with a bare '>'; compare XMLProcessingInstruction,
    # which closes with '?>'.
    PREFIX = '<?'
    SUFFIX = '>'
1096
+
1097
class XMLProcessingInstruction(ProcessingInstruction):
    """An XML processing instruction."""
    # XML PIs close with '?>' rather than SGML's bare '>'.
    PREFIX = '<?'
    SUFFIX = '?>'
1101
+
1102
class Comment(PreformattedString):
    """An HTML or XML comment."""
    # Contents are emitted verbatim between the comment delimiters.
    PREFIX = '<!--'
    SUFFIX = '-->'
1106
+
1107
+
1108
class Declaration(PreformattedString):
    """An XML declaration."""
    # e.g. <?xml version="1.0" encoding="utf-8"?>
    PREFIX = '<?'
    SUFFIX = '?>'
1112
+
1113
+
1114
class Doctype(PreformattedString):
    """A document type declaration."""

    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Generate an appropriate document type declaration for a given
        public ID and system ID.

        :param name: The name of the document's root element, e.g. 'html'.
        :param pub_id: The Formal Public Identifier for this document type,
            e.g. '-//W3C//DTD XHTML 1.1//EN'
        :param system_id: The system identifier for this document type,
            e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'

        :return: A Doctype.
        """
        # Build the declaration as a list of space-separated parts.
        parts = [name or '']
        if pub_id is not None:
            # A public ID implies the PUBLIC keyword; a system ID, if
            # present, follows with no keyword of its own.
            parts.append('PUBLIC "%s"' % pub_id)
            if system_id is not None:
                parts.append('"%s"' % system_id)
        elif system_id is not None:
            parts.append('SYSTEM "%s"' % system_id)
        return Doctype(' '.join(parts))

    PREFIX = '<!DOCTYPE '
    SUFFIX = '>\n'
1141
+
1142
+
1143
class Stylesheet(NavigableString):
    """A NavigableString representing a stylesheet (probably
    CSS).

    Used to distinguish embedded stylesheets from textual content.
    """
    pass
1150
+
1151
+
1152
class Script(NavigableString):
    """A NavigableString representing an executable script (probably
    Javascript).

    Used to distinguish executable code from textual content.
    """
    pass
1159
+
1160
+
1161
class TemplateString(NavigableString):
    """A NavigableString representing a string found inside an HTML
    template embedded in a larger document.

    Used to distinguish such strings from the main body of the document.
    """
    pass
1168
+
1169
+
1170
class RubyTextString(NavigableString):
    """A NavigableString representing the contents of the <rt> HTML
    element.

    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rt-element

    Can be used to distinguish such strings from the strings they're
    annotating.
    """
    pass
1180
+
1181
+
1182
class RubyParenthesisString(NavigableString):
    """A NavigableString representing the contents of the <rp> HTML
    element.

    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rp-element
    """
    pass
1189
+
1190
+
1191
+ class Tag(PageElement):
1192
+ """Represents an HTML or XML tag that is part of a parse tree, along
1193
+ with its attributes and contents.
1194
+
1195
+ When Beautiful Soup parses the markup <b>penguin</b>, it will
1196
+ create a Tag object representing the <b> tag.
1197
+ """
1198
+
1199
def __init__(self, parser=None, builder=None, name=None, namespace=None,
             prefix=None, attrs=None, parent=None, previous=None,
             is_xml=None, sourceline=None, sourcepos=None,
             can_be_empty_element=None, cdata_list_attributes=None,
             preserve_whitespace_tags=None,
             interesting_string_types=None,
             namespaces=None
             ):
    """Basic constructor.

    :param parser: A BeautifulSoup object.
    :param builder: A TreeBuilder.
    :param name: The name of the tag.
    :param namespace: The URI of this Tag's XML namespace, if any.
    :param prefix: The prefix for this Tag's XML namespace, if any.
    :param attrs: A dictionary of this Tag's attribute values.
    :param parent: The PageElement to use as this Tag's parent.
    :param previous: The PageElement that was parsed immediately before
        this tag.
    :param is_xml: If True, this is an XML tag. Otherwise, this is an
        HTML tag.
    :param sourceline: The line number where this tag was found in its
        source document.
    :param sourcepos: The character position within `sourceline` where this
        tag was found.
    :param can_be_empty_element: If True, this tag should be
        represented as <tag/>. If False, this tag should be represented
        as <tag></tag>.
    :param cdata_list_attributes: A list of attributes whose values should
        be treated as CDATA if they ever show up on this tag.
    :param preserve_whitespace_tags: A list of tag names whose contents
        should have their whitespace preserved.
    :param interesting_string_types: This is a NavigableString
        subclass or a tuple of them. When iterating over this
        Tag's strings in methods like Tag.strings or Tag.get_text,
        these are the types of strings that are interesting enough
        to be considered. The default is to consider
        NavigableString and CData the only interesting string
        subtypes.
    :param namespaces: A dictionary mapping currently active
        namespace prefixes to URIs. This can be used later to
        construct CSS selectors.
    """
    if parser is None:
        self.parser_class = None
    else:
        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected.
        self.parser_class = parser.__class__
    if name is None:
        raise ValueError("No value provided for new tag's name.")
    self.name = name
    self.namespace = namespace
    self._namespaces = namespaces or {}
    self.prefix = prefix
    # Only record source position when the builder permits it (or when
    # there is no builder at all) and a position was actually provided.
    if ((not builder or builder.store_line_numbers)
        and (sourceline is not None or sourcepos is not None)):
        self.sourceline = sourceline
        self.sourcepos = sourcepos
    if attrs is None:
        attrs = {}
    elif attrs:
        if builder is not None and builder.cdata_list_attributes:
            # Convert e.g. a 'class' string into a list of classes.
            attrs = builder._replace_cdata_list_attribute_values(
                self.name, attrs)
        else:
            attrs = dict(attrs)
    else:
        # Copy the (empty) mapping so the caller's object isn't shared.
        attrs = dict(attrs)

    # If possible, determine ahead of time whether this tag is an
    # XML tag.
    if builder:
        self.known_xml = builder.is_xml
    else:
        self.known_xml = is_xml
    self.attrs = attrs
    self.contents = []
    # Wire up the PageElement navigation pointers.
    self.setup(parent, previous)
    self.hidden = False

    if builder is None:
        # In the absence of a TreeBuilder, use whatever values were
        # passed in here. They're probably None, unless this is a copy of some
        # other tag.
        self.can_be_empty_element = can_be_empty_element
        self.cdata_list_attributes = cdata_list_attributes
        self.preserve_whitespace_tags = preserve_whitespace_tags
        self.interesting_string_types = interesting_string_types
    else:
        # Set up any substitutions for this tag, such as the charset in a META tag.
        builder.set_up_substitutions(self)

        # Ask the TreeBuilder whether this tag might be an empty-element tag.
        self.can_be_empty_element = builder.can_be_empty_element(name)

        # Keep track of the list of attributes of this tag that
        # might need to be treated as a list.
        #
        # For performance reasons, we store the whole data structure
        # rather than asking the question of every tag. Asking would
        # require building a new data structure every time, and
        # (unlike can_be_empty_element), we almost never need
        # to check this.
        self.cdata_list_attributes = builder.cdata_list_attributes

        # Keep track of the names that might cause this tag to be treated as a
        # whitespace-preserved tag.
        self.preserve_whitespace_tags = builder.preserve_whitespace_tags

        if self.name in builder.string_containers:
            # This sort of tag uses a special string container
            # subclass for most of its strings. When we ask the
            # tag for its strings, only instances of that subclass
            # will be considered interesting.
            self.interesting_string_types = builder.string_containers[self.name]
        else:
            self.interesting_string_types = self.DEFAULT_INTERESTING_STRING_TYPES

parserClass = _alias("parser_class")  # BS3
1317
+
1318
def __deepcopy__(self, memo, recursive=True):
    """A deepcopy of a Tag is a new Tag, unconnected to the parse tree.
    Its contents are a copy of the old Tag's contents.
    """
    clone = self._clone()

    if recursive:
        # Clone this tag's descendants recursively, but without
        # making any recursive function calls. _event_stream yields
        # (event, element) pairs mimicking a SAX-style traversal.
        tag_stack = [clone]
        for event, element in self._event_stream(self.descendants):
            if event is Tag.END_ELEMENT_EVENT:
                # Stop appending incoming Tags to the Tag that was
                # just closed.
                tag_stack.pop()
            else:
                # Copy just this element; its children arrive as
                # later events.
                descendant_clone = element.__deepcopy__(
                    memo, recursive=False
                )
                # Add to its parent's .contents
                tag_stack[-1].append(descendant_clone)

                if event is Tag.START_ELEMENT_EVENT:
                    # Add the Tag itself to the stack so that its
                    # children will be .appended to it.
                    tag_stack.append(descendant_clone)
    return clone
1345
+
1346
def __copy__(self):
    """A copy of a Tag must always be a deep copy, because a Tag's
    children can only have one parent at a time.
    """
    memo = {}
    return self.__deepcopy__(memo)
1351
+
1352
def _clone(self):
    """Create a new Tag just like this one, but with no
    contents and unattached to any parse tree.

    This is the first step in the deepcopy process.
    """
    # NOTE(review): this reads self._is_xml, which __init__ does not
    # set (it sets self.known_xml) -- presumably a property defined
    # elsewhere on this class; confirm before refactoring.
    clone = type(self)(
        None, None, self.name, self.namespace,
        self.prefix, self.attrs, is_xml=self._is_xml,
        sourceline=self.sourceline, sourcepos=self.sourcepos,
        can_be_empty_element=self.can_be_empty_element,
        cdata_list_attributes=self.cdata_list_attributes,
        preserve_whitespace_tags=self.preserve_whitespace_tags,
        interesting_string_types=self.interesting_string_types
    )
    # 'hidden' is not a constructor argument, so copy it (and
    # can_be_empty_element, redundantly) by direct assignment.
    for attr in ('can_be_empty_element', 'hidden'):
        setattr(clone, attr, getattr(self, attr))
    return clone
1370
+
1371
@property
def is_empty_element(self):
    """Is this tag an empty-element tag? (aka a self-closing tag)

    A tag that has contents is never an empty-element tag.

    A tag that has no contents may or may not be an empty-element
    tag. It depends on the builder used to create the tag. If the
    builder has a designated list of empty-element tags, then only
    a tag whose name shows up in that list is considered an
    empty-element tag.

    If the builder has no designated list of empty-element tags,
    then any tag with no contents is an empty-element tag.
    """
    return not self.contents and self.can_be_empty_element
isSelfClosing = is_empty_element  # BS3
1388
+
1389
@property
def string(self):
    """Convenience property to get the single string within this
    PageElement.

    TODO It might make sense to have NavigableString.string return
    itself.

    :return: If this element has a single string child, return
     value is that string. If this element has one child tag,
     return value is the 'string' attribute of the child tag,
     recursively. If this element is itself a string, has no
     children, or has more than one child, return value is None.
    """
    # Anything other than exactly one child means there is no
    # unambiguous "string" for this element.
    if len(self.contents) != 1:
        return None
    only_child = self.contents[0]
    if isinstance(only_child, NavigableString):
        return only_child
    # Single child tag: recurse into it.
    return only_child.string

@string.setter
def string(self, string):
    """Replace this PageElement's contents with `string`."""
    self.clear()
    # Preserve the incoming string's class (e.g. Comment stays Comment).
    self.append(string.__class__(string))
1415
+
1416
# By default only plain strings and CDATA blocks count as "interesting"
# when extracting text; comments, PIs, etc. are skipped.
DEFAULT_INTERESTING_STRING_TYPES = (NavigableString, CData)
def _all_strings(self, strip=False, types=PageElement.default):
    """Yield all strings of certain classes, possibly stripping them.

    :param strip: If True, all strings will be stripped before being
        yielded.

    :param types: A tuple of NavigableString subclasses. Any strings of
        a subclass not found in this list will be ignored. By
        default, the subclasses considered are the ones found in
        self.interesting_string_types. If that's not specified,
        only NavigableString and CData objects will be
        considered. That means no comments, processing
        instructions, etc.

    :yield: A sequence of strings.

    """
    if types is self.default:
        # The caller didn't specify; use this tag's configured set.
        types = self.interesting_string_types

    for descendant in self.descendants:
        if (types is None and not isinstance(descendant, NavigableString)):
            continue
        descendant_type = type(descendant)
        # Exact type checks (not isinstance) are deliberate: every
        # special string class subclasses NavigableString, and callers
        # asking for NavigableString usually don't want comments etc.
        if isinstance(types, type):
            if descendant_type is not types:
                # We're not interested in strings of this type.
                continue
        elif types is not None and descendant_type not in types:
            # We're not interested in strings of this type.
            continue
        if strip:
            descendant = descendant.strip()
            if len(descendant) == 0:
                # Whitespace-only strings vanish entirely when stripped.
                continue
        yield descendant
strings = property(_all_strings)
1454
+
1455
def decompose(self):
    """Recursively destroys this PageElement and its children.

    This element will be removed from the tree and wiped out; so
    will everything beneath it.

    The behavior of a decomposed PageElement is undefined and you
    should never use one for anything, but if you need to _check_
    whether an element has been decomposed, you can use the
    `decomposed` property.
    """
    self.extract()
    i = self
    while i is not None:
        # Capture the next element before wiping the current one:
        # clearing __dict__ destroys the navigation pointers.
        n = i.next_element
        i.__dict__.clear()
        i.contents = []
        # Leave a marker behind so the `decomposed` property can
        # still report True on this husk.
        i._decomposed = True
        i = n
1474
+
1475
def clear(self, decompose=False):
    """Wipe out all children of this PageElement by calling extract()
    on them.

    :param decompose: If this is True, decompose() (a more
        destructive method) will be called instead of extract().
    """
    # Iterate over a copy: extract()/decompose() mutate self.contents.
    for child in self.contents[:]:
        if decompose and isinstance(child, Tag):
            child.decompose()
        else:
            # NavigableStrings have no decompose(), so they are always
            # extracted, even in decompose mode.
            child.extract()
1491
+
1492
def smooth(self):
    """Smooth out this element's children by consolidating consecutive
    strings.

    This makes pretty-printed output look more natural following a
    lot of operations that modified the tree.
    """
    # Mark the first position of every pair of children that need
    # to be consolidated. Do this rather than making a copy of
    # self.contents, since in most cases very few strings will be
    # affected.
    marked = []
    for i, a in enumerate(self.contents):
        if isinstance(a, Tag):
            # Recursively smooth children.
            a.smooth()
        if i == len(self.contents)-1:
            # This is the last item in .contents; there is no
            # following sibling to merge it with.
            continue
        b = self.contents[i+1]
        # Only merge plain strings; preformatted strings (comments,
        # CDATA, ...) must keep their delimiters intact.
        if (isinstance(a, NavigableString)
            and isinstance(b, NavigableString)
            and not isinstance(a, PreformattedString)
            and not isinstance(b, PreformattedString)
        ):
            marked.append(i)

    # Go over the marked positions in reverse order, so that
    # removing items from .contents won't affect the remaining
    # positions.
    for i in reversed(marked):
        a = self.contents[i]
        b = self.contents[i+1]
        b.extract()
        n = NavigableString(a+b)
        a.replace_with(n)
1529
+
1530
def index(self, element):
    """Find the index of a child by identity, not value.

    Avoids issues with tag.contents.index(element) getting the
    index of equal elements.

    :param element: Look for this PageElement in `self.contents`.
    """
    for position, candidate in enumerate(self.contents):
        # Identity comparison: equal-but-distinct children must not match.
        if candidate is element:
            return position
    raise ValueError("Tag.index: element not in tag")
1542
+
1543
def get(self, key, default=None):
    """Return the value of the 'key' attribute for the tag, or
    the value given for 'default' if the tag doesn't have that
    attribute."""
    return self.attrs.get(key, default)
1548
+
1549
def get_attribute_list(self, key, default=None):
    """The same as get(), but always returns a list.

    :param key: The attribute to look for.
    :param default: Use this value if the attribute is not present
        on this PageElement.
    :return: A list of values, probably containing only a single
        value.
    """
    value = self.get(key, default)
    # Scalar values (including the default) are wrapped in a
    # single-element list; existing lists pass through untouched.
    return value if isinstance(value, list) else [value]
1562
+
1563
def has_attr(self, key):
    """Does this PageElement have an attribute with the given name?"""
    return key in self.attrs
1566
+
1567
def __hash__(self):
    # A Tag hashes like its string rendering, consistent with __eq__
    # comparing rendered structure.
    return hash(str(self))
1569
+
1570
def __getitem__(self, key):
    """tag[key] returns the value of the 'key' attribute for the Tag,
    and raises KeyError if it's not there."""
    return self.attrs[key]
1574
+
1575
def __iter__(self):
    """Iterating over a Tag iterates over its direct children."""
    return iter(self.contents)
1578
+
1579
def __len__(self):
    """The length of a Tag is the number of its direct children."""
    return len(self.contents)
1582
+
1583
def __contains__(self, x):
    # Membership checks the direct children only.
    return x in self.contents
1585
+
1586
def __bool__(self):
    """A Tag is truthy even when it has no contents, so that
    `if tag:` distinguishes a found tag from None."""
    return True
1589
+
1590
def __setitem__(self, key, value):
    """Setting tag[key] sets the value of the 'key' attribute for the
    tag."""
    self.attrs[key] = value
1594
+
1595
def __delitem__(self, key):
    """Deleting tag[key] removes the 'key' attribute; deleting a
    missing attribute is silently ignored."""
    self.attrs.pop(key, None)
1598
+
1599
def __call__(self, *args, **kwargs):
    """Calling a Tag like a function is the same as calling its
    find_all() method. Eg. tag('a') returns a list of all the A tags
    found within this tag."""
    return self.find_all(*args, **kwargs)
1604
+
1605
+ def __getattr__(self, tag):
1606
+ """Calling tag.subtag is the same as calling tag.find(name="subtag")"""
1607
+ #print("Getattr %s.%s" % (self.__class__, tag))
1608
+ if len(tag) > 3 and tag.endswith('Tag'):
1609
+ # BS3: soup.aTag -> "soup.find("a")
1610
+ tag_name = tag[:-3]
1611
+ warnings.warn(
1612
+ '.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict(
1613
+ name=tag_name
1614
+ ),
1615
+ DeprecationWarning, stacklevel=2
1616
+ )
1617
+ return self.find(tag_name)
1618
+ # We special case contents to avoid recursion.
1619
+ elif not tag.startswith("__") and not tag == "contents":
1620
+ return self.find(tag)
1621
+ raise AttributeError(
1622
+ "'%s' object has no attribute '%s'" % (self.__class__, tag))
1623
+
1624
def __eq__(self, other):
    """Returns true iff this Tag has the same name, the same attributes,
    and the same contents (recursively) as `other`."""
    if self is other:
        return True
    # `other` must be tag-shaped before any field comparison.
    if not all(hasattr(other, a) for a in ('name', 'attrs', 'contents')):
        return False
    if (self.name != other.name
            or self.attrs != other.attrs
            or len(self) != len(other)):
        return False
    # Lengths match, so zip covers every child pair.
    return all(
        mine == theirs
        for mine, theirs in zip(self.contents, other.contents)
    )
1640
+
1641
+ def __ne__(self, other):
1642
+ """Returns true iff this Tag is not identical to `other`,
1643
+ as defined in __eq__."""
1644
+ return not self == other
1645
+
1646
    def __repr__(self, encoding="unicode-escape"):
        """Renders this PageElement as a string.

        NOTE(review): this definition is immediately superseded by the
        ``__str__ = __repr__ = __unicode__`` assignment later in the
        class, so the ``encoding`` parameter is never consulted in
        practice.

        :param encoding: The encoding to use (Python 2 only).
            TODO: This is now ignored and a warning should be issued
            if a value is provided.
        :return: A (Unicode) string.
        """
        # "The return value must be a string object", i.e. Unicode
        return self.decode()
1656
+
1657
    def __unicode__(self):
        """Renders this PageElement as a Unicode string."""
        return self.decode()

    # str() and repr() both delegate to __unicode__; this rebinding means
    # the __repr__(encoding=...) definition above is effectively dead.
    __str__ = __repr__ = __unicode__
1662
+
1663
+ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
1664
+ indent_level=None, formatter="minimal",
1665
+ errors="xmlcharrefreplace"):
1666
+ """Render a bytestring representation of this PageElement and its
1667
+ contents.
1668
+
1669
+ :param encoding: The destination encoding.
1670
+ :param indent_level: Each line of the rendering will be
1671
+ indented this many levels. (The formatter decides what a
1672
+ 'level' means in terms of spaces or other characters
1673
+ output.) Used internally in recursive calls while
1674
+ pretty-printing.
1675
+ :param formatter: A Formatter object, or a string naming one of
1676
+ the standard formatters.
1677
+ :param errors: An error handling strategy such as
1678
+ 'xmlcharrefreplace'. This value is passed along into
1679
+ encode() and its value should be one of the constants
1680
+ defined by Python.
1681
+ :return: A bytestring.
1682
+
1683
+ """
1684
+ # Turn the data structure into Unicode, then encode the
1685
+ # Unicode.
1686
+ u = self.decode(indent_level, encoding, formatter)
1687
+ return u.encode(encoding, errors)
1688
+
1689
    def decode(self, indent_level=None,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal",
               iterator=None):
        """Render this PageElement and its contents as a Unicode string.

        :param indent_level: Each line of the rendering will be indented
            this many levels; `True` means "start pretty-printing at
            level 0". `None` disables pretty-printing.
        :param eventual_encoding: The encoding the string is destined to
            be encoded into; passed through to tag formatting so (e.g.)
            charset-substituting attribute values can be rewritten.
        :param formatter: A Formatter object, or a string naming one of
            the standard formatters.
        :param iterator: An alternate iterator over the tree (see
            _event_stream); defaults to self_and_descendants.
        :return: A Unicode string.
        """
        pieces = []
        # First off, turn a non-Formatter `formatter` into a Formatter
        # object. This will stop the lookup from happening over and
        # over again.
        if not isinstance(formatter, Formatter):
            formatter = self.formatter_for_name(formatter)

        if indent_level is True:
            indent_level = 0

        # The currently active tag that put us into string literal
        # mode. Until this element is closed, children will be treated
        # as string literals and not pretty-printed. String literal
        # mode is turned on immediately after this tag begins, and
        # turned off immediately before it's closed. This means there
        # will be whitespace before and after the tag itself.
        string_literal_tag = None

        for event, element in self._event_stream(iterator):
            if event in (Tag.START_ELEMENT_EVENT, Tag.EMPTY_ELEMENT_EVENT):
                piece = element._format_tag(
                    eventual_encoding, formatter, opening=True
                )
            elif event is Tag.END_ELEMENT_EVENT:
                piece = element._format_tag(
                    eventual_encoding, formatter, opening=False
                )
                # A closing tag de-indents everything that follows it.
                if indent_level is not None:
                    indent_level -= 1
            else:
                piece = element.output_ready(formatter)

            # Now we need to apply the 'prettiness' -- extra
            # whitespace before and/or after this tag. This can get
            # complicated because certain tags, like <pre> and
            # <script>, can't be prettified, since adding whitespace would
            # change the meaning of the content.

            # The default behavior is to add whitespace before and
            # after an element when string literal mode is off, and to
            # leave things as they are when string literal mode is on.
            if string_literal_tag:
                indent_before = indent_after = False
            else:
                indent_before = indent_after = True

            # The only time the behavior is more complex than that is
            # when we encounter an opening or closing tag that might
            # put us into or out of string literal mode.
            if (event is Tag.START_ELEMENT_EVENT
                and not string_literal_tag
                and not element._should_pretty_print()):
                # We are about to enter string literal mode. Add
                # whitespace before this tag, but not after. We
                # will stay in string literal mode until this tag
                # is closed.
                indent_before = True
                indent_after = False
                string_literal_tag = element
            elif (event is Tag.END_ELEMENT_EVENT
                  and element is string_literal_tag):
                # We are about to exit string literal mode by closing
                # the tag that sent us into that mode. Add whitespace
                # after this tag, but not before.
                indent_before = False
                indent_after = True
                string_literal_tag = None

            # Now we know whether to add whitespace before and/or
            # after this element.
            if indent_level is not None:
                if (indent_before or indent_after):
                    if isinstance(element, NavigableString):
                        piece = piece.strip()
                    if piece:
                        piece = self._indent_string(
                            piece, indent_level, formatter,
                            indent_before, indent_after
                        )
                # An opening tag indents everything that follows it.
                if event == Tag.START_ELEMENT_EVENT:
                    indent_level += 1
            pieces.append(piece)
        return "".join(pieces)
1776
+
1777
    # Names for the different events yielded by _event_stream.
    # Each is a unique sentinel object, compared by identity (`is`).
    START_ELEMENT_EVENT = object()   # an opening tag
    END_ELEMENT_EVENT = object()     # a closing tag
    EMPTY_ELEMENT_EVENT = object()   # a void/empty element (no children)
    STRING_ELEMENT_EVENT = object()  # a string (non-Tag) node
1782
+
1783
    def _event_stream(self, iterator=None):
        """Yield a sequence of events that can be used to reconstruct the DOM
        for this element.

        This lets us recreate the nested structure of this element
        (e.g. when formatting it as a string) without using recursive
        method calls.

        This is similar in concept to the SAX API, but it's a simpler
        interface designed for internal use. The events are different
        from SAX and the arguments associated with the events are Tags
        and other Beautiful Soup objects.

        :param iterator: An alternate iterator to use when traversing
            the tree.
        """
        # Open tags whose END_ELEMENT_EVENT has not been emitted yet,
        # outermost first.
        tag_stack = []

        iterator = iterator or self.self_and_descendants

        for c in iterator:
            # If the parent of the element we're about to yield is not
            # the tag currently on the stack, it means that the tag on
            # the stack closed before this element appeared.
            while tag_stack and c.parent != tag_stack[-1]:
                now_closed_tag = tag_stack.pop()
                yield Tag.END_ELEMENT_EVENT, now_closed_tag

            if isinstance(c, Tag):
                if c.is_empty_element:
                    # Empty elements open and close in one event;
                    # they are never pushed on the stack.
                    yield Tag.EMPTY_ELEMENT_EVENT, c
                else:
                    yield Tag.START_ELEMENT_EVENT, c
                    tag_stack.append(c)
                    continue
            else:
                yield Tag.STRING_ELEMENT_EVENT, c

        # Close whatever is still open once the iterator is exhausted.
        while tag_stack:
            now_closed_tag = tag_stack.pop()
            yield Tag.END_ELEMENT_EVENT, now_closed_tag
1824
+
1825
+ def _indent_string(self, s, indent_level, formatter,
1826
+ indent_before, indent_after):
1827
+ """Add indentation whitespace before and/or after a string.
1828
+
1829
+ :param s: The string to amend with whitespace.
1830
+ :param indent_level: The indentation level; affects how much
1831
+ whitespace goes before the string.
1832
+ :param indent_before: Whether or not to add whitespace
1833
+ before the string.
1834
+ :param indent_after: Whether or not to add whitespace
1835
+ (a newline) after the string.
1836
+ """
1837
+ space_before = ''
1838
+ if indent_before and indent_level:
1839
+ space_before = (formatter.indent * indent_level)
1840
+
1841
+ space_after = ''
1842
+ if indent_after:
1843
+ space_after = "\n"
1844
+
1845
+ return space_before + s + space_after
1846
+
1847
    def _format_tag(self, eventual_encoding, formatter, opening):
        """Render the markup for this tag itself (its angle-bracket
        delimiters, name and attributes) -- not its contents.

        :param eventual_encoding: The encoding the output is destined
            for; used to rewrite charset-substituting attribute values.
        :param formatter: A Formatter object controlling attribute
            ordering/quoting and void-element style.
        :param opening: True to render an opening tag, False for a
            closing tag.
        """
        if self.hidden:
            # A hidden tag is invisible, although its contents
            # are visible.
            return ''

        # A tag starts with the < character (see below).

        # Then the / character, if this is a closing tag.
        closing_slash = ''
        if not opening:
            closing_slash = '/'

        # Then an optional namespace prefix.
        prefix = ''
        if self.prefix:
            prefix = self.prefix + ":"

        # Then a list of attribute values, if this is an opening tag.
        attribute_string = ''
        if opening:
            attributes = formatter.attributes(self)
            attrs = []
            for key, val in attributes:
                if val is None:
                    # A None value renders as a bare (boolean) attribute.
                    decoded = key
                else:
                    if isinstance(val, list) or isinstance(val, tuple):
                        val = ' '.join(val)
                    elif not isinstance(val, str):
                        val = str(val)
                    elif (
                        isinstance(val, AttributeValueWithCharsetSubstitution)
                        and eventual_encoding is not None
                    ):
                        # NOTE(review): this branch is only reachable if
                        # AttributeValueWithCharsetSubstitution is a str
                        # subclass (otherwise the previous elif converts
                        # it to plain str first) -- the elif ordering
                        # relies on that.
                        val = val.encode(eventual_encoding)

                    text = formatter.attribute_value(val)
                    decoded = (
                        str(key) + '='
                        + formatter.quoted_attribute_value(text))
                attrs.append(decoded)
            if attrs:
                attribute_string = ' ' + ' '.join(attrs)

        # Then an optional closing slash (for a void element in an
        # XML document).
        void_element_closing_slash = ''
        if self.is_empty_element:
            void_element_closing_slash = formatter.void_element_close_prefix or ''

        # Put it all together.
        return '<' + closing_slash + prefix + self.name + attribute_string + void_element_closing_slash + '>'
1900
+
1901
+ def _should_pretty_print(self, indent_level=1):
1902
+ """Should this tag be pretty-printed?
1903
+
1904
+ Most of them should, but some (such as <pre> in HTML
1905
+ documents) should not.
1906
+ """
1907
+ return (
1908
+ indent_level is not None
1909
+ and (
1910
+ not self.preserve_whitespace_tags
1911
+ or self.name not in self.preserve_whitespace_tags
1912
+ )
1913
+ )
1914
+
1915
+ def prettify(self, encoding=None, formatter="minimal"):
1916
+ """Pretty-print this PageElement as a string.
1917
+
1918
+ :param encoding: The eventual encoding of the string. If this is None,
1919
+ a Unicode string will be returned.
1920
+ :param formatter: A Formatter object, or a string naming one of
1921
+ the standard formatters.
1922
+ :return: A Unicode string (if encoding==None) or a bytestring
1923
+ (otherwise).
1924
+ """
1925
+ if encoding is None:
1926
+ return self.decode(True, formatter=formatter)
1927
+ else:
1928
+ return self.encode(encoding, True, formatter=formatter)
1929
+
1930
+ def decode_contents(self, indent_level=None,
1931
+ eventual_encoding=DEFAULT_OUTPUT_ENCODING,
1932
+ formatter="minimal"):
1933
+ """Renders the contents of this tag as a Unicode string.
1934
+
1935
+ :param indent_level: Each line of the rendering will be
1936
+ indented this many levels. (The formatter decides what a
1937
+ 'level' means in terms of spaces or other characters
1938
+ output.) Used internally in recursive calls while
1939
+ pretty-printing.
1940
+
1941
+ :param eventual_encoding: The tag is destined to be
1942
+ encoded into this encoding. decode_contents() is _not_
1943
+ responsible for performing that encoding. This information
1944
+ is passed in so that it can be substituted in if the
1945
+ document contains a <META> tag that mentions the document's
1946
+ encoding.
1947
+
1948
+ :param formatter: A Formatter object, or a string naming one of
1949
+ the standard Formatters.
1950
+
1951
+ """
1952
+ return self.decode(indent_level, eventual_encoding, formatter,
1953
+ iterator=self.descendants)
1954
+
1955
    def encode_contents(
        self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
        """Renders the contents of this PageElement as a bytestring.

        :param indent_level: Each line of the rendering will be
            indented this many levels. (The formatter decides what a
            'level' means in terms of spaces or other characters
            output.) Used internally in recursive calls while
            pretty-printing.

        :param encoding: The bytestring will be in this encoding.

        :param formatter: A Formatter object, or a string naming one of
            the standard Formatters.

        :return: A bytestring.
        """
        contents = self.decode_contents(indent_level, encoding, formatter)
        return contents.encode(encoding)
1975
+
1976
+ # Old method for BS3 compatibility
1977
+ def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
1978
+ prettyPrint=False, indentLevel=0):
1979
+ """Deprecated method for BS3 compatibility."""
1980
+ if not prettyPrint:
1981
+ indentLevel = None
1982
+ return self.encode_contents(
1983
+ indent_level=indentLevel, encoding=encoding)
1984
+
1985
+ #Soup methods
1986
+
1987
+ def find(self, name=None, attrs={}, recursive=True, string=None,
1988
+ **kwargs):
1989
+ """Look in the children of this PageElement and find the first
1990
+ PageElement that matches the given criteria.
1991
+
1992
+ All find_* methods take a common set of arguments. See the online
1993
+ documentation for detailed explanations.
1994
+
1995
+ :param name: A filter on tag name.
1996
+ :param attrs: A dictionary of filters on attribute values.
1997
+ :param recursive: If this is True, find() will perform a
1998
+ recursive search of this PageElement's children. Otherwise,
1999
+ only the direct children will be considered.
2000
+ :param limit: Stop looking after finding this many results.
2001
+ :kwargs: A dictionary of filters on attribute values.
2002
+ :return: A PageElement.
2003
+ :rtype: bs4.element.Tag | bs4.element.NavigableString
2004
+ """
2005
+ r = None
2006
+ l = self.find_all(name, attrs, recursive, string, 1, _stacklevel=3,
2007
+ **kwargs)
2008
+ if l:
2009
+ r = l[0]
2010
+ return r
2011
+ findChild = find #BS2
2012
+
2013
+ def find_all(self, name=None, attrs={}, recursive=True, string=None,
2014
+ limit=None, **kwargs):
2015
+ """Look in the children of this PageElement and find all
2016
+ PageElements that match the given criteria.
2017
+
2018
+ All find_* methods take a common set of arguments. See the online
2019
+ documentation for detailed explanations.
2020
+
2021
+ :param name: A filter on tag name.
2022
+ :param attrs: A dictionary of filters on attribute values.
2023
+ :param recursive: If this is True, find_all() will perform a
2024
+ recursive search of this PageElement's children. Otherwise,
2025
+ only the direct children will be considered.
2026
+ :param limit: Stop looking after finding this many results.
2027
+ :kwargs: A dictionary of filters on attribute values.
2028
+ :return: A ResultSet of PageElements.
2029
+ :rtype: bs4.element.ResultSet
2030
+ """
2031
+ generator = self.descendants
2032
+ if not recursive:
2033
+ generator = self.children
2034
+ _stacklevel = kwargs.pop('_stacklevel', 2)
2035
+ return self._find_all(name, attrs, string, limit, generator,
2036
+ _stacklevel=_stacklevel+1, **kwargs)
2037
+ findAll = find_all # BS3
2038
+ findChildren = find_all # BS2
2039
+
2040
+ #Generator methods
2041
+ @property
2042
+ def children(self):
2043
+ """Iterate over all direct children of this PageElement.
2044
+
2045
+ :yield: A sequence of PageElements.
2046
+ """
2047
+ # return iter() to make the purpose of the method clear
2048
+ return iter(self.contents) # XXX This seems to be untested.
2049
+
2050
+ @property
2051
+ def self_and_descendants(self):
2052
+ """Iterate over this PageElement and its children in a
2053
+ breadth-first sequence.
2054
+
2055
+ :yield: A sequence of PageElements.
2056
+ """
2057
+ if not self.hidden:
2058
+ yield self
2059
+ for i in self.descendants:
2060
+ yield i
2061
+
2062
+ @property
2063
+ def descendants(self):
2064
+ """Iterate over all children of this PageElement in a
2065
+ breadth-first sequence.
2066
+
2067
+ :yield: A sequence of PageElements.
2068
+ """
2069
+ if not len(self.contents):
2070
+ return
2071
+ stopNode = self._last_descendant().next_element
2072
+ current = self.contents[0]
2073
+ while current is not stopNode:
2074
+ yield current
2075
+ current = current.next_element
2076
+
2077
+ # CSS selector code
2078
+ def select_one(self, selector, namespaces=None, **kwargs):
2079
+ """Perform a CSS selection operation on the current element.
2080
+
2081
+ :param selector: A CSS selector.
2082
+
2083
+ :param namespaces: A dictionary mapping namespace prefixes
2084
+ used in the CSS selector to namespace URIs. By default,
2085
+ Beautiful Soup will use the prefixes it encountered while
2086
+ parsing the document.
2087
+
2088
+ :param kwargs: Keyword arguments to be passed into Soup Sieve's
2089
+ soupsieve.select() method.
2090
+
2091
+ :return: A Tag.
2092
+ :rtype: bs4.element.Tag
2093
+ """
2094
+ return self.css.select_one(selector, namespaces, **kwargs)
2095
+
2096
+ def select(self, selector, namespaces=None, limit=None, **kwargs):
2097
+ """Perform a CSS selection operation on the current element.
2098
+
2099
+ This uses the SoupSieve library.
2100
+
2101
+ :param selector: A string containing a CSS selector.
2102
+
2103
+ :param namespaces: A dictionary mapping namespace prefixes
2104
+ used in the CSS selector to namespace URIs. By default,
2105
+ Beautiful Soup will use the prefixes it encountered while
2106
+ parsing the document.
2107
+
2108
+ :param limit: After finding this number of results, stop looking.
2109
+
2110
+ :param kwargs: Keyword arguments to be passed into SoupSieve's
2111
+ soupsieve.select() method.
2112
+
2113
+ :return: A ResultSet of Tags.
2114
+ :rtype: bs4.element.ResultSet
2115
+ """
2116
+ return self.css.select(selector, namespaces, limit, **kwargs)
2117
+
2118
    @property
    def css(self):
        """Return an interface to the CSS selector API."""
        # A fresh CSS wrapper is constructed on every access.
        # NOTE(review): presumably CSS() is a cheap, stateless wrapper
        # around this element -- confirm before caching it here.
        return CSS(self)
2122
+
2123
+ # Old names for backwards compatibility
2124
+ def childGenerator(self):
2125
+ """Deprecated generator."""
2126
+ return self.children
2127
+
2128
+ def recursiveChildGenerator(self):
2129
+ """Deprecated generator."""
2130
+ return self.descendants
2131
+
2132
+ def has_key(self, key):
2133
+ """Deprecated method. This was kind of misleading because has_key()
2134
+ (attributes) was different from __in__ (contents).
2135
+
2136
+ has_key() is gone in Python 3, anyway.
2137
+ """
2138
+ warnings.warn(
2139
+ 'has_key is deprecated. Use has_attr(key) instead.',
2140
+ DeprecationWarning, stacklevel=2
2141
+ )
2142
+ return self.has_attr(key)
2143
+
2144
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    string).

    This is primarily used to underpin the find_* methods, but you can
    create one yourself and pass it in as `parse_only` to the
    `BeautifulSoup` constructor, to parse a subset of a large
    document.
    """

    def __init__(self, name=None, attrs={}, string=None, **kwargs):
        """Constructor.

        The SoupStrainer constructor takes the same arguments passed
        into the find_* methods. See the online documentation for
        detailed explanations.

        :param name: A filter on tag name.
        :param attrs: A dictionary of filters on attribute values.
        :param string: A filter for a NavigableString with specific text.
        :kwargs: A dictionary of filters on attribute values.
        """
        if string is None and 'text' in kwargs:
            # Accept the old 'text' keyword as an alias for 'string'.
            string = kwargs.pop('text')
            warnings.warn(
                "The 'text' argument to the SoupStrainer constructor is deprecated. Use 'string' instead.",
                DeprecationWarning, stacklevel=2
            )

        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None

        if 'class_' in kwargs:
            # Treat class_="foo" as a search for the 'class'
            # attribute, overriding any non-dict value for attrs.
            kwargs['class'] = kwargs['class_']
            del kwargs['class_']

        if kwargs:
            if attrs:
                # Merge without mutating the caller's dict.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in list(attrs.items()):
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.string = self._normalize_search_value(string)

        # DEPRECATED but just in case someone is checking this.
        self.text = self.string

    def _normalize_search_value(self, value):
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, str) or isinstance(value, Callable) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value

        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, str)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value

        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return str(str(value))

    def __str__(self):
        """A human-readable representation of this SoupStrainer."""
        if self.string:
            return self.string
        else:
            return "%s|%s" % (self.name, self.attrs)

    def search_tag(self, markup_name=None, markup_attrs={}):
        """Check whether a Tag with the given name and attributes would
        match this SoupStrainer.

        Used prospectively to decide whether to even bother creating a Tag
        object.

        :param markup_name: A tag name as found in some markup.
        :param markup_attrs: A dictionary of attributes as found in some markup.

        :return: True if the prospective tag would match this SoupStrainer;
            False otherwise.
        """
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            # A whole Tag was passed in; it serves as both name and
            # attribute source (Tag supports .get()).
            markup = markup_name
            markup_attrs = markup

        if isinstance(self.name, str):
            # Optimization for a very common case where the user is
            # searching for a tag with one specific name, and we're
            # looking at a tag with a different name.
            if markup and not markup.prefix and self.name != markup.name:
                 return False

        call_function_with_tag_data = (
            isinstance(self.name, Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        # Lazily build a dict view of the markup's
                        # attributes on the first attribute filter.
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        if found and self.string and not self._matches(found.string, self.string):
            found = None
        return found

    # For BS3 compatibility.
    searchTag = search_tag

    def search(self, markup):
        """Find all items in `markup` that match this SoupStrainer.

        Used by the core _find_all() method, which is ultimately
        called by all find_* methods.

        :param markup: A PageElement or a list of them.
        """
        # print('looking for %s in %s' % (self, markup))
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.string or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, str):
            if not self.name and not self.attrs and self._matches(markup, self.string):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found

    def _matches(self, markup, match_against, already_tried=None):
        # print(u"Matching %s against %s" % (markup, match_against))
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            for item in markup:
                if self._matches(item, match_against):
                    return True
            # We didn't match any particular value of the multivalue
            # attribute, but maybe we match the attribute value when
            # considered as a string.
            if self._matches(' '.join(markup), match_against):
                return True
            return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        original_markup = markup
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against

        if (hasattr(match_against, '__iter__')
            and not isinstance(match_against, str)):
            # We're asked to match against an iterable of items.
            # The markup must be match at least one item in the
            # iterable. We'll try each one in turn.
            #
            # To avoid infinite recursion we need to keep track of
            # items we've already seen.
            if not already_tried:
                already_tried = set()
            for item in match_against:
                if item.__hash__:
                    key = item
                else:
                    # Unhashable candidates are tracked by identity.
                    key = id(item)
                if key in already_tried:
                    continue
                else:
                    already_tried.add(key)
                    if self._matches(original_markup, item, already_tried):
                        return True
            else:
                return False

        # Beyond this point we might need to run the test twice: once against
        # the tag's name and once against its prefixed name.
        match = False

        if not match and isinstance(match_against, str):
            # Exact string match
            match = markup == match_against

        if not match and hasattr(match_against, 'search'):
            # Regexp match
            return match_against.search(markup)

        if (not match
            and isinstance(original_markup, Tag)
            and original_markup.prefix):
            # Try the whole thing again with the prefixed tag name.
            return self._matches(
                original_markup.prefix + ':' + original_markup.name, match_against
            )

        return match
2417
+
2418
+
2419
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""

    def __init__(self, source, result=()):
        """Constructor.

        :param source: A SoupStrainer.
        :param result: A list of PageElements.
        """
        super().__init__(result)
        self.source = source

    def __getattr__(self, key):
        """Raise a helpful exception to explain a common code fix."""
        # Only triggered for names not found on list/ResultSet, which
        # usually means the caller expected a single element.
        raise AttributeError(
            "ResultSet object has no attribute '%s'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?" % key
        )
vlmpy310/lib/python3.10/site-packages/bs4/formatter.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bs4.dammit import EntitySubstitution
2
+
3
class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `PageElement.encode`. Most people won't need to think about
    formatters, and most people who need to think about them can pass
    in one of these predefined strings as `formatter` rather than
    making a new Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """
    # Registries of XML and HTML formatters.
    XML_FORMATTERS = {}
    HTML_FORMATTERS = {}

    # Symbolic names for the two markup dialects a Formatter can target.
    HTML = 'html'
    XML = 'xml'

    # Per-keyword defaults used when formatting HTML; XML gets empty
    # defaults instead (see _default).
    HTML_DEFAULTS = dict(
        cdata_containing_tags=set(["script", "style"]),
    )

    def _default(self, language, value, kwarg):
        """Pick an effective value for a constructor keyword argument.

        :param language: Formatter.XML or Formatter.HTML.
        :param value: The value the caller passed in, or None.
        :param kwarg: The keyword argument's name, used to look up an
            HTML-specific default in HTML_DEFAULTS.
        :return: `value` if the caller provided one; otherwise an empty
            set for XML, or the HTML default for HTML.
        """
        if value is not None:
            return value
        if language == self.XML:
            return set()
        return self.HTML_DEFAULTS[kwarg]

    def __init__(
            self, language=None, entity_substitution=None,
            void_element_close_prefix='/', cdata_containing_tags=None,
            empty_attributes_are_booleans=False, indent=1,
    ):
        r"""Constructor.

        :param language: This should be Formatter.XML if you are formatting
           XML markup and Formatter.HTML if you are formatting HTML markup.

        :param entity_substitution: A function to call to replace special
           characters with XML/HTML entities. For examples, see
           bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
        :param void_element_close_prefix: By default, void elements
           are represented as <tag/> (XML rules) rather than <tag>
           (HTML rules). To get <tag>, pass in the empty string.
        :param cdata_containing_tags: The list of tags that are defined
           as containing CDATA in this dialect. For example, in HTML,
           <script> and <style> tags are defined as containing CDATA,
           and their contents should not be formatted.
        :param empty_attributes_are_booleans: Render attributes whose value
            is the empty string as HTML-style boolean attributes.
            (Attributes whose value is None are always rendered this way.)

        :param indent: If indent is a non-negative integer or string,
            then the contents of elements will be indented
            appropriately when pretty-printing. An indent level of 0,
            negative, or "" will only insert newlines. Using a
            positive integer indent indents that many spaces per
            level. If indent is a string (such as "\t"), that string
            is used to indent each level. The default behavior is to
            indent one space per level.
        """
        self.language = language
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            language, cdata_containing_tags, 'cdata_containing_tags'
        )
        self.empty_attributes_are_booleans = empty_attributes_are_booleans
        # Normalize `indent` into the string inserted once per
        # indentation level when pretty-printing.
        if indent is None:
            indent = 0
        if isinstance(indent, int):
            # Negative levels behave like 0: newlines only, no spaces.
            # (bools are ints here, so indent=True means one space.)
            indent = ' ' * max(0, indent)
        elif not isinstance(indent, str):
            # Unrecognized types fall back to the one-space default.
            indent = ' '
        self.indent = indent

    def substitute(self, ns):
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in self.cdata_containing_tags):
            # Text inside a CDATA-containing tag (e.g. <script>) must
            # be passed through untouched.
            return ns
        # Substitute.
        return self.entity_substitution(ns)

    def attribute_value(self, value):
        """Process the value of an attribute.

        :param value: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        return self.substitute(value)

    def attributes(self, tag):
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then attributes whose
        values are set to the empty string will be treated as boolean
        attributes.
        """
        if tag.attrs is None:
            return []
        return sorted(
            (k, (None if self.empty_attributes_are_booleans and v == '' else v))
            for k, v in list(tag.attrs.items())
        )
147
+
148
class HTMLFormatter(Formatter):
    """A Formatter preconfigured for HTML output."""

    REGISTRY = {}

    def __init__(self, *args, **kwargs):
        # Pin the language to HTML; all other options pass through.
        super().__init__(self.HTML, *args, **kwargs)
153
+
154
+
155
class XMLFormatter(Formatter):
    """A Formatter preconfigured for XML output."""

    REGISTRY = {}

    def __init__(self, *args, **kwargs):
        # Pin the language to XML; all other options pass through.
        super().__init__(self.XML, *args, **kwargs)
160
+
161
+
162
# Set up aliases for the default formatters, keyed by the strings users
# may pass as the `formatter` argument.

# HTML aliases:
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix=None,
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
    entity_substitution=None
)

# XML aliases:
XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
# BUG FIX: this entry used to be built as
#     Formatter(Formatter(Formatter.XML, entity_substitution=None))
# which passed a Formatter *instance* as the `language` argument and
# registered a plain Formatter (with the HTML cdata-tag defaults)
# instead of an XMLFormatter. Register a real XMLFormatter that
# performs no entity substitution.
XMLFormatter.REGISTRY[None] = XMLFormatter(
    entity_substitution=None
)
vlmpy310/lib/python3.10/site-packages/bs4/tests/__init__.py ADDED
@@ -0,0 +1,1177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # encoding: utf-8
2
+ """Helper classes for tests."""
3
+
4
+ # Use of this source code is governed by the MIT license.
5
+ __license__ = "MIT"
6
+
7
+ import pickle
8
+ import copy
9
+ import functools
10
+ import warnings
11
+ import pytest
12
+ from bs4 import BeautifulSoup
13
+ from bs4.element import (
14
+ CharsetMetaAttributeValue,
15
+ Comment,
16
+ ContentMetaAttributeValue,
17
+ Doctype,
18
+ PYTHON_SPECIFIC_ENCODINGS,
19
+ SoupStrainer,
20
+ Script,
21
+ Stylesheet,
22
+ Tag
23
+ )
24
+
25
+ from bs4.builder import (
26
+ DetectsXMLParsedAsHTML,
27
+ HTMLParserTreeBuilder,
28
+ XMLParsedAsHTMLWarning,
29
+ )
30
# The tree builder used by tests that don't request a specific one:
# the stdlib html.parser-based builder, which needs no third parties.
default_builder = HTMLParserTreeBuilder

# Some tests depend on specific third-party libraries. We use
# @pytest.mark.skipIf on the following conditionals to skip them
# if the libraries are not installed.
try:
    from soupsieve import SelectorSyntaxError
    SOUP_SIEVE_PRESENT = True
except ImportError:
    SOUP_SIEVE_PRESENT = False

try:
    import html5lib
    HTML5LIB_PRESENT = True
except ImportError:
    HTML5LIB_PRESENT = False

try:
    import lxml.etree
    LXML_PRESENT = True
    LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError:
    LXML_PRESENT = False
    # Sentinel version tuple that compares lower than any real release.
    LXML_VERSION = (0,)

# A grab-bag of invalid markup constructs. Smoke tests feed this to
# each parser to verify that bad input at least doesn't crash it.
BAD_DOCUMENT = """A bare string
<!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd">
<!DOCTYPE xsl:stylesheet PUBLIC "htmlent.dtd">
<div><![CDATA[A CDATA section where it doesn't belong]]></div>
<div><svg><![CDATA[HTML5 does allow CDATA sections in SVG]]></svg></div>
<div>A <meta> tag</div>
<div>A <br> tag that supposedly has contents.</br></div>
<div>AT&T</div>
<div><textarea>Within a textarea, markup like <b> tags and <&<&amp; should be treated as literal</textarea></div>
<div><script>if (i < 2) { alert("<b>Markup within script tags should be treated as literal.</b>"); }</script></div>
<div>This numeric entity is missing the final semicolon: <x t="pi&#241ata"></div>
<div><a href="http://example.com/</a> that attribute value never got closed</div>
<div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div>
<! This document starts with a bogus declaration ><div>a</div>
<div>This document contains <!an incomplete declaration <div>(do you see it?)</div>
<div>This document ends with <!an incomplete declaration
<div><a style={height:21px;}>That attribute value was bogus</a></div>
<! DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">The doctype is invalid because it contains extra whitespace
<div><table><td nowrap>That boolean attribute had no value</td></table></div>
<div>Here's a nonexistent entity: &#foo; (do you see it?)</div>
<div>This document ends before the entity finishes: &gt
<div><p>Paragraphs shouldn't contain block display elements, but this one does: <dl><dt>you see?</dt></p>
<b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b>
<div><table><tr><td>Here's a table</td></tr></table></div>
<div><table id="1"><tr><td>Here's a nested table:<table id="2"><tr><td>foo</td></tr></table></td></div>
<div>This tag contains nothing but whitespace: <b> </b></div>
<div><blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag</div>
<div><table><div>This table contains bare markup</div></table></div>
<div><div id="1">\n <a href="link1">This link is never closed.\n</div>\n<div id="2">\n <div id="3">\n <a href="link2">This link is closed.</a>\n </div>\n</div></div>
<div>This document contains a <!DOCTYPE surprise>surprise doctype</div>
<div><a><B><Cd><EFG>Mixed case tags are folded to lowercase</efg></CD></b></A></div>
<div><our\u2603>Tag name contains Unicode characters</our\u2603></div>
<div><a \u2603="snowman">Attribute name contains Unicode characters</a></div>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
"""
90
+
91
+
92
class SoupTest(object):
    """Mixin of helpers shared by the bs4 test suites: building soups
    with a known tree builder and asserting properties of the result.
    """

    @property
    def default_builder(self):
        # The builder class used when a test doesn't specify one.
        return default_builder

    def soup(self, markup, **kwargs):
        """Build a Beautiful Soup object from markup."""
        # A 'builder' keyword overrides the class default; everything
        # else is forwarded to the BeautifulSoup constructor.
        builder = kwargs.pop('builder', self.default_builder)
        return BeautifulSoup(markup, builder=builder, **kwargs)

    def document_for(self, markup, **kwargs):
        """Turn an HTML fragment into a document.

        The details depend on the builder.
        """
        return self.default_builder(**kwargs).test_fragment_to_document(markup)

    def assert_soup(self, to_parse, compare_parsed_to=None):
        """Parse some markup using Beautiful Soup and verify that
        the output markup is as expected.

        :param to_parse: Markup to parse.
        :param compare_parsed_to: Markup the parse should round-trip
            to; defaults to `to_parse` itself.
        """
        builder = self.default_builder
        obj = BeautifulSoup(to_parse, builder=builder)
        if compare_parsed_to is None:
            compare_parsed_to = to_parse

        # Verify that the documents come out the same.
        assert obj.decode() == self.document_for(compare_parsed_to)

        # Also run some checks on the BeautifulSoup object itself:

        # Verify that every tag that was opened was eventually closed.

        # There are no tags in the open tag counter.
        assert all(v==0 for v in list(obj.open_tag_counter.values()))

        # The only tag in the tag stack is the one for the root
        # document.
        assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack]

    # Backwards-compatible alias for older test code.
    assertSoupEquals = assert_soup

    def assertConnectedness(self, element):
        """Ensure that next_element and previous_element are properly
        set for all descendants of the given element.
        """
        earlier = None
        for e in element.descendants:
            if earlier:
                assert e == earlier.next_element
                assert earlier == e.previous_element
            earlier = e

    def linkage_validator(self, el, _recursive_call=False):
        """Ensure proper linkage throughout the document.

        Recursively walks `el`, checking next/previous element and
        sibling pointers at every node. Returns the last descendant
        visited (for the recursive caller) or None at the top level.
        """
        descendant = None
        # Document element should have no previous element or previous sibling.
        # It also shouldn't have a next sibling.
        if el.parent is None:
            assert el.previous_element is None,\
                "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
                    el, el.previous_element, None
                )
            assert el.previous_sibling is None,\
                "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
                    el, el.previous_sibling, None
                )
            assert el.next_sibling is None,\
                "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format(
                    el, el.next_sibling, None
                )

        idx = 0
        child = None
        last_child = None
        last_idx = len(el.contents) - 1
        for child in el.contents:
            descendant = None

            # Parent should link next element to their first child
            # That child should have no previous sibling
            if idx == 0:
                if el.parent is not None:
                    assert el.next_element is child,\
                        "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format(
                            el, el.next_element, child
                        )
                    assert child.previous_element is el,\
                        "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format(
                            child, child.previous_element, el
                        )
                    assert child.previous_sibling is None,\
                        "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format(
                            child, child.previous_sibling, None
                        )

            # If not the first child, previous index should link as sibling to this index
            # Previous element should match the last index or the last bubbled up descendant
            else:
                assert child.previous_sibling is el.contents[idx - 1],\
                    "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format(
                        child, child.previous_sibling, el.contents[idx - 1]
                    )
                assert el.contents[idx - 1].next_sibling is child,\
                    "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                        el.contents[idx - 1], el.contents[idx - 1].next_sibling, child
                    )

                if last_child is not None:
                    assert child.previous_element is last_child,\
                        "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format(
                            child, child.previous_element, last_child, child.parent.contents
                        )
                    assert last_child.next_element is child,\
                        "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                            last_child, last_child.next_element, child
                        )

            if isinstance(child, Tag) and child.contents:
                # Recurse into the subtree; the return value is its
                # last descendant in document order.
                descendant = self.linkage_validator(child, True)
                # A bubbled up descendant should have no next siblings
                assert descendant.next_sibling is None,\
                    "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                        descendant, descendant.next_sibling, None
                    )

            # Mark last child as either the bubbled up descendant or the current child
            if descendant is not None:
                last_child = descendant
            else:
                last_child = child

            # If last child, there are non next siblings
            if idx == last_idx:
                assert child.next_sibling is None,\
                    "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                        child, child.next_sibling, None
                    )
            idx += 1

        child = descendant if descendant is not None else child
        if child is None:
            child = el

        if not _recursive_call and child is not None:
            # At the top level: the last node's next_element must point
            # to the next sibling of the nearest ancestor that has one,
            # or be None if no such ancestor exists.
            target = el
            while True:
                if target is None:
                    assert child.next_element is None, \
                        "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                            child, child.next_element, None
                        )
                    break
                elif target.next_sibling is not None:
                    assert child.next_element is target.next_sibling, \
                        "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format(
                            child, child.next_element, target.next_sibling
                        )
                    break
                target = target.parent

            # We are done, so nothing to return
            return None
        else:
            # Return the child to the recursive caller
            return child

    def assert_selects(self, tags, should_match):
        """Make sure that the given tags have the correct text.

        This is used in tests that define a bunch of tags, each
        containing a single string, and then select certain strings by
        some mechanism.
        """
        assert [tag.string for tag in tags] == should_match

    def assert_selects_ids(self, tags, should_match):
        """Make sure that the given tags have the correct IDs.

        This is used in tests that define a bunch of tags, each
        containing a single string, and then select certain strings by
        some mechanism.
        """
        assert [tag['id'] for tag in tags] == should_match
277
+
278
+
279
class TreeBuilderSmokeTest(object):
    # Tests that are common to HTML and XML tree builders.

    @pytest.mark.parametrize(
        "multi_valued_attributes",
        [None, {}, dict(b=['class']), {'*': ['notclass']}]
    )
    def test_attribute_not_multi_valued(self, multi_valued_attributes):
        # When 'class' is not configured as multi-valued for <a> tags,
        # its value stays a single whitespace-containing string.
        markup = '<html xmlns="http://www.w3.org/1999/xhtml"><a class="a b c"></html>'
        soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes)
        assert soup.a['class'] == 'a b c'

    @pytest.mark.parametrize(
        "multi_valued_attributes", [dict(a=['class']), {'*': ['class']}]
    )
    def test_attribute_multi_valued(self, multi_valued_attributes):
        # When 'class' is configured as multi-valued, its value is
        # split on whitespace into a list of strings.
        markup = '<a class="a b c">'
        soup = self.soup(
            markup, multi_valued_attributes=multi_valued_attributes
        )
        assert soup.a['class'] == ['a', 'b', 'c']

    def test_invalid_doctype(self):
        # NOTE(review): the first assignment is dead code -- it is
        # immediately overwritten, so only the second markup string is
        # actually parsed. The test only checks that parsing an
        # invalid doctype does not raise.
        markup = '<![if word]>content<![endif]>'
        markup = '<!DOCTYPE html]ff>'
        soup = self.soup(markup)
305
+
306
+ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
307
+
308
+ """A basic test of a treebuilder's competence.
309
+
310
+ Any HTML treebuilder, present or future, should be able to pass
311
+ these tests. With invalid markup, there's room for interpretation,
312
+ and different parsers can handle it differently. But with the
313
+ markup in these tests, there's not much room for interpretation.
314
+ """
315
+
316
+ def test_empty_element_tags(self):
317
+ """Verify that all HTML4 and HTML5 empty element (aka void element) tags
318
+ are handled correctly.
319
+ """
320
+ for name in [
321
+ 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
322
+ 'spacer', 'frame'
323
+ ]:
324
+ soup = self.soup("")
325
+ new_tag = soup.new_tag(name)
326
+ assert new_tag.is_empty_element == True
327
+
328
+ def test_special_string_containers(self):
329
+ soup = self.soup(
330
+ "<style>Some CSS</style><script>Some Javascript</script>"
331
+ )
332
+ assert isinstance(soup.style.string, Stylesheet)
333
+ assert isinstance(soup.script.string, Script)
334
+
335
+ soup = self.soup(
336
+ "<style><!--Some CSS--></style>"
337
+ )
338
+ assert isinstance(soup.style.string, Stylesheet)
339
+ # The contents of the style tag resemble an HTML comment, but
340
+ # it's not treated as a comment.
341
+ assert soup.style.string == "<!--Some CSS-->"
342
+ assert isinstance(soup.style.string, Stylesheet)
343
+
344
+ def test_pickle_and_unpickle_identity(self):
345
+ # Pickling a tree, then unpickling it, yields a tree identical
346
+ # to the original.
347
+ tree = self.soup("<a><b>foo</a>")
348
+ dumped = pickle.dumps(tree, 2)
349
+ loaded = pickle.loads(dumped)
350
+ assert loaded.__class__ == BeautifulSoup
351
+ assert loaded.decode() == tree.decode()
352
+
353
+ def assertDoctypeHandled(self, doctype_fragment):
354
+ """Assert that a given doctype string is handled correctly."""
355
+ doctype_str, soup = self._document_with_doctype(doctype_fragment)
356
+
357
+ # Make sure a Doctype object was created.
358
+ doctype = soup.contents[0]
359
+ assert doctype.__class__ == Doctype
360
+ assert doctype == doctype_fragment
361
+ assert soup.encode("utf8")[:len(doctype_str)] == doctype_str
362
+
363
+ # Make sure that the doctype was correctly associated with the
364
+ # parse tree and that the rest of the document parsed.
365
+ assert soup.p.contents[0] == 'foo'
366
+
367
+ def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"):
368
+ """Generate and parse a document with the given doctype."""
369
+ doctype = '<!%s %s>' % (doctype_string, doctype_fragment)
370
+ markup = doctype + '\n<p>foo</p>'
371
+ soup = self.soup(markup)
372
+ return doctype.encode("utf8"), soup
373
+
374
+ def test_normal_doctypes(self):
375
+ """Make sure normal, everyday HTML doctypes are handled correctly."""
376
+ self.assertDoctypeHandled("html")
377
+ self.assertDoctypeHandled(
378
+ 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
379
+
380
+ def test_empty_doctype(self):
381
+ soup = self.soup("<!DOCTYPE>")
382
+ doctype = soup.contents[0]
383
+ assert "" == doctype.strip()
384
+
385
+ def test_mixed_case_doctype(self):
386
+ # A lowercase or mixed-case doctype becomes a Doctype.
387
+ for doctype_fragment in ("doctype", "DocType"):
388
+ doctype_str, soup = self._document_with_doctype(
389
+ "html", doctype_fragment
390
+ )
391
+
392
+ # Make sure a Doctype object was created and that the DOCTYPE
393
+ # is uppercase.
394
+ doctype = soup.contents[0]
395
+ assert doctype.__class__ == Doctype
396
+ assert doctype == "html"
397
+ assert soup.encode("utf8")[:len(doctype_str)] == b"<!DOCTYPE html>"
398
+
399
+ # Make sure that the doctype was correctly associated with the
400
+ # parse tree and that the rest of the document parsed.
401
+ assert soup.p.contents[0] == 'foo'
402
+
403
+ def test_public_doctype_with_url(self):
404
+ doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
405
+ self.assertDoctypeHandled(doctype)
406
+
407
+ def test_system_doctype(self):
408
+ self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
409
+
410
+ def test_namespaced_system_doctype(self):
411
+ # We can handle a namespaced doctype with a system ID.
412
+ self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
413
+
414
+ def test_namespaced_public_doctype(self):
415
+ # Test a namespaced doctype with a public id.
416
+ self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
417
+
418
+ def test_real_xhtml_document(self):
419
+ """A real XHTML document should come out more or less the same as it went in."""
420
+ markup = b"""<?xml version="1.0" encoding="utf-8"?>
421
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
422
+ <html xmlns="http://www.w3.org/1999/xhtml">
423
+ <head><title>Hello.</title></head>
424
+ <body>Goodbye.</body>
425
+ </html>"""
426
+ with warnings.catch_warnings(record=True) as w:
427
+ soup = self.soup(markup)
428
+ assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"")
429
+
430
+ # No warning was issued about parsing an XML document as HTML,
431
+ # because XHTML is both.
432
+ assert w == []
433
+
434
+
435
+ def test_namespaced_html(self):
436
+ # When a namespaced XML document is parsed as HTML it should
437
+ # be treated as HTML with weird tag names.
438
+ markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>"""
439
+ with warnings.catch_warnings(record=True) as w:
440
+ soup = self.soup(markup)
441
+
442
+ assert 2 == len(soup.find_all("ns1:foo"))
443
+
444
+ # n.b. no "you're parsing XML as HTML" warning was given
445
+ # because there was no XML declaration.
446
+ assert [] == w
447
+
448
+ def test_detect_xml_parsed_as_html(self):
449
+ # A warning is issued when parsing an XML document as HTML,
450
+ # but basic stuff should still work.
451
+ markup = b"""<?xml version="1.0" encoding="utf-8"?><tag>string</tag>"""
452
+ with warnings.catch_warnings(record=True) as w:
453
+ soup = self.soup(markup)
454
+ assert soup.tag.string == 'string'
455
+ [warning] = w
456
+ assert isinstance(warning.message, XMLParsedAsHTMLWarning)
457
+ assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE
458
+
459
+ # NOTE: the warning is not issued if the document appears to
460
+ # be XHTML (tested with test_real_xhtml_document in the
461
+ # superclass) or if there is no XML declaration (tested with
462
+ # test_namespaced_html in the superclass).
463
+
464
+ def test_processing_instruction(self):
465
+ # We test both Unicode and bytestring to verify that
466
+ # process_markup correctly sets processing_instruction_class
467
+ # even when the markup is already Unicode and there is no
468
+ # need to process anything.
469
+ markup = """<?PITarget PIContent?>"""
470
+ soup = self.soup(markup)
471
+ assert markup == soup.decode()
472
+
473
+ markup = b"""<?PITarget PIContent?>"""
474
+ soup = self.soup(markup)
475
+ assert markup == soup.encode("utf8")
476
+
477
+ def test_deepcopy(self):
478
+ """Make sure you can copy the tree builder.
479
+
480
+ This is important because the builder is part of a
481
+ BeautifulSoup object, and we want to be able to copy that.
482
+ """
483
+ copy.deepcopy(self.default_builder)
484
+
485
+ def test_p_tag_is_never_empty_element(self):
486
+ """A <p> tag is never designated as an empty-element tag.
487
+
488
+ Even if the markup shows it as an empty-element tag, it
489
+ shouldn't be presented that way.
490
+ """
491
+ soup = self.soup("<p/>")
492
+ assert not soup.p.is_empty_element
493
+ assert str(soup.p) == "<p></p>"
494
+
495
+ def test_unclosed_tags_get_closed(self):
496
+ """A tag that's not closed by the end of the document should be closed.
497
+
498
+ This applies to all tags except empty-element tags.
499
+ """
500
+ self.assert_soup("<p>", "<p></p>")
501
+ self.assert_soup("<b>", "<b></b>")
502
+
503
+ self.assert_soup("<br>", "<br/>")
504
+
505
+ def test_br_is_always_empty_element_tag(self):
506
+ """A <br> tag is designated as an empty-element tag.
507
+
508
+ Some parsers treat <br></br> as one <br/> tag, some parsers as
509
+ two tags, but it should always be an empty-element tag.
510
+ """
511
+ soup = self.soup("<br></br>")
512
+ assert soup.br.is_empty_element
513
+ assert str(soup.br) == "<br/>"
514
+
515
+ def test_nested_formatting_elements(self):
516
+ self.assert_soup("<em><em></em></em>")
517
+
518
+ def test_double_head(self):
519
+ html = '''<!DOCTYPE html>
520
+ <html>
521
+ <head>
522
+ <title>Ordinary HEAD element test</title>
523
+ </head>
524
+ <script type="text/javascript">
525
+ alert("Help!");
526
+ </script>
527
+ <body>
528
+ Hello, world!
529
+ </body>
530
+ </html>
531
+ '''
532
+ soup = self.soup(html)
533
+ assert "text/javascript" == soup.find('script')['type']
534
+
535
+ def test_comment(self):
536
+ # Comments are represented as Comment objects.
537
+ markup = "<p>foo<!--foobar-->baz</p>"
538
+ self.assert_soup(markup)
539
+
540
+ soup = self.soup(markup)
541
+ comment = soup.find(string="foobar")
542
+ assert comment.__class__ == Comment
543
+
544
+ # The comment is properly integrated into the tree.
545
+ foo = soup.find(string="foo")
546
+ assert comment == foo.next_element
547
+ baz = soup.find(string="baz")
548
+ assert comment == baz.previous_element
549
+
550
+ def test_preserved_whitespace_in_pre_and_textarea(self):
551
+ """Whitespace must be preserved in <pre> and <textarea> tags,
552
+ even if that would mean not prettifying the markup.
553
+ """
554
+ pre_markup = "<pre>a z</pre>\n"
555
+ textarea_markup = "<textarea> woo\nwoo </textarea>\n"
556
+ self.assert_soup(pre_markup)
557
+ self.assert_soup(textarea_markup)
558
+
559
+ soup = self.soup(pre_markup)
560
+ assert soup.pre.prettify() == pre_markup
561
+
562
+ soup = self.soup(textarea_markup)
563
+ assert soup.textarea.prettify() == textarea_markup
564
+
565
+ soup = self.soup("<textarea></textarea>")
566
+ assert soup.textarea.prettify() == "<textarea></textarea>\n"
567
+
568
+ def test_nested_inline_elements(self):
569
+ """Inline elements can be nested indefinitely."""
570
+ b_tag = "<b>Inside a B tag</b>"
571
+ self.assert_soup(b_tag)
572
+
573
+ nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
574
+ self.assert_soup(nested_b_tag)
575
+
576
+ double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
577
+ self.assert_soup(nested_b_tag)
578
+
579
+ def test_nested_block_level_elements(self):
580
+ """Block elements can be nested."""
581
+ soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
582
+ blockquote = soup.blockquote
583
+ assert blockquote.p.b.string == 'Foo'
584
+ assert blockquote.b.string == 'Foo'
585
+
586
+ def test_correctly_nested_tables(self):
587
+ """One table can go inside another one."""
588
+ markup = ('<table id="1">'
589
+ '<tr>'
590
+ "<td>Here's another table:"
591
+ '<table id="2">'
592
+ '<tr><td>foo</td></tr>'
593
+ '</table></td>')
594
+
595
+ self.assert_soup(
596
+ markup,
597
+ '<table id="1"><tr><td>Here\'s another table:'
598
+ '<table id="2"><tr><td>foo</td></tr></table>'
599
+ '</td></tr></table>')
600
+
601
+ self.assert_soup(
602
+ "<table><thead><tr><td>Foo</td></tr></thead>"
603
+ "<tbody><tr><td>Bar</td></tr></tbody>"
604
+ "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
605
+
606
+ def test_multivalued_attribute_with_whitespace(self):
607
+ # Whitespace separating the values of a multi-valued attribute
608
+ # should be ignored.
609
+
610
+ markup = '<div class=" foo bar "></a>'
611
+ soup = self.soup(markup)
612
+ assert ['foo', 'bar'] == soup.div['class']
613
+
614
+ # If you search by the literal name of the class it's like the whitespace
615
+ # wasn't there.
616
+ assert soup.div == soup.find('div', class_="foo bar")
617
+
618
+ def test_deeply_nested_multivalued_attribute(self):
619
+ # html5lib can set the attributes of the same tag many times
620
+ # as it rearranges the tree. This has caused problems with
621
+ # multivalued attributes.
622
+ markup = '<table><div><div class="css"></div></div></table>'
623
+ soup = self.soup(markup)
624
+ assert ["css"] == soup.div.div['class']
625
+
626
+ def test_multivalued_attribute_on_html(self):
627
+ # html5lib uses a different API to set the attributes ot the
628
+ # <html> tag. This has caused problems with multivalued
629
+ # attributes.
630
+ markup = '<html class="a b"></html>'
631
+ soup = self.soup(markup)
632
+ assert ["a", "b"] == soup.html['class']
633
+
634
+ def test_angle_brackets_in_attribute_values_are_escaped(self):
635
+ self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
636
+
637
+ def test_strings_resembling_character_entity_references(self):
638
+ # "&T" and "&p" look like incomplete character entities, but they are
639
+ # not.
640
+ self.assert_soup(
641
+ "<p>&bull; AT&T is in the s&p 500</p>",
642
+ "<p>\u2022 AT&amp;T is in the s&amp;p 500</p>"
643
+ )
644
+
645
+ def test_apos_entity(self):
646
+ self.assert_soup(
647
+ "<p>Bob&apos;s Bar</p>",
648
+ "<p>Bob's Bar</p>",
649
+ )
650
+
651
+ def test_entities_in_foreign_document_encoding(self):
652
+ # &#147; and &#148; are invalid numeric entities referencing
653
+ # Windows-1252 characters. &#45; references a character common
654
+ # to Windows-1252 and Unicode, and &#9731; references a
655
+ # character only found in Unicode.
656
+ #
657
+ # All of these entities should be converted to Unicode
658
+ # characters.
659
+ markup = "<p>&#147;Hello&#148; &#45;&#9731;</p>"
660
+ soup = self.soup(markup)
661
+ assert "“Hello” -☃" == soup.p.string
662
+
663
+ def test_entities_in_attributes_converted_to_unicode(self):
664
+ expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
665
+ self.assert_soup('<p id="pi&#241;ata"></p>', expect)
666
+ self.assert_soup('<p id="pi&#xf1;ata"></p>', expect)
667
+ self.assert_soup('<p id="pi&#Xf1;ata"></p>', expect)
668
+ self.assert_soup('<p id="pi&ntilde;ata"></p>', expect)
669
+
670
+ def test_entities_in_text_converted_to_unicode(self):
671
+ expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
672
+ self.assert_soup("<p>pi&#241;ata</p>", expect)
673
+ self.assert_soup("<p>pi&#xf1;ata</p>", expect)
674
+ self.assert_soup("<p>pi&#Xf1;ata</p>", expect)
675
+ self.assert_soup("<p>pi&ntilde;ata</p>", expect)
676
+
677
+ def test_quot_entity_converted_to_quotation_mark(self):
678
+ self.assert_soup("<p>I said &quot;good day!&quot;</p>",
679
+ '<p>I said "good day!"</p>')
680
+
681
+ def test_out_of_range_entity(self):
682
+ expect = "\N{REPLACEMENT CHARACTER}"
683
+ self.assert_soup("&#10000000000000;", expect)
684
+ self.assert_soup("&#x10000000000000;", expect)
685
+ self.assert_soup("&#1000000000;", expect)
686
+
687
+ def test_multipart_strings(self):
688
+ "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
689
+ soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
690
+ assert "p" == soup.h2.string.next_element.name
691
+ assert "p" == soup.p.name
692
+ self.assertConnectedness(soup)
693
+
694
+ def test_empty_element_tags(self):
695
+ """Verify consistent handling of empty-element tags,
696
+ no matter how they come in through the markup.
697
+ """
698
+ self.assert_soup('<br/><br/><br/>', "<br/><br/><br/>")
699
+ self.assert_soup('<br /><br /><br />', "<br/><br/><br/>")
700
+
701
+ def test_head_tag_between_head_and_body(self):
702
+ "Prevent recurrence of a bug in the html5lib treebuilder."
703
+ content = """<html><head></head>
704
+ <link></link>
705
+ <body>foo</body>
706
+ </html>
707
+ """
708
+ soup = self.soup(content)
709
+ assert soup.html.body is not None
710
+ self.assertConnectedness(soup)
711
+
712
+ def test_multiple_copies_of_a_tag(self):
713
+ "Prevent recurrence of a bug in the html5lib treebuilder."
714
+ content = """<!DOCTYPE html>
715
+ <html>
716
+ <body>
717
+ <article id="a" >
718
+ <div><a href="1"></div>
719
+ <footer>
720
+ <a href="2"></a>
721
+ </footer>
722
+ </article>
723
+ </body>
724
+ </html>
725
+ """
726
+ soup = self.soup(content)
727
+ self.assertConnectedness(soup.article)
728
+
729
+ def test_basic_namespaces(self):
730
+ """Parsers don't need to *understand* namespaces, but at the
731
+ very least they should not choke on namespaces or lose
732
+ data."""
733
+
734
+ markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
735
+ soup = self.soup(markup)
736
+ assert markup == soup.encode()
737
+ html = soup.html
738
+ assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns']
739
+ assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml']
740
+ assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg']
741
+
742
+ def test_multivalued_attribute_value_becomes_list(self):
743
+ markup = b'<a class="foo bar">'
744
+ soup = self.soup(markup)
745
+ assert ['foo', 'bar'] == soup.a['class']
746
+
747
+ #
748
+ # Generally speaking, tests below this point are more tests of
749
+ # Beautiful Soup than tests of the tree builders. But parsers are
750
+ # weird, so we run these tests separately for every tree builder
751
+ # to detect any differences between them.
752
+ #
753
+
754
+ def test_can_parse_unicode_document(self):
755
+ # A seemingly innocuous document... but it's in Unicode! And
756
+ # it contains characters that can't be represented in the
757
+ # encoding found in the declaration! The horror!
758
+ markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
759
+ soup = self.soup(markup)
760
+ assert 'Sacr\xe9 bleu!' == soup.body.string
761
+
762
+ def test_soupstrainer(self):
763
+ """Parsers should be able to work with SoupStrainers."""
764
+ strainer = SoupStrainer("b")
765
+ soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
766
+ parse_only=strainer)
767
+ assert soup.decode() == "<b>bold</b>"
768
+
769
+ def test_single_quote_attribute_values_become_double_quotes(self):
770
+ self.assert_soup("<foo attr='bar'></foo>",
771
+ '<foo attr="bar"></foo>')
772
+
773
+ def test_attribute_values_with_nested_quotes_are_left_alone(self):
774
+ text = """<foo attr='bar "brawls" happen'>a</foo>"""
775
+ self.assert_soup(text)
776
+
777
+ def test_attribute_values_with_double_nested_quotes_get_quoted(self):
778
+ text = """<foo attr='bar "brawls" happen'>a</foo>"""
779
+ soup = self.soup(text)
780
+ soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
781
+ self.assert_soup(
782
+ soup.foo.decode(),
783
+ """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")
784
+
785
+ def test_ampersand_in_attribute_value_gets_escaped(self):
786
+ self.assert_soup('<this is="really messed up & stuff"></this>',
787
+ '<this is="really messed up &amp; stuff"></this>')
788
+
789
+ self.assert_soup(
790
+ '<a href="http://example.org?a=1&b=2;3">foo</a>',
791
+ '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')
792
+
793
+ def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
794
+ self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')
795
+
796
+ def test_entities_in_strings_converted_during_parsing(self):
797
+ # Both XML and HTML entities are converted to Unicode characters
798
+ # during parsing.
799
+ text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
800
+ expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
801
+ self.assert_soup(text, expected)
802
+
803
+ def test_smart_quotes_converted_on_the_way_in(self):
804
+ # Microsoft smart quotes are converted to Unicode characters during
805
+ # parsing.
806
+ quote = b"<p>\x91Foo\x92</p>"
807
+ soup = self.soup(quote)
808
+ assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"
809
+
810
+ def test_non_breaking_spaces_converted_on_the_way_in(self):
811
+ soup = self.soup("<a>&nbsp;&nbsp;</a>")
812
+ assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
813
+
814
+ def test_entities_converted_on_the_way_out(self):
815
+ text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
816
+ expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
817
+ soup = self.soup(text)
818
+ assert soup.p.encode("utf-8") == expected
819
+
820
+ def test_real_iso_8859_document(self):
821
+ # Smoke test of interrelated functionality, using an
822
+ # easy-to-understand document.
823
+
824
+ # Here it is in Unicode. Note that it claims to be in ISO-8859-1.
825
+ unicode_html = '<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
826
+
827
+ # That's because we're going to encode it into ISO-8859-1,
828
+ # and use that to test.
829
+ iso_latin_html = unicode_html.encode("iso-8859-1")
830
+
831
+ # Parse the ISO-8859-1 HTML.
832
+ soup = self.soup(iso_latin_html)
833
+
834
+ # Encode it to UTF-8.
835
+ result = soup.encode("utf-8")
836
+
837
+ # What do we expect the result to look like? Well, it would
838
+ # look like unicode_html, except that the META tag would say
839
+ # UTF-8 instead of ISO-8859-1.
840
+ expected = unicode_html.replace("ISO-8859-1", "utf-8")
841
+
842
+ # And, of course, it would be in UTF-8, not Unicode.
843
+ expected = expected.encode("utf-8")
844
+
845
+ # Ta-da!
846
+ assert result == expected
847
+
848
+ def test_real_shift_jis_document(self):
849
+ # Smoke test to make sure the parser can handle a document in
850
+ # Shift-JIS encoding, without choking.
851
+ shift_jis_html = (
852
+ b'<html><head></head><body><pre>'
853
+ b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
854
+ b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
855
+ b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
856
+ b'</pre></body></html>')
857
+ unicode_html = shift_jis_html.decode("shift-jis")
858
+ soup = self.soup(unicode_html)
859
+
860
+ # Make sure the parse tree is correctly encoded to various
861
+ # encodings.
862
+ assert soup.encode("utf-8") == unicode_html.encode("utf-8")
863
+ assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")
864
+
865
+ def test_real_hebrew_document(self):
866
+ # A real-world test to make sure we can convert ISO-8859-9 (a
867
+ # Hebrew encoding) to UTF-8.
868
+ hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
869
+ soup = self.soup(
870
+ hebrew_document, from_encoding="iso8859-8")
871
+ # Some tree builders call it iso8859-8, others call it iso-8859-9.
872
+ # That's not a difference we really care about.
873
+ assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
874
+ assert soup.encode('utf-8') == (
875
+ hebrew_document.decode("iso8859-8").encode("utf-8")
876
+ )
877
+
878
+ def test_meta_tag_reflects_current_encoding(self):
879
+ # Here's the <meta> tag saying that a document is
880
+ # encoded in Shift-JIS.
881
+ meta_tag = ('<meta content="text/html; charset=x-sjis" '
882
+ 'http-equiv="Content-type"/>')
883
+
884
+ # Here's a document incorporating that meta tag.
885
+ shift_jis_html = (
886
+ '<html><head>\n%s\n'
887
+ '<meta http-equiv="Content-language" content="ja"/>'
888
+ '</head><body>Shift-JIS markup goes here.') % meta_tag
889
+ soup = self.soup(shift_jis_html)
890
+
891
+ # Parse the document, and the charset is seemingly unaffected.
892
+ parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
893
+ content = parsed_meta['content']
894
+ assert 'text/html; charset=x-sjis' == content
895
+
896
+ # But that value is actually a ContentMetaAttributeValue object.
897
+ assert isinstance(content, ContentMetaAttributeValue)
898
+
899
+ # And it will take on a value that reflects its current
900
+ # encoding.
901
+ assert 'text/html; charset=utf8' == content.encode("utf8")
902
+
903
+ # For the rest of the story, see TestSubstitutions in
904
+ # test_tree.py.
905
+
906
+ def test_html5_style_meta_tag_reflects_current_encoding(self):
907
+ # Here's the <meta> tag saying that a document is
908
+ # encoded in Shift-JIS.
909
+ meta_tag = ('<meta id="encoding" charset="x-sjis" />')
910
+
911
+ # Here's a document incorporating that meta tag.
912
+ shift_jis_html = (
913
+ '<html><head>\n%s\n'
914
+ '<meta http-equiv="Content-language" content="ja"/>'
915
+ '</head><body>Shift-JIS markup goes here.') % meta_tag
916
+ soup = self.soup(shift_jis_html)
917
+
918
+ # Parse the document, and the charset is seemingly unaffected.
919
+ parsed_meta = soup.find('meta', id="encoding")
920
+ charset = parsed_meta['charset']
921
+ assert 'x-sjis' == charset
922
+
923
+ # But that value is actually a CharsetMetaAttributeValue object.
924
+ assert isinstance(charset, CharsetMetaAttributeValue)
925
+
926
+ # And it will take on a value that reflects its current
927
+ # encoding.
928
+ assert 'utf8' == charset.encode("utf8")
929
+
930
+ def test_python_specific_encodings_not_used_in_charset(self):
931
+ # You can encode an HTML document using a Python-specific
932
+ # encoding, but that encoding won't be mentioned _inside_ the
933
+ # resulting document. Instead, the document will appear to
934
+ # have no encoding.
935
+ for markup in [
936
+ b'<meta charset="utf8"></head>'
937
+ b'<meta id="encoding" charset="utf-8" />'
938
+ ]:
939
+ soup = self.soup(markup)
940
+ for encoding in PYTHON_SPECIFIC_ENCODINGS:
941
+ if encoding in (
942
+ 'idna', 'mbcs', 'oem', 'undefined',
943
+ 'string_escape', 'string-escape'
944
+ ):
945
+ # For one reason or another, these will raise an
946
+ # exception if we actually try to use them, so don't
947
+ # bother.
948
+ continue
949
+ encoded = soup.encode(encoding)
950
+ assert b'meta charset=""' in encoded
951
+ assert encoding.encode("ascii") not in encoded
952
+
953
+ def test_tag_with_no_attributes_can_have_attributes_added(self):
954
+ data = self.soup("<a>text</a>")
955
+ data.a['foo'] = 'bar'
956
+ assert '<a foo="bar">text</a>' == data.a.decode()
957
+
958
+ def test_closing_tag_with_no_opening_tag(self):
959
+ # Without BeautifulSoup.open_tag_counter, the </span> tag will
960
+ # cause _popToTag to be called over and over again as we look
961
+ # for a <span> tag that wasn't there. The result is that 'text2'
962
+ # will show up outside the body of the document.
963
+ soup = self.soup("<body><div><p>text1</p></span>text2</div></body>")
964
+ assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()
965
+
966
+ def test_worst_case(self):
967
+ """Test the worst case (currently) for linking issues."""
968
+
969
+ soup = self.soup(BAD_DOCUMENT)
970
+ self.linkage_validator(soup)
971
+
972
+
973
+ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
974
+
975
+ def test_pickle_and_unpickle_identity(self):
976
+ # Pickling a tree, then unpickling it, yields a tree identical
977
+ # to the original.
978
+ tree = self.soup("<a><b>foo</a>")
979
+ dumped = pickle.dumps(tree, 2)
980
+ loaded = pickle.loads(dumped)
981
+ assert loaded.__class__ == BeautifulSoup
982
+ assert loaded.decode() == tree.decode()
983
+
984
+ def test_docstring_generated(self):
985
+ soup = self.soup("<root/>")
986
+ assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'
987
+
988
+ def test_xml_declaration(self):
989
+ markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
990
+ soup = self.soup(markup)
991
+ assert markup == soup.encode("utf8")
992
+
993
+ def test_python_specific_encodings_not_used_in_xml_declaration(self):
994
+ # You can encode an XML document using a Python-specific
995
+ # encoding, but that encoding won't be mentioned _inside_ the
996
+ # resulting document.
997
+ markup = b"""<?xml version="1.0"?>\n<foo/>"""
998
+ soup = self.soup(markup)
999
+ for encoding in PYTHON_SPECIFIC_ENCODINGS:
1000
+ if encoding in (
1001
+ 'idna', 'mbcs', 'oem', 'undefined',
1002
+ 'string_escape', 'string-escape'
1003
+ ):
1004
+ # For one reason or another, these will raise an
1005
+ # exception if we actually try to use them, so don't
1006
+ # bother.
1007
+ continue
1008
+ encoded = soup.encode(encoding)
1009
+ assert b'<?xml version="1.0"?>' in encoded
1010
+ assert encoding.encode("ascii") not in encoded
1011
+
1012
+ def test_processing_instruction(self):
1013
+ markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
1014
+ soup = self.soup(markup)
1015
+ assert markup == soup.encode("utf8")
1016
+
1017
+ def test_real_xhtml_document(self):
1018
+ """A real XHTML document should come out *exactly* the same as it went in."""
1019
+ markup = b"""<?xml version="1.0" encoding="utf-8"?>
1020
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
1021
+ <html xmlns="http://www.w3.org/1999/xhtml">
1022
+ <head><title>Hello.</title></head>
1023
+ <body>Goodbye.</body>
1024
+ </html>"""
1025
+ soup = self.soup(markup)
1026
+ assert soup.encode("utf-8") == markup
1027
+
1028
+ def test_nested_namespaces(self):
1029
+ doc = b"""<?xml version="1.0" encoding="utf-8"?>
1030
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
1031
+ <parent xmlns="http://ns1/">
1032
+ <child xmlns="http://ns2/" xmlns:ns3="http://ns3/">
1033
+ <grandchild ns3:attr="value" xmlns="http://ns4/"/>
1034
+ </child>
1035
+ </parent>"""
1036
+ soup = self.soup(doc)
1037
+ assert doc == soup.encode()
1038
+
1039
+ def test_formatter_processes_script_tag_for_xml_documents(self):
1040
+ doc = """
1041
+ <script type="text/javascript">
1042
+ </script>
1043
+ """
1044
+ soup = BeautifulSoup(doc, "lxml-xml")
1045
+ # lxml would have stripped this while parsing, but we can add
1046
+ # it later.
1047
+ soup.script.string = 'console.log("< < hey > > ");'
1048
+ encoded = soup.encode()
1049
+ assert b"&lt; &lt; hey &gt; &gt;" in encoded
1050
+
1051
+ def test_can_parse_unicode_document(self):
1052
+ markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
1053
+ soup = self.soup(markup)
1054
+ assert 'Sacr\xe9 bleu!' == soup.root.string
1055
+
1056
+ def test_can_parse_unicode_document_begining_with_bom(self):
1057
+ markup = '\N{BYTE ORDER MARK}<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
1058
+ soup = self.soup(markup)
1059
+ assert 'Sacr\xe9 bleu!' == soup.root.string
1060
+
1061
+ def test_popping_namespaced_tag(self):
1062
+ markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
1063
+ soup = self.soup(markup)
1064
+ assert str(soup.rss) == markup
1065
+
1066
+ def test_docstring_includes_correct_encoding(self):
1067
+ soup = self.soup("<root/>")
1068
+ assert soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'
1069
+
1070
+ def test_large_xml_document(self):
1071
+ """A large XML document should come out the same as it went in."""
1072
+ markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
1073
+ + b'0' * (2**12)
1074
+ + b'</root>')
1075
+ soup = self.soup(markup)
1076
+ assert soup.encode("utf-8") == markup
1077
+
1078
+ def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
1079
+ self.assert_soup("<p>", "<p/>")
1080
+ self.assert_soup("<p>foo</p>")
1081
+
1082
+ def test_namespaces_are_preserved(self):
1083
+ markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
1084
+ soup = self.soup(markup)
1085
+ root = soup.root
1086
+ assert "http://example.com/" == root['xmlns:a']
1087
+ assert "http://example.net/" == root['xmlns:b']
1088
+
1089
+ def test_closing_namespaced_tag(self):
1090
+ markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
1091
+ soup = self.soup(markup)
1092
+ assert str(soup.p) == markup
1093
+
1094
+ def test_namespaced_attributes(self):
1095
+ markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
1096
+ soup = self.soup(markup)
1097
+ assert str(soup.foo) == markup
1098
+
1099
+ def test_namespaced_attributes_xml_namespace(self):
1100
+ markup = '<foo xml:lang="fr">bar</foo>'
1101
+ soup = self.soup(markup)
1102
+ assert str(soup.foo) == markup
1103
+
1104
+ def test_find_by_prefixed_name(self):
1105
+ doc = """<?xml version="1.0" encoding="utf-8"?>
1106
+ <Document xmlns="http://example.com/ns0"
1107
+ xmlns:ns1="http://example.com/ns1"
1108
+ xmlns:ns2="http://example.com/ns2">
1109
+ <ns1:tag>foo</ns1:tag>
1110
+ <ns1:tag>bar</ns1:tag>
1111
+ <ns2:tag key="value">baz</ns2:tag>
1112
+ </Document>
1113
+ """
1114
+ soup = self.soup(doc)
1115
+
1116
+ # There are three <tag> tags.
1117
+ assert 3 == len(soup.find_all('tag'))
1118
+
1119
+ # But two of them are ns1:tag and one of them is ns2:tag.
1120
+ assert 2 == len(soup.find_all('ns1:tag'))
1121
+ assert 1 == len(soup.find_all('ns2:tag'))
1122
+
1123
+ assert 1, len(soup.find_all('ns2:tag', key='value'))
1124
+ assert 3, len(soup.find_all(['ns1:tag', 'ns2:tag']))
1125
+
1126
+ def test_copy_tag_preserves_namespace(self):
1127
+ xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
1128
+ <w:document xmlns:w="http://example.com/ns0"/>"""
1129
+
1130
+ soup = self.soup(xml)
1131
+ tag = soup.document
1132
+ duplicate = copy.copy(tag)
1133
+
1134
+ # The two tags have the same namespace prefix.
1135
+ assert tag.prefix == duplicate.prefix
1136
+
1137
+ def test_worst_case(self):
1138
+ """Test the worst case (currently) for linking issues."""
1139
+
1140
+ soup = self.soup(BAD_DOCUMENT)
1141
+ self.linkage_validator(soup)
1142
+
1143
+
1144
+ class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
1145
+ """Smoke test for a tree builder that supports HTML5."""
1146
+
1147
+ def test_real_xhtml_document(self):
1148
+ # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
1149
+ # XHTML documents in any particular way.
1150
+ pass
1151
+
1152
+ def test_html_tags_have_namespace(self):
1153
+ markup = "<a>"
1154
+ soup = self.soup(markup)
1155
+ assert "http://www.w3.org/1999/xhtml" == soup.a.namespace
1156
+
1157
+ def test_svg_tags_have_namespace(self):
1158
+ markup = '<svg><circle/></svg>'
1159
+ soup = self.soup(markup)
1160
+ namespace = "http://www.w3.org/2000/svg"
1161
+ assert namespace == soup.svg.namespace
1162
+ assert namespace == soup.circle.namespace
1163
+
1164
+
1165
+ def test_mathml_tags_have_namespace(self):
1166
+ markup = '<math><msqrt>5</msqrt></math>'
1167
+ soup = self.soup(markup)
1168
+ namespace = 'http://www.w3.org/1998/Math/MathML'
1169
+ assert namespace == soup.math.namespace
1170
+ assert namespace == soup.msqrt.namespace
1171
+
1172
+ def test_xml_declaration_becomes_comment(self):
1173
+ markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
1174
+ soup = self.soup(markup)
1175
+ assert isinstance(soup.contents[0], Comment)
1176
+ assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
1177
+ assert "html" == soup.contents[0].next_element.name
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (45.8 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_dammit.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_element.cpython-310.pyc ADDED
Binary file (2.74 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_formatter.cpython-310.pyc ADDED
Binary file (3.2 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_fuzz.cpython-310.pyc ADDED
Binary file (4.67 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_html5lib.cpython-310.pyc ADDED
Binary file (7.9 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_lxml.cpython-310.pyc ADDED
Binary file (5.55 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/__pycache__/test_navigablestring.cpython-310.pyc ADDED
Binary file (4.92 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ �� � <css
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ �<!DOCTyPEV PUBLIC'''�'
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ )<a><math><TR><a><mI><a><p><a>
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase ADDED
Binary file (15.3 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase ADDED
Binary file (19.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase ADDED
Binary file (12 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ � ><applet></applet><applet></applet><apple|><applet><applet><appl��><applet><applet></applet></applet></applet></applet><applet></applet><apple>t<applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet>et><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><azplet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><
applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><plet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><applet><ap
plet><applet><applet><applet><applet><applet><applet><applet><applet><applet></applet></applet></applet></applet></appt></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet></applet><<meta charset=utf-8>
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase ADDED
Binary file (11.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+
2
+ <![
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ -<math><sElect><mi><sElect><sElect>
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ )<math><math><math><math><math><math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><anno
tation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><
math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul
>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><anno
tation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><
math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&)<math><math><annotation-xul>&
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ �<table><svg><html>
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase ADDED
@@ -0,0 +1 @@
 
 
1
+ - �� <math><select><mi><select><select>t
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase ADDED
Binary file (3.55 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase ADDED
Binary file (124 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase ADDED
Binary file (2.61 kB). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase ADDED
Binary file (103 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_builder.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from unittest.mock import patch
3
+ from bs4.builder import DetectsXMLParsedAsHTML
4
+
5
+ class TestDetectsXMLParsedAsHTML(object):
6
+
7
+ @pytest.mark.parametrize(
8
+ "markup,looks_like_xml",
9
+ [("No xml declaration", False),
10
+ ("<html>obviously HTML</html", False),
11
+ ("<?xml ><html>Actually XHTML</html>", False),
12
+ ("<?xml> < html>Tricky XHTML</html>", False),
13
+ ("<?xml ><no-html-tag>", True),
14
+ ]
15
+ )
16
+ def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml):
17
+ # Test of our ability to guess at whether markup looks XML-ish
18
+ # _and_ not HTML-ish.
19
+ with patch('bs4.builder.DetectsXMLParsedAsHTML._warn') as mock:
20
+ for data in markup, markup.encode('utf8'):
21
+ result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
22
+ data
23
+ )
24
+ assert result == looks_like_xml
25
+ if looks_like_xml:
26
+ assert mock.called
27
+ else:
28
+ assert not mock.called
29
+ mock.reset_mock()
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_builder_registry.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests of the builder registry."""
2
+
3
+ import pytest
4
+ import warnings
5
+
6
+ from bs4 import BeautifulSoup
7
+ from bs4.builder import (
8
+ builder_registry as registry,
9
+ HTMLParserTreeBuilder,
10
+ TreeBuilderRegistry,
11
+ )
12
+
13
+ from . import (
14
+ HTML5LIB_PRESENT,
15
+ LXML_PRESENT,
16
+ )
17
+
18
+ if HTML5LIB_PRESENT:
19
+ from bs4.builder import HTML5TreeBuilder
20
+
21
+ if LXML_PRESENT:
22
+ from bs4.builder import (
23
+ LXMLTreeBuilderForXML,
24
+ LXMLTreeBuilder,
25
+ )
26
+
27
+
28
+ # TODO: Split out the lxml and html5lib tests into their own classes
29
+ # and gate with pytest.mark.skipIf.
30
+ class TestBuiltInRegistry(object):
31
+ """Test the built-in registry with the default builders registered."""
32
+
33
+ def test_combination(self):
34
+ assert registry.lookup('strict', 'html') == HTMLParserTreeBuilder
35
+ if LXML_PRESENT:
36
+ assert registry.lookup('fast', 'html') == LXMLTreeBuilder
37
+ assert registry.lookup('permissive', 'xml') == LXMLTreeBuilderForXML
38
+ if HTML5LIB_PRESENT:
39
+ assert registry.lookup('html5lib', 'html') == HTML5TreeBuilder
40
+
41
+ def test_lookup_by_markup_type(self):
42
+ if LXML_PRESENT:
43
+ assert registry.lookup('html') == LXMLTreeBuilder
44
+ assert registry.lookup('xml') == LXMLTreeBuilderForXML
45
+ else:
46
+ assert registry.lookup('xml') == None
47
+ if HTML5LIB_PRESENT:
48
+ assert registry.lookup('html') == HTML5TreeBuilder
49
+ else:
50
+ assert registry.lookup('html') == HTMLParserTreeBuilder
51
+
52
+ def test_named_library(self):
53
+ if LXML_PRESENT:
54
+ assert registry.lookup('lxml', 'xml') == LXMLTreeBuilderForXML
55
+ assert registry.lookup('lxml', 'html') == LXMLTreeBuilder
56
+ if HTML5LIB_PRESENT:
57
+ assert registry.lookup('html5lib') == HTML5TreeBuilder
58
+
59
+ assert registry.lookup('html.parser') == HTMLParserTreeBuilder
60
+
61
+ def test_beautifulsoup_constructor_does_lookup(self):
62
+
63
+ with warnings.catch_warnings(record=True) as w:
64
+ # This will create a warning about not explicitly
65
+ # specifying a parser, but we'll ignore it.
66
+
67
+ # You can pass in a string.
68
+ BeautifulSoup("", features="html")
69
+ # Or a list of strings.
70
+ BeautifulSoup("", features=["html", "fast"])
71
+ pass
72
+
73
+ # You'll get an exception if BS can't find an appropriate
74
+ # builder.
75
+ with pytest.raises(ValueError):
76
+ BeautifulSoup("", features="no-such-feature")
77
+
78
+ class TestRegistry(object):
79
+ """Test the TreeBuilderRegistry class in general."""
80
+
81
+ def setup_method(self):
82
+ self.registry = TreeBuilderRegistry()
83
+
84
+ def builder_for_features(self, *feature_list):
85
+ cls = type('Builder_' + '_'.join(feature_list),
86
+ (object,), {'features' : feature_list})
87
+
88
+ self.registry.register(cls)
89
+ return cls
90
+
91
+ def test_register_with_no_features(self):
92
+ builder = self.builder_for_features()
93
+
94
+ # Since the builder advertises no features, you can't find it
95
+ # by looking up features.
96
+ assert self.registry.lookup('foo') is None
97
+
98
+ # But you can find it by doing a lookup with no features, if
99
+ # this happens to be the only registered builder.
100
+ assert self.registry.lookup() == builder
101
+
102
+ def test_register_with_features_makes_lookup_succeed(self):
103
+ builder = self.builder_for_features('foo', 'bar')
104
+ assert self.registry.lookup('foo') is builder
105
+ assert self.registry.lookup('bar') is builder
106
+
107
+ def test_lookup_fails_when_no_builder_implements_feature(self):
108
+ builder = self.builder_for_features('foo', 'bar')
109
+ assert self.registry.lookup('baz') is None
110
+
111
+ def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
112
+ builder1 = self.builder_for_features('foo')
113
+ builder2 = self.builder_for_features('bar')
114
+ assert self.registry.lookup() == builder2
115
+
116
+ def test_lookup_fails_when_no_tree_builders_registered(self):
117
+ assert self.registry.lookup() is None
118
+
119
+ def test_lookup_gets_most_recent_builder_supporting_all_features(self):
120
+ has_one = self.builder_for_features('foo')
121
+ has_the_other = self.builder_for_features('bar')
122
+ has_both_early = self.builder_for_features('foo', 'bar', 'baz')
123
+ has_both_late = self.builder_for_features('foo', 'bar', 'quux')
124
+ lacks_one = self.builder_for_features('bar')
125
+ has_the_other = self.builder_for_features('foo')
126
+
127
+ # There are two builders featuring 'foo' and 'bar', but
128
+ # the one that also features 'quux' was registered later.
129
+ assert self.registry.lookup('foo', 'bar') == has_both_late
130
+
131
+ # There is only one builder featuring 'foo', 'bar', and 'baz'.
132
+ assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early
133
+
134
+ def test_lookup_fails_when_cannot_reconcile_requested_features(self):
135
+ builder1 = self.builder_for_features('foo', 'bar')
136
+ builder2 = self.builder_for_features('foo', 'baz')
137
+ assert self.registry.lookup('bar', 'baz') is None
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_css.py ADDED
@@ -0,0 +1,487 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import types
3
+ from unittest.mock import MagicMock
4
+
5
+ from bs4 import (
6
+ CSS,
7
+ BeautifulSoup,
8
+ ResultSet,
9
+ )
10
+
11
+ from . import (
12
+ SoupTest,
13
+ SOUP_SIEVE_PRESENT,
14
+ )
15
+
16
+ if SOUP_SIEVE_PRESENT:
17
+ from soupsieve import SelectorSyntaxError
18
+
19
+
20
+ @pytest.mark.skipif(not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed")
21
+ class TestCSSSelectors(SoupTest):
22
+ """Test basic CSS selector functionality.
23
+
24
+ This functionality is implemented in soupsieve, which has a much
25
+ more comprehensive test suite, so this is basically an extra check
26
+ that soupsieve works as expected.
27
+ """
28
+
29
+ HTML = """
30
+ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
31
+ "http://www.w3.org/TR/html4/strict.dtd">
32
+ <html>
33
+ <head>
34
+ <title>The title</title>
35
+ <link rel="stylesheet" href="blah.css" type="text/css" id="l1">
36
+ </head>
37
+ <body>
38
+ <custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
39
+ <div id="main" class="fancy">
40
+ <div id="inner">
41
+ <h1 id="header1">An H1</h1>
42
+ <p>Some text</p>
43
+ <p class="onep" id="p1">Some more text</p>
44
+ <h2 id="header2">An H2</h2>
45
+ <p class="class1 class2 class3" id="pmulti">Another</p>
46
+ <a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
47
+ <h2 id="header3">Another H2</h2>
48
+ <a id="me" href="http://simonwillison.net/" rel="me">me</a>
49
+ <span class="s1">
50
+ <a href="#" id="s1a1">span1a1</a>
51
+ <a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
52
+ <span class="span2">
53
+ <a href="#" id="s2a1">span2a1</a>
54
+ </span>
55
+ <span class="span3"></span>
56
+ <custom-dashed-tag class="dashed" id="dash2"/>
57
+ <div data-tag="dashedvalue" id="data1"/>
58
+ </span>
59
+ </div>
60
+ <x id="xid">
61
+ <z id="zida"/>
62
+ <z id="zidab"/>
63
+ <z id="zidac"/>
64
+ </x>
65
+ <y id="yid">
66
+ <z id="zidb"/>
67
+ </y>
68
+ <p lang="en" id="lang-en">English</p>
69
+ <p lang="en-gb" id="lang-en-gb">English UK</p>
70
+ <p lang="en-us" id="lang-en-us">English US</p>
71
+ <p lang="fr" id="lang-fr">French</p>
72
+ </div>
73
+
74
+ <div id="footer">
75
+ </div>
76
+ """
77
+
78
+ def setup_method(self):
79
+ self.soup = BeautifulSoup(self.HTML, 'html.parser')
80
+
81
+ def assert_selects(self, selector, expected_ids, **kwargs):
82
+ results = self.soup.select(selector, **kwargs)
83
+ assert isinstance(results, ResultSet)
84
+ el_ids = [el['id'] for el in results]
85
+ el_ids.sort()
86
+ expected_ids.sort()
87
+ assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
88
+ selector, ', '.join(expected_ids), ', '.join(el_ids)
89
+ )
90
+
91
+ assertSelect = assert_selects
92
+
93
+ def assert_select_multiple(self, *tests):
94
+ for selector, expected_ids in tests:
95
+ self.assert_selects(selector, expected_ids)
96
+
97
+ def test_precompiled(self):
98
+ sel = self.soup.css.compile('div')
99
+
100
+ els = self.soup.select(sel)
101
+ assert len(els) == 4
102
+ for div in els:
103
+ assert div.name == 'div'
104
+
105
+ el = self.soup.select_one(sel)
106
+ assert 'main' == el['id']
107
+
108
+ def test_one_tag_one(self):
109
+ els = self.soup.select('title')
110
+ assert len(els) == 1
111
+ assert els[0].name == 'title'
112
+ assert els[0].contents == ['The title']
113
+
114
+ def test_one_tag_many(self):
115
+ els = self.soup.select('div')
116
+ assert len(els) == 4
117
+ for div in els:
118
+ assert div.name == 'div'
119
+
120
+ el = self.soup.select_one('div')
121
+ assert 'main' == el['id']
122
+
123
+ def test_select_one_returns_none_if_no_match(self):
124
+ match = self.soup.select_one('nonexistenttag')
125
+ assert None == match
126
+
127
+
128
+ def test_tag_in_tag_one(self):
129
+ els = self.soup.select('div div')
130
+ self.assert_selects('div div', ['inner', 'data1'])
131
+
132
+ def test_tag_in_tag_many(self):
133
+ for selector in ('html div', 'html body div', 'body div'):
134
+ self.assert_selects(selector, ['data1', 'main', 'inner', 'footer'])
135
+
136
+
137
+ def test_limit(self):
138
+ self.assert_selects('html div', ['main'], limit=1)
139
+ self.assert_selects('html body div', ['inner', 'main'], limit=2)
140
+ self.assert_selects('body div', ['data1', 'main', 'inner', 'footer'],
141
+ limit=10)
142
+
143
+ def test_tag_no_match(self):
144
+ assert len(self.soup.select('del')) == 0
145
+
146
+ def test_invalid_tag(self):
147
+ with pytest.raises(SelectorSyntaxError):
148
+ self.soup.select('tag%t')
149
+
150
+ def test_select_dashed_tag_ids(self):
151
+ self.assert_selects('custom-dashed-tag', ['dash1', 'dash2'])
152
+
153
+ def test_select_dashed_by_id(self):
154
+ dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
155
+ assert dashed[0].name == 'custom-dashed-tag'
156
+ assert dashed[0]['id'] == 'dash2'
157
+
158
+ def test_dashed_tag_text(self):
159
+ assert self.soup.select('body > custom-dashed-tag')[0].text == 'Hello there.'
160
+
161
+ def test_select_dashed_matches_find_all(self):
162
+ assert self.soup.select('custom-dashed-tag') == self.soup.find_all('custom-dashed-tag')
163
+
164
+ def test_header_tags(self):
165
+ self.assert_select_multiple(
166
+ ('h1', ['header1']),
167
+ ('h2', ['header2', 'header3']),
168
+ )
169
+
170
+ def test_class_one(self):
171
+ for selector in ('.onep', 'p.onep', 'html p.onep'):
172
+ els = self.soup.select(selector)
173
+ assert len(els) == 1
174
+ assert els[0].name == 'p'
175
+ assert els[0]['class'] == ['onep']
176
+
177
+ def test_class_mismatched_tag(self):
178
+ els = self.soup.select('div.onep')
179
+ assert len(els) == 0
180
+
181
+ def test_one_id(self):
182
+ for selector in ('div#inner', '#inner', 'div div#inner'):
183
+ self.assert_selects(selector, ['inner'])
184
+
185
+ def test_bad_id(self):
186
+ els = self.soup.select('#doesnotexist')
187
+ assert len(els) == 0
188
+
189
+ def test_items_in_id(self):
190
+ els = self.soup.select('div#inner p')
191
+ assert len(els) == 3
192
+ for el in els:
193
+ assert el.name == 'p'
194
+ assert els[1]['class'] == ['onep']
195
+ assert not els[0].has_attr('class')
196
+
197
+ def test_a_bunch_of_emptys(self):
198
+ for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
199
+ assert len(self.soup.select(selector)) == 0
200
+
201
+ def test_multi_class_support(self):
202
+ for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
203
+ '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
204
+ self.assert_selects(selector, ['pmulti'])
205
+
206
+ def test_multi_class_selection(self):
207
+ for selector in ('.class1.class3', '.class3.class2',
208
+ '.class1.class2.class3'):
209
+ self.assert_selects(selector, ['pmulti'])
210
+
211
+ def test_child_selector(self):
212
+ self.assert_selects('.s1 > a', ['s1a1', 's1a2'])
213
+ self.assert_selects('.s1 > a span', ['s1a2s1'])
214
+
215
+ def test_child_selector_id(self):
216
+ self.assert_selects('.s1 > a#s1a2 span', ['s1a2s1'])
217
+
218
+ def test_attribute_equals(self):
219
+ self.assert_select_multiple(
220
+ ('p[class="onep"]', ['p1']),
221
+ ('p[id="p1"]', ['p1']),
222
+ ('[class="onep"]', ['p1']),
223
+ ('[id="p1"]', ['p1']),
224
+ ('link[rel="stylesheet"]', ['l1']),
225
+ ('link[type="text/css"]', ['l1']),
226
+ ('link[href="blah.css"]', ['l1']),
227
+ ('link[href="no-blah.css"]', []),
228
+ ('[rel="stylesheet"]', ['l1']),
229
+ ('[type="text/css"]', ['l1']),
230
+ ('[href="blah.css"]', ['l1']),
231
+ ('[href="no-blah.css"]', []),
232
+ ('p[href="no-blah.css"]', []),
233
+ ('[href="no-blah.css"]', []),
234
+ )
235
+
236
+ def test_attribute_tilde(self):
237
+ self.assert_select_multiple(
238
+ ('p[class~="class1"]', ['pmulti']),
239
+ ('p[class~="class2"]', ['pmulti']),
240
+ ('p[class~="class3"]', ['pmulti']),
241
+ ('[class~="class1"]', ['pmulti']),
242
+ ('[class~="class2"]', ['pmulti']),
243
+ ('[class~="class3"]', ['pmulti']),
244
+ ('a[rel~="friend"]', ['bob']),
245
+ ('a[rel~="met"]', ['bob']),
246
+ ('[rel~="friend"]', ['bob']),
247
+ ('[rel~="met"]', ['bob']),
248
+ )
249
+
250
+ def test_attribute_startswith(self):
251
+ self.assert_select_multiple(
252
+ ('[rel^="style"]', ['l1']),
253
+ ('link[rel^="style"]', ['l1']),
254
+ ('notlink[rel^="notstyle"]', []),
255
+ ('[rel^="notstyle"]', []),
256
+ ('link[rel^="notstyle"]', []),
257
+ ('link[href^="bla"]', ['l1']),
258
+ ('a[href^="http://"]', ['bob', 'me']),
259
+ ('[href^="http://"]', ['bob', 'me']),
260
+ ('[id^="p"]', ['pmulti', 'p1']),
261
+ ('[id^="m"]', ['me', 'main']),
262
+ ('div[id^="m"]', ['main']),
263
+ ('a[id^="m"]', ['me']),
264
+ ('div[data-tag^="dashed"]', ['data1'])
265
+ )
266
+
267
+ def test_attribute_endswith(self):
268
+ self.assert_select_multiple(
269
+ ('[href$=".css"]', ['l1']),
270
+ ('link[href$=".css"]', ['l1']),
271
+ ('link[id$="1"]', ['l1']),
272
+ ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
273
+ ('div[id$="1"]', ['data1']),
274
+ ('[id$="noending"]', []),
275
+ )
276
+
277
+ def test_attribute_contains(self):
278
+ self.assert_select_multiple(
279
+ # From test_attribute_startswith
280
+ ('[rel*="style"]', ['l1']),
281
+ ('link[rel*="style"]', ['l1']),
282
+ ('notlink[rel*="notstyle"]', []),
283
+ ('[rel*="notstyle"]', []),
284
+ ('link[rel*="notstyle"]', []),
285
+ ('link[href*="bla"]', ['l1']),
286
+ ('[href*="http://"]', ['bob', 'me']),
287
+ ('[id*="p"]', ['pmulti', 'p1']),
288
+ ('div[id*="m"]', ['main']),
289
+ ('a[id*="m"]', ['me']),
290
+ # From test_attribute_endswith
291
+ ('[href*=".css"]', ['l1']),
292
+ ('link[href*=".css"]', ['l1']),
293
+ ('link[id*="1"]', ['l1']),
294
+ ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
295
+ ('div[id*="1"]', ['data1']),
296
+ ('[id*="noending"]', []),
297
+ # New for this test
298
+ ('[href*="."]', ['bob', 'me', 'l1']),
299
+ ('a[href*="."]', ['bob', 'me']),
300
+ ('link[href*="."]', ['l1']),
301
+ ('div[id*="n"]', ['main', 'inner']),
302
+ ('div[id*="nn"]', ['inner']),
303
+ ('div[data-tag*="edval"]', ['data1'])
304
+ )
305
+
306
+ def test_attribute_exact_or_hypen(self):
307
+ self.assert_select_multiple(
308
+ ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
309
+ ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
310
+ ('p[lang|="fr"]', ['lang-fr']),
311
+ ('p[lang|="gb"]', []),
312
+ )
313
+
314
+ def test_attribute_exists(self):
315
+ self.assert_select_multiple(
316
+ ('[rel]', ['l1', 'bob', 'me']),
317
+ ('link[rel]', ['l1']),
318
+ ('a[rel]', ['bob', 'me']),
319
+ ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
320
+ ('p[class]', ['p1', 'pmulti']),
321
+ ('[blah]', []),
322
+ ('p[blah]', []),
323
+ ('div[data-tag]', ['data1'])
324
+ )
325
+
326
+ def test_quoted_space_in_selector_name(self):
327
+ html = """<div style="display: wrong">nope</div>
328
+ <div style="display: right">yes</div>
329
+ """
330
+ soup = BeautifulSoup(html, 'html.parser')
331
+ [chosen] = soup.select('div[style="display: right"]')
332
+ assert "yes" == chosen.string
333
+
334
+ def test_unsupported_pseudoclass(self):
335
+ with pytest.raises(NotImplementedError):
336
+ self.soup.select("a:no-such-pseudoclass")
337
+
338
+ with pytest.raises(SelectorSyntaxError):
339
+ self.soup.select("a:nth-of-type(a)")
340
+
341
+ def test_nth_of_type(self):
342
+ # Try to select first paragraph
343
+ els = self.soup.select('div#inner p:nth-of-type(1)')
344
+ assert len(els) == 1
345
+ assert els[0].string == 'Some text'
346
+
347
+ # Try to select third paragraph
348
+ els = self.soup.select('div#inner p:nth-of-type(3)')
349
+ assert len(els) == 1
350
+ assert els[0].string == 'Another'
351
+
352
+ # Try to select (non-existent!) fourth paragraph
353
+ els = self.soup.select('div#inner p:nth-of-type(4)')
354
+ assert len(els) == 0
355
+
356
+ # Zero will select no tags.
357
+ els = self.soup.select('div p:nth-of-type(0)')
358
+ assert len(els) == 0
359
+
360
+ def test_nth_of_type_direct_descendant(self):
361
+ els = self.soup.select('div#inner > p:nth-of-type(1)')
362
+ assert len(els) == 1
363
+ assert els[0].string == 'Some text'
364
+
365
+ def test_id_child_selector_nth_of_type(self):
366
+ self.assert_selects('#inner > p:nth-of-type(2)', ['p1'])
367
+
368
+ def test_select_on_element(self):
369
+ # Other tests operate on the tree; this operates on an element
370
+ # within the tree.
371
+ inner = self.soup.find("div", id="main")
372
+ selected = inner.select("div")
373
+ # The <div id="inner"> tag was selected. The <div id="footer">
374
+ # tag was not.
375
+ self.assert_selects_ids(selected, ['inner', 'data1'])
376
+
377
+ def test_overspecified_child_id(self):
378
+ self.assert_selects(".fancy #inner", ['inner'])
379
+ self.assert_selects(".normal #inner", [])
380
+
381
+ def test_adjacent_sibling_selector(self):
382
+ self.assert_selects('#p1 + h2', ['header2'])
383
+ self.assert_selects('#p1 + h2 + p', ['pmulti'])
384
+ self.assert_selects('#p1 + #header2 + .class1', ['pmulti'])
385
+ assert [] == self.soup.select('#p1 + p')
386
+
387
+ def test_general_sibling_selector(self):
388
+ self.assert_selects('#p1 ~ h2', ['header2', 'header3'])
389
+ self.assert_selects('#p1 ~ #header2', ['header2'])
390
+ self.assert_selects('#p1 ~ h2 + a', ['me'])
391
+ self.assert_selects('#p1 ~ h2 + [rel="me"]', ['me'])
392
+ assert [] == self.soup.select('#inner ~ h2')
393
+
394
+ def test_dangling_combinator(self):
395
+ with pytest.raises(SelectorSyntaxError):
396
+ self.soup.select('h1 >')
397
+
398
+ def test_sibling_combinator_wont_select_same_tag_twice(self):
399
+ self.assert_selects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])
400
+
401
+ # Test the selector grouping operator (the comma)
402
+ def test_multiple_select(self):
403
+ self.assert_selects('x, y', ['xid', 'yid'])
404
+
405
+ def test_multiple_select_with_no_space(self):
406
+ self.assert_selects('x,y', ['xid', 'yid'])
407
+
408
+ def test_multiple_select_with_more_space(self):
409
+ self.assert_selects('x, y', ['xid', 'yid'])
410
+
411
+ def test_multiple_select_duplicated(self):
412
+ self.assert_selects('x, x', ['xid'])
413
+
414
+ def test_multiple_select_sibling(self):
415
+ self.assert_selects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])
416
+
417
+ def test_multiple_select_tag_and_direct_descendant(self):
418
+ self.assert_selects('x, y > z', ['xid', 'zidb'])
419
+
420
+ def test_multiple_select_direct_descendant_and_tags(self):
421
+ self.assert_selects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
422
+
423
+ def test_multiple_select_indirect_descendant(self):
424
+ self.assert_selects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
425
+
426
+ def test_invalid_multiple_select(self):
427
+ with pytest.raises(SelectorSyntaxError):
428
+ self.soup.select(',x, y')
429
+ with pytest.raises(SelectorSyntaxError):
430
+ self.soup.select('x,,y')
431
+
432
+ def test_multiple_select_attrs(self):
433
+ self.assert_selects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])
434
+
435
+ def test_multiple_select_ids(self):
436
+ self.assert_selects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])
437
+
438
+ def test_multiple_select_nested(self):
439
+ self.assert_selects('body > div > x, y > z', ['xid', 'zidb'])
440
+
441
+ def test_select_duplicate_elements(self):
442
+ # When markup contains duplicate elements, a multiple select
443
+ # will find all of them.
444
+ markup = '<div class="c1"/><div class="c2"/><div class="c1"/>'
445
+ soup = BeautifulSoup(markup, 'html.parser')
446
+ selected = soup.select(".c1, .c2")
447
+ assert 3 == len(selected)
448
+
449
+ # Verify that find_all finds the same elements, though because
450
+ # of an implementation detail it finds them in a different
451
+ # order.
452
+ for element in soup.find_all(class_=['c1', 'c2']):
453
+ assert element in selected
454
+
455
+ def test_closest(self):
456
+ inner = self.soup.find("div", id="inner")
457
+ closest = inner.css.closest("div[id=main]")
458
+ assert closest == self.soup.find("div", id="main")
459
+
460
+ def test_match(self):
461
+ inner = self.soup.find("div", id="inner")
462
+ main = self.soup.find("div", id="main")
463
+ assert inner.css.match("div[id=main]") == False
464
+ assert main.css.match("div[id=main]") == True
465
+
466
+ def test_iselect(self):
467
+ gen = self.soup.css.iselect("h2")
468
+ assert isinstance(gen, types.GeneratorType)
469
+ [header2, header3] = gen
470
+ assert header2['id'] == 'header2'
471
+ assert header3['id'] == 'header3'
472
+
473
+ def test_filter(self):
474
+ inner = self.soup.find("div", id="inner")
475
+ results = inner.css.filter("h2")
476
+ assert len(inner.css.filter("h2")) == 2
477
+
478
+ results = inner.css.filter("h2[id=header3]")
479
+ assert isinstance(results, ResultSet)
480
+ [result] = results
481
+ assert result['id'] == 'header3'
482
+
483
+ def test_escape(self):
484
+ m = self.soup.css.escape
485
+ assert m(".foo#bar") == '\\.foo\\#bar'
486
+ assert m("()[]{}") == '\\(\\)\\[\\]\\{\\}'
487
+ assert m(".foo") == self.soup.css.escape(".foo")
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_dammit.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # encoding: utf-8
2
+ import pytest
3
+ import logging
4
+ import bs4
5
+ from bs4 import BeautifulSoup
6
+ from bs4.dammit import (
7
+ EntitySubstitution,
8
+ EncodingDetector,
9
+ UnicodeDammit,
10
+ )
11
+
12
class TestUnicodeDammit(object):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        # A string that is already Unicode passes through untouched.
        text = "I'm already Unicode! \N{SNOWMAN}"
        result = UnicodeDammit(text)
        assert text == result.unicode_markup

    @pytest.mark.parametrize(
        "smart_quotes_to,expect_converted",
        [(None, "\u2018\u2019\u201c\u201d"),
         ("xml", "&#x2018;&#x2019;&#x201C;&#x201D;"),
         ("html", "&lsquo;&rsquo;&ldquo;&rdquo;"),
         ("ascii", "''" + '""'),
        ]
    )
    def test_smart_quotes_to(self, smart_quotes_to, expect_converted):
        """Verify the functionality of the smart_quotes_to argument
        to the UnicodeDammit constructor."""
        # Bytes \x91-\x94 are the Windows-1252 "smart quote" characters.
        quoted = b"<foo>\x91\x92\x93\x94</foo>"
        result = UnicodeDammit(
            quoted,
            known_definite_encodings=["windows-1252"],
            smart_quotes_to=smart_quotes_to,
        )
        assert result.unicode_markup == "<foo>{}</foo>".format(expect_converted)

    def test_detect_utf8(self):
        # UTF-8 input is autodetected without any hints.
        data = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        result = UnicodeDammit(data)
        assert 'utf-8' == result.original_encoding.lower()
        assert 'Sacr\xe9 bleu! \N{SNOWMAN}' == result.unicode_markup

    def test_convert_hebrew(self):
        # A suggested encoding is used when it decodes the data cleanly.
        raw = b"\xed\xe5\xec\xf9"
        result = UnicodeDammit(raw, ["iso-8859-8"])
        assert 'iso-8859-8' == result.original_encoding.lower()
        assert '\u05dd\u05d5\u05dc\u05e9' == result.unicode_markup

    def test_dont_see_smart_quotes_where_there_are_none(self):
        # Valid UTF-8 must not be misread as Windows-1252 smart quotes.
        data = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        result = UnicodeDammit(data)
        assert 'utf-8' == result.original_encoding.lower()
        assert data == result.unicode_markup.encode("utf-8")

    def test_ignore_inappropriate_codecs(self):
        # A suggested encoding that doesn't fit the data is ignored.
        data = "Räksmörgås".encode("utf-8")
        result = UnicodeDammit(data, ["iso-8859-8"])
        assert 'utf-8' == result.original_encoding.lower()

    def test_ignore_invalid_codecs(self):
        # Suggested encodings that aren't real codec names are ignored.
        data = "Räksmörgås".encode("utf-8")
        for bogus_name in ['.utf8', '...', 'utF---16.!']:
            result = UnicodeDammit(data, [bogus_name])
            assert 'utf-8' == result.original_encoding.lower()

    def test_exclude_encodings(self):
        # This is UTF-8.
        data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        result = UnicodeDammit(data, exclude_encodings=["utf-8"])
        assert 'windows-1252' == result.original_encoding.lower()

        # And if we exclude that as well, there is no valid guess at all.
        result = UnicodeDammit(
            data, exclude_encodings=["utf-8", "windows-1252"]
        )
        assert result.original_encoding is None
81
class TestEncodingDetector(object):
    """Tests of EncodingDetector and of UnicodeDammit behavior that
    depends on it (byte-order marks, declared encodings, detwingling).
    """

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        # A declared encoding containing an undecodable byte is still
        # reported, with the junk byte replaced by U+FFFD.
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):
        # <meta charset=...> is honored in all of its quoting variants.
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            assert "euc-jp" == dammit.original_encoding

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        # Silence the expected "contains replacement characters" warning.
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Stub out chardet so the last-ditch code path runs.
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            assert True == dammit.contains_replacement_characters
            assert "\ufffd" in dammit.unicode_markup

            soup = BeautifulSoup(doc, "html.parser")
            assert soup.contains_replacement_characters
        finally:
            # Always restore logging and the real chardet hook.
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        assert "<a>áé</a>" == dammit.unicode_markup
        assert "utf-16le" == dammit.original_encoding

    def test_known_definite_versus_user_encodings(self):
        # The known_definite_encodings are used before sniffing the
        # byte-order mark; the user_encodings are used afterwards.

        # Here's a document in UTF-16LE.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)

        # We can process it as UTF-16 by passing it in as a known
        # definite encoding.
        before = UnicodeDammit(data, known_definite_encodings=["utf-16"])
        assert "utf-16" == before.original_encoding

        # If we pass UTF-8 as a user encoding, it's not even
        # tried--the encoding sniffed from the byte-order mark takes
        # precedence.
        after = UnicodeDammit(data, user_encodings=["utf-8"])
        assert "utf-16le" == after.original_encoding
        # NOTE(review): this inspects `dammit` (built above with no
        # hints), not `after` -- presumably `after` was intended, though
        # both tried only utf-16le here; verify against upstream.
        assert ["utf-16le"] == [x[0] for x in dammit.tried_encodings]

        # Here's a document in ISO-8859-8.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, known_definite_encodings=["utf-8"],
                               user_encodings=["iso-8859-8"])

        # The known_definite_encodings don't work, BOM sniffing does
        # nothing (it only works for a few UTF encodings), but one of
        # the user_encodings does work.
        assert "iso-8859-8" == dammit.original_encoding
        assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings]

    def test_deprecated_override_encodings(self):
        # override_encodings is a deprecated alias for
        # known_definite_encodings.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(
            hebrew,
            known_definite_encodings=["shift-jis"],
            override_encodings=["utf-8"],
            user_encodings=["iso-8859-8"],
        )
        assert "iso-8859-8" == dammit.original_encoding

        # known_definite_encodings and override_encodings were tried
        # before user_encodings.
        assert ["shift-jis", "utf-8", "iso-8859-8"] == (
            [x[0] for x in dammit.tried_encodings]
        )

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        with pytest.raises(UnicodeDecodeError):
            doc.decode("utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"

        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        assert "☃☃☃“Hi, I like Windows!”☃☃☃" == fixed.decode("utf8")

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            "\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
        ):
            input = tricky_unicode_char.encode("utf8")
            assert input.endswith(b'\x93')
            output = UnicodeDammit.detwingle(input)
            assert output == input

    def test_find_declared_encoding(self):
        # Test our ability to find a declared encoding inside an
        # XML or HTML document.
        #
        # Even if the document comes in as Unicode, it may be
        # interesting to know what encoding was claimed
        # originally.

        html_unicode = '<html><head><meta charset="utf-8"></head></html>'
        html_bytes = html_unicode.encode("ascii")

        xml_unicode= '<?xml version="1.0" encoding="ISO-8859-1" ?>'
        xml_bytes = xml_unicode.encode("ascii")

        m = EncodingDetector.find_declared_encoding
        assert m(html_unicode, is_html=False) is None
        assert "utf-8" == m(html_unicode, is_html=True)
        assert "utf-8" == m(html_bytes, is_html=True)

        assert "iso-8859-1" == m(xml_unicode)
        assert "iso-8859-1" == m(xml_bytes)

        # Normally, only the first few kilobytes of a document are checked for
        # an encoding.
        spacer = b' ' * 5000
        assert m(spacer + html_bytes) is None
        assert m(spacer + xml_bytes) is None

        # But you can tell find_declared_encoding to search an entire
        # HTML document.
        assert (
            m(spacer + html_bytes, is_html=True, search_entire_document=True)
            == "utf-8"
        )

        # The XML encoding declaration has to be the very first thing
        # in the document. We'll allow whitespace before the document
        # starts, but nothing else.
        assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b' ' + xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b'a' + xml_bytes, search_entire_document=True) is None
271
class TestEntitySubstitution(object):
    """Standalone tests of the EntitySubstitution class."""
    def setup_method(self):
        # All tests exercise classmethods on EntitySubstitution itself.
        self.sub = EntitySubstitution


    @pytest.mark.parametrize(
        "original,substituted",
        [
            # Basic case. Unicode characters corresponding to named
            # HTML entities are substituted; others are not.
            ("foo\u2200\N{SNOWMAN}\u00f5bar",
             "foo&forall;\N{SNOWMAN}&otilde;bar"),

            # MS smart quotes are a common source of frustration, so we
            # give them a special test.
            ('‘’foo“”', "&lsquo;&rsquo;foo&ldquo;&rdquo;"),
        ]
    )
    def test_substitute_html(self, original, substituted):
        assert self.sub.substitute_html(original) == substituted

    def test_html5_entity(self):
        # Each pair is (named-entity markup, raw Unicode equivalent).
        for entity, u in (
            # A few spot checks of our ability to recognize
            # special character sequences and convert them
            # to named entities.
            ('&models;', '\u22a7'),
            ('&Nfr;', '\U0001d511'),
            ('&ngeqq;', '\u2267\u0338'),
            ('&not;', '\xac'),
            ('&Not;', '\u2aec'),

            # We _could_ convert | to &verbar;, but we don't, because
            # | is an ASCII character.
            #
            # Fix: this entry was previously written ('|' '|') --
            # implicit string concatenation, not a tuple -- and only
            # worked because unpacking the 2-char string '||' happens
            # to yield the same two values.
            ('|', '|'),

            # Similarly for the fj ligature, which we could convert to
            # &fjlig;, but we don't.
            ("fj", "fj"),

            # We do convert _these_ ASCII characters to HTML entities,
            # because that's required to generate valid HTML.
            ('&gt;', '>'),
            ('&lt;', '<'),
            ('&amp;', '&'),
        ):
            template = '3 %s 4'
            raw = template % u
            with_entities = template % entity
            assert self.sub.substitute_html(raw) == with_entities

    def test_html5_entity_with_variation_selector(self):
        # Some HTML5 entities correspond either to a single-character
        # Unicode sequence _or_ to the same character plus U+FE00,
        # VARIATION SELECTOR 1. We can handle this.
        data = "fjords \u2294 penguins"
        markup = "fjords &sqcup; penguins"
        assert self.sub.substitute_html(data) == markup

        data = "fjords \u2294\ufe00 penguins"
        markup = "fjords &sqcups; penguins"
        assert self.sub.substitute_html(data) == markup

    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        # NOTE: "converstion" is a historical typo in the test name,
        # kept so the test ID stays stable.
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, False) == s

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        assert self.sub.substitute_xml("Welcome", True) == '"Welcome"'
        assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"'

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'"

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(s, True) == '"Welcome to &quot;Bob\'s Bar&quot;"'

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(quoted) == quoted

    def test_xml_quoting_handles_angle_brackets(self):
        assert self.sub.substitute_xml("foo<bar>") == "foo&lt;bar&gt;"

    def test_xml_quoting_handles_ampersands(self):
        assert self.sub.substitute_xml("AT&T") == "AT&amp;T"

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        # substitute_xml escapes _every_ ampersand, even inside an entity.
        assert self.sub.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        # The _containing_entities variant leaves entity ampersands alone.
        assert self.sub.substitute_xml_containing_entities("&Aacute;T&T") == "&Aacute;T&amp;T"

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        assert self.sub.substitute_html(text) == text
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_docs.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "Test harness for doctests."
2
+
3
+ # TODO: Pretty sure this isn't used and should be deleted.
4
+
5
+ # pylint: disable-msg=E0611,W0142
6
+
7
+ __metaclass__ = type
8
+ __all__ = [
9
+ 'additional_tests',
10
+ ]
11
+
12
+ import atexit
13
+ import doctest
14
+ import os
15
+ #from pkg_resources import (
16
+ # resource_filename, resource_exists, resource_listdir, cleanup_resources)
17
+ import unittest
18
+
19
+ DOCTEST_FLAGS = (
20
+ doctest.ELLIPSIS |
21
+ doctest.NORMALIZE_WHITESPACE |
22
+ doctest.REPORT_NDIFF)
23
+
24
+
25
+ # def additional_tests():
26
+ # "Run the doc tests (README.txt and docs/*, if any exist)"
27
+ # doctest_files = [
28
+ # os.path.abspath(resource_filename('bs4', 'README.txt'))]
29
+ # if resource_exists('bs4', 'docs'):
30
+ # for name in resource_listdir('bs4', 'docs'):
31
+ # if name.endswith('.txt'):
32
+ # doctest_files.append(
33
+ # os.path.abspath(
34
+ # resource_filename('bs4', 'docs/%s' % name)))
35
+ # kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
36
+ # atexit.register(cleanup_resources)
37
+ # return unittest.TestSuite((
38
+ # doctest.DocFileSuite(*doctest_files, **kwargs)))
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_element.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests of classes in element.py.
2
+
3
+ The really big classes -- Tag, PageElement, and NavigableString --
4
+ are tested in separate files.
5
+ """
6
+
7
+ from bs4.element import (
8
+ CharsetMetaAttributeValue,
9
+ ContentMetaAttributeValue,
10
+ NamespacedAttribute,
11
+ )
12
+ from . import SoupTest
13
+
14
+
15
class TestNamedspacedAttribute(object):
    """Tests of NamespacedAttribute, a string subclass that keeps track
    of an attribute's namespace prefix.

    (The "Namedspaced" spelling in the class name is historical; it
    still matches pytest's Test* discovery pattern.)
    """

    def test_name_may_be_none_or_missing(self):
        # None, an empty string, or an omitted name all collapse to
        # just the prefix.
        for name in (None, ""):
            attr = NamespacedAttribute("xmlns", name)
            assert "xmlns" == attr
        assert "xmlns" == NamespacedAttribute("xmlns")

    def test_namespace_may_be_none_or_missing(self):
        # Likewise, a missing prefix leaves just the name.
        for prefix in (None, ""):
            assert "tag" == NamespacedAttribute(prefix, "tag")

    def test_attribute_is_equivalent_to_colon_separated_string(self):
        attr = NamespacedAttribute("a", "b")
        assert attr == "a:b"

    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        first = NamespacedAttribute("a", "b", "c")
        second = NamespacedAttribute("a", "b", "c")
        assert first == second

        # The actual namespace is not considered.
        no_namespace = NamespacedAttribute("a", "b", None)
        assert first == no_namespace

        # But name and prefix are important.
        other_name = NamespacedAttribute("a", "z", "c")
        assert first != other_name

        other_prefix = NamespacedAttribute("z", "b", "c")
        assert first != other_prefix
55
class TestAttributeValueWithCharsetSubstitution(object):
    """Certain attributes are designed to have the charset of the
    final document substituted into their value.
    """

    def test_charset_meta_attribute_value(self):
        # The value of a CharsetMetaAttributeValue is whatever
        # encoding the string is in; encode() substitutes the target
        # encoding wholesale.
        #
        # Fix: this method was previously also named
        # test_content_meta_attribute_value, so the later definition
        # shadowed it and this test never ran.
        value = CharsetMetaAttributeValue("euc-jp")
        assert "euc-jp" == value
        assert "euc-jp" == value.original_value
        assert "utf8" == value.encode("utf8")
        assert "ascii" == value.encode("ascii")

    def test_content_meta_attribute_value(self):
        # ContentMetaAttributeValue substitutes only the charset=
        # portion of a content-type value.
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        assert "text/html; charset=euc-jp" == value
        assert "text/html; charset=euc-jp" == value.original_value
        assert "text/html; charset=utf8" == value.encode("utf8")
        assert "text/html; charset=ascii" == value.encode("ascii")
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_formatter.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from bs4.element import Tag
4
+ from bs4.formatter import (
5
+ Formatter,
6
+ HTMLFormatter,
7
+ XMLFormatter,
8
+ )
9
+ from . import SoupTest
10
+
11
class TestFormatter(SoupTest):
    """Tests of the Formatter classes that control output serialization."""

    def test_default_attributes(self):
        # Test the default behavior of Formatter.attributes().
        formatter = Formatter()
        tag = Tag(name="tag")
        tag['b'] = 1
        tag['a'] = 2

        # Attributes come out sorted by name. In Python 3, attributes
        # normally come out of a dictionary in the order they were
        # added.
        assert [('a', 2), ('b', 1)] == formatter.attributes(tag)

        # This works even if Tag.attrs is None, though this shouldn't
        # normally happen.
        tag.attrs = None
        assert [] == formatter.attributes(tag)

        assert ' ' == formatter.indent

    def test_sort_attributes(self):
        # Test the ability to override Formatter.attributes() to,
        # e.g., disable the normal sorting of attributes.
        class UnsortedFormatter(Formatter):
            def attributes(self, tag):
                self.called_with = tag
                for k, v in sorted(tag.attrs.items()):
                    if k == 'ignore':
                        continue
                    yield k,v

        soup = self.soup('<p cval="1" aval="2" ignore="ignored"></p>')
        formatter = UnsortedFormatter()
        decoded = soup.decode(formatter=formatter)

        # attributes() was called on the <p> tag. It filtered out one
        # attribute and sorted the other two.
        assert formatter.called_with == soup.p
        assert '<p aval="2" cval="1"></p>' == decoded

    def test_empty_attributes_are_booleans(self):
        # Test the behavior of empty_attributes_are_booleans as well
        # as which Formatters have it enabled.

        for name in ('html', 'minimal', None):
            formatter = HTMLFormatter.REGISTRY[name]
            assert False == formatter.empty_attributes_are_booleans

        formatter = XMLFormatter.REGISTRY[None]
        assert False == formatter.empty_attributes_are_booleans

        formatter = HTMLFormatter.REGISTRY['html5']
        assert True == formatter.empty_attributes_are_booleans

        # Verify that the constructor sets the value.
        formatter = Formatter(empty_attributes_are_booleans=True)
        assert True == formatter.empty_attributes_are_booleans

        # Now demonstrate what it does to markup.
        for markup in (
                "<option selected></option>",
                '<option selected=""></option>'
        ):
            soup = self.soup(markup)
            # NOTE(review): the loop variable below is never used; the
            # asserts always pass the literal 'html'/'html5' formatter
            # names, so the same two assertions run four times.
            # Presumably encode(formatter=formatter) was intended --
            # verify against upstream before changing.
            for formatter in ('html', 'minimal', 'xml', None):
                assert b'<option selected=""></option>' == soup.option.encode(formatter='html')
                assert b'<option selected></option>' == soup.option.encode(formatter='html5')

    @pytest.mark.parametrize(
        "indent,expect",
        [
            (None, '<a>\n<b>\ntext\n</b>\n</a>\n'),
            (-1, '<a>\n<b>\ntext\n</b>\n</a>\n'),
            (0, '<a>\n<b>\ntext\n</b>\n</a>\n'),
            ("", '<a>\n<b>\ntext\n</b>\n</a>\n'),

            (1, '<a>\n <b>\n  text\n </b>\n</a>\n'),
            (2, '<a>\n  <b>\n    text\n  </b>\n</a>\n'),

            ("\t", '<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>\n'),
            ('abc', '<a>\nabc<b>\nabcabctext\nabc</b>\n</a>\n'),

            # Some invalid inputs -- the default behavior is used.
            (object(), '<a>\n <b>\n  text\n </b>\n</a>\n'),
            (b'bytes', '<a>\n <b>\n  text\n </b>\n</a>\n'),
        ]
    )
    def test_indent(self, indent, expect):
        # Pretty-print a tree with a Formatter set to
        # indent in a certain way and verify the results.
        soup = self.soup("<a><b>text</b></a>")
        formatter = Formatter(indent=indent)
        assert soup.prettify(formatter=formatter) == expect

        # Pretty-printing only happens with prettify(), not
        # encode().
        # NOTE(review): encode() returns bytes and `expect` is str, so
        # this inequality holds regardless of content; it documents
        # intent more than it verifies it.
        assert soup.encode(formatter=formatter) != expect

    def test_default_indent_value(self):
        formatter = Formatter()
        assert formatter.indent == ' '
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_fuzz.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This file contains test cases reported by third parties using
2
+ fuzzing tools, primarily from Google's oss-fuzz project. Some of these
3
+ represent real problems with Beautiful Soup, but many are problems in
4
+ libraries that Beautiful Soup depends on, and many of the test cases
5
+ represent different ways of triggering the same problem.
6
+
7
+ Grouping these test cases together makes it easy to see which test
8
+ cases represent the same problem, and puts the test cases in close
9
+ proximity to code that can trigger the problems.
10
+ """
11
+ import os
12
+ import pytest
13
+ from bs4 import (
14
+ BeautifulSoup,
15
+ ParserRejectedMarkup,
16
+ )
17
+ try:
18
+ from soupsieve.util import SelectorSyntaxError
19
+ import lxml
20
+ import html5lib
21
+ fully_fuzzable = True
22
+ except ImportError:
23
+ fully_fuzzable = False
24
+
25
+
26
@pytest.mark.skipif(not fully_fuzzable, reason="Prerequisites for fuzz tests are not installed.")
class TestFuzz(object):
    """Regression tests driven by fuzzer-generated test case files
    stored in the fuzz/ directory next to this module.
    """

    # Test case markup files from fuzzers are given this extension so
    # they can be included in builds.
    TESTCASE_SUFFIX = ".testcase"

    # Copied 20230512 from
    # https://github.com/google/oss-fuzz/blob/4ac6a645a197a695fe76532251feb5067076b3f3/projects/bs4/bs4_fuzzer.py
    #
    # Copying the code lets us precisely duplicate the behavior of
    # oss-fuzz. The downside is that this code changes over time, so
    # multiple copies of the code must be kept around to run against
    # older tests. I'm not sure what to do about this, but I may
    # retire old tests after a time.
    def fuzz_test_with_css(self, filename):
        data = self.__markup(filename)
        parsers = ['lxml-xml', 'html5lib', 'html.parser', 'lxml']
        # Byte 0 of the test case selects which parser to use.
        try:
            idx = int(data[0]) % len(parsers)
        except ValueError:
            return

        # Bytes 1-9 are the CSS selector; the rest is the markup.
        css_selector, data = data[1:10], data[10:]

        # NOTE(review): data[1:] drops one more byte after the prefix
        # split above -- this mirrors the copied oss-fuzz harness
        # verbatim, so keep it as-is.
        try:
            soup = BeautifulSoup(data[1:], features=parsers[idx])
        except ParserRejectedMarkup:
            return
        except ValueError:
            return

        # Walk the tree, run the selector, and re-serialize; the test
        # passes if none of these steps raises an unexpected error.
        list(soup.find_all(True))
        try:
            soup.css.select(css_selector.decode('utf-8', 'replace'))
        except SelectorSyntaxError:
            return
        soup.prettify()

    # This class of error has been fixed by catching a less helpful
    # exception from html.parser and raising ParserRejectedMarkup
    # instead.
    @pytest.mark.parametrize(
        "filename", [
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912",
            "crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a",
        ]
    )
    def test_rejected_markup(self, filename):
        markup = self.__markup(filename)
        with pytest.raises(ParserRejectedMarkup):
            BeautifulSoup(markup, 'html.parser')

    # This class of error has to do with very deeply nested documents
    # which overflow the Python call stack when the tree is converted
    # to a string. This is an issue with Beautiful Soup which was fixed
    # as part of [bug=1471755].
    #
    # These test cases are in the older format that doesn't specify
    # which parser to use or give a CSS selector.
    @pytest.mark.parametrize(
        "filename", [
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400",
        ]
    )
    def test_deeply_nested_document_without_css(self, filename):
        # Parsing the document and encoding it back to a string is
        # sufficient to demonstrate that the overflow problem has
        # been fixed.
        markup = self.__markup(filename)
        BeautifulSoup(markup, 'html.parser').encode()

    # This class of error has to do with very deeply nested documents
    # which overflow the Python call stack when the tree is converted
    # to a string. This is an issue with Beautiful Soup which was fixed
    # as part of [bug=1471755].
    @pytest.mark.parametrize(
        "filename", [
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624",
        ]
    )
    def test_deeply_nested_document(self, filename):
        self.fuzz_test_with_css(filename)

    @pytest.mark.parametrize(
        "filename", [
            "clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256",
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824",
        ]
    )
    def test_soupsieve_errors(self, filename):
        self.fuzz_test_with_css(filename)

    # This class of error represents problems with html5lib's parser,
    # not Beautiful Soup. I use
    # https://github.com/html5lib/html5lib-python/issues/568 to notify
    # the html5lib developers of these issues.
    #
    # These test cases are in the older format that doesn't specify
    # which parser to use or give a CSS selector.
    @pytest.mark.skip(reason="html5lib-specific problems")
    @pytest.mark.parametrize(
        "filename", [
            # b"""ÿ<!DOCTyPEV PUBLIC'''Ð'"""
            "clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320",

            # b')<a><math><TR><a><mI><a><p><a>'
            "clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456",

            # b'-<math><sElect><mi><sElect><sElect>'
            "clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896",

            # b'ñ<table><svg><html>'
            "clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224",

            # <TABLE>, some ^@ characters, some <math> tags.
            "clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744",

            # Nested table
            "crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08"
        ]
    )
    def test_html5lib_parse_errors_without_css(self, filename):
        markup = self.__markup(filename)
        print(BeautifulSoup(markup, 'html5lib').encode())

    # This class of error represents problems with html5lib's parser,
    # not Beautiful Soup. I use
    # https://github.com/html5lib/html5lib-python/issues/568 to notify
    # the html5lib developers of these issues.
    @pytest.mark.skip(reason="html5lib-specific problems")
    @pytest.mark.parametrize(
        "filename", [
            # b'- \xff\xff <math>\x10<select><mi><select><select>t'
            "clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640",
        ]
    )
    def test_html5lib_parse_errors(self, filename):
        self.fuzz_test_with_css(filename)

    def __markup(self, filename):
        # Load a fuzzer test case from the fuzz/ directory, appending
        # the standard suffix if it is missing.
        # NOTE(review): the file handle is not explicitly closed; it is
        # released by garbage collection.
        if not filename.endswith(self.TESTCASE_SUFFIX):
            filename += self.TESTCASE_SUFFIX
        this_dir = os.path.split(__file__)[0]
        path = os.path.join(this_dir, 'fuzz', filename)
        return open(path, 'rb').read()
+ return open(path, 'rb').read()
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_html5lib.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests to ensure that the html5lib tree builder generates good trees."""
2
+
3
+ import pytest
4
+ import warnings
5
+
6
+ from bs4 import BeautifulSoup
7
+ from bs4.element import SoupStrainer
8
+ from . import (
9
+ HTML5LIB_PRESENT,
10
+ HTML5TreeBuilderSmokeTest,
11
+ SoupTest,
12
+ )
13
+
14
+ @pytest.mark.skipif(
15
+ not HTML5LIB_PRESENT,
16
+ reason="html5lib seems not to be present, not testing its tree builder."
17
+ )
18
+ class TestHTML5LibBuilder(SoupTest, HTML5TreeBuilderSmokeTest):
19
+ """See ``HTML5TreeBuilderSmokeTest``."""
20
+
21
+ @property
22
+ def default_builder(self):
23
+ from bs4.builder import HTML5TreeBuilder
24
+ return HTML5TreeBuilder
25
+
26
+ def test_soupstrainer(self):
27
+ # The html5lib tree builder does not support SoupStrainers.
28
+ strainer = SoupStrainer("b")
29
+ markup = "<p>A <b>bold</b> statement.</p>"
30
+ with warnings.catch_warnings(record=True) as w:
31
+ soup = BeautifulSoup(markup, "html5lib", parse_only=strainer)
32
+ assert soup.decode() == self.document_for(markup)
33
+
34
+ [warning] = w
35
+ assert warning.filename == __file__
36
+ assert "the html5lib tree builder doesn't support parse_only" in str(warning.message)
37
+
38
+ def test_correctly_nested_tables(self):
39
+ """html5lib inserts <tbody> tags where other parsers don't."""
40
+ markup = ('<table id="1">'
41
+ '<tr>'
42
+ "<td>Here's another table:"
43
+ '<table id="2">'
44
+ '<tr><td>foo</td></tr>'
45
+ '</table></td>')
46
+
47
+ self.assert_soup(
48
+ markup,
49
+ '<table id="1"><tbody><tr><td>Here\'s another table:'
50
+ '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
51
+ '</td></tr></tbody></table>')
52
+
53
+ self.assert_soup(
54
+ "<table><thead><tr><td>Foo</td></tr></thead>"
55
+ "<tbody><tr><td>Bar</td></tr></tbody>"
56
+ "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
57
+
58
+ def test_xml_declaration_followed_by_doctype(self):
59
+ markup = '''<?xml version="1.0" encoding="utf-8"?>
60
+ <!DOCTYPE html>
61
+ <html>
62
+ <head>
63
+ </head>
64
+ <body>
65
+ <p>foo</p>
66
+ </body>
67
+ </html>'''
68
+ soup = self.soup(markup)
69
+ # Verify that we can reach the <p> tag; this means the tree is connected.
70
+ assert b"<p>foo</p>" == soup.p.encode()
71
+
72
+ def test_reparented_markup(self):
73
+ markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
74
+ soup = self.soup(markup)
75
+ assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" == soup.body.decode()
76
+ assert 2 == len(soup.find_all('p'))
77
+
78
+
79
+ def test_reparented_markup_ends_with_whitespace(self):
80
+ markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
81
+ soup = self.soup(markup)
82
+ assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" == soup.body.decode()
83
+ assert 2 == len(soup.find_all('p'))
84
+
85
+ def test_reparented_markup_containing_identical_whitespace_nodes(self):
86
+ """Verify that we keep the two whitespace nodes in this
87
+ document distinct when reparenting the adjacent <tbody> tags.
88
+ """
89
+ markup = '<table> <tbody><tbody><ims></tbody> </table>'
90
+ soup = self.soup(markup)
91
+ space1, space2 = soup.find_all(string=' ')
92
+ tbody1, tbody2 = soup.find_all('tbody')
93
+ assert space1.next_element is tbody1
94
+ assert tbody2.next_element is space2
95
+
96
+ def test_reparented_markup_containing_children(self):
97
+ markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
98
+ soup = self.soup(markup)
99
+ noscript = soup.noscript
100
+ assert "target" == noscript.next_element
101
+ target = soup.find(string='target')
102
+
103
+ # The 'aftermath' string was duplicated; we want the second one.
104
+ final_aftermath = soup.find_all(string='aftermath')[-1]
105
+
106
+ # The <noscript> tag was moved beneath a copy of the <a> tag,
107
+ # but the 'target' string within is still connected to the
108
+ # (second) 'aftermath' string.
109
+ assert final_aftermath == target.next_element
110
+ assert target == final_aftermath.previous_element
111
+
112
+ def test_processing_instruction(self):
113
+ """Processing instructions become comments."""
114
+ markup = b"""<?PITarget PIContent?>"""
115
+ soup = self.soup(markup)
116
+ assert str(soup).startswith("<!--?PITarget PIContent?-->")
117
+
118
+ def test_cloned_multivalue_node(self):
119
+ markup = b"""<a class="my_class"><p></a>"""
120
+ soup = self.soup(markup)
121
+ a1, a2 = soup.find_all('a')
122
+ assert a1 == a2
123
+ assert a1 is not a2
124
+
125
+ def test_foster_parenting(self):
126
+ markup = b"""<table><td></tbody>A"""
127
+ soup = self.soup(markup)
128
+ assert "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>" == soup.body.decode()
129
+
130
+ def test_extraction(self):
131
+ """
132
+ Test that extraction does not destroy the tree.
133
+
134
+ https://bugs.launchpad.net/beautifulsoup/+bug/1782928
135
+ """
136
+
137
+ markup = """
138
+ <html><head></head>
139
+ <style>
140
+ </style><script></script><body><p>hello</p></body></html>
141
+ """
142
+ soup = self.soup(markup)
143
+ [s.extract() for s in soup('script')]
144
+ [s.extract() for s in soup('style')]
145
+
146
+ assert len(soup.find_all("p")) == 1
147
+
148
+ def test_empty_comment(self):
149
+ """
150
+ Test that empty comment does not break structure.
151
+
152
+ https://bugs.launchpad.net/beautifulsoup/+bug/1806598
153
+ """
154
+
155
+ markup = """
156
+ <html>
157
+ <body>
158
+ <form>
159
+ <!----><input type="text">
160
+ </form>
161
+ </body>
162
+ </html>
163
+ """
164
+ soup = self.soup(markup)
165
+ inputs = []
166
+ for form in soup.find_all('form'):
167
+ inputs.extend(form.find_all('input'))
168
+ assert len(inputs) == 1
169
+
170
+ def test_tracking_line_numbers(self):
171
+ # The html.parser TreeBuilder keeps track of line number and
172
+ # position of each element.
173
+ markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
174
+ soup = self.soup(markup)
175
+ assert 2 == soup.p.sourceline
176
+ assert 5 == soup.p.sourcepos
177
+ assert "sourceline" == soup.p.find('sourceline').name
178
+
179
+ # You can deactivate this behavior.
180
+ soup = self.soup(markup, store_line_numbers=False)
181
+ assert "sourceline" == soup.p.sourceline.name
182
+ assert "sourcepos" == soup.p.sourcepos.name
183
+
184
+ def test_special_string_containers(self):
185
+ # The html5lib tree builder doesn't support this standard feature,
186
+ # because there's no way of knowing, when a string is created,
187
+ # where in the tree it will eventually end up.
188
+ pass
189
+
190
+ def test_html5_attributes(self):
191
+ # The html5lib TreeBuilder can convert any entity named in
192
+ # the HTML5 spec to a sequence of Unicode characters, and
193
+ # convert those Unicode characters to a (potentially
194
+ # different) named entity on the way out.
195
+ #
196
+ # This is a copy of the same test from
197
+ # HTMLParserTreeBuilderSmokeTest. It's not in the superclass
198
+ # because the lxml HTML TreeBuilder _doesn't_ work this way.
199
+ for input_element, output_unicode, output_element in (
200
+ ("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
201
+ ('&models;', '\u22a7', b'&models;'),
202
+ ('&Nfr;', '\U0001d511', b'&Nfr;'),
203
+ ('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
204
+ ('&not;', '\xac', b'&not;'),
205
+ ('&Not;', '\u2aec', b'&Not;'),
206
+ ('&quot;', '"', b'"'),
207
+ ('&there4;', '\u2234', b'&there4;'),
208
+ ('&Therefore;', '\u2234', b'&there4;'),
209
+ ('&therefore;', '\u2234', b'&there4;'),
210
+ ("&fjlig;", 'fj', b'fj'),
211
+ ("&sqcup;", '\u2294', b'&sqcup;'),
212
+ ("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
213
+ ("&apos;", "'", b"'"),
214
+ ("&verbar;", "|", b"|"),
215
+ ):
216
+ markup = '<div>%s</div>' % input_element
217
+ div = self.soup(markup).div
218
+ without_element = div.encode()
219
+ expect = b"<div>%s</div>" % output_unicode.encode("utf8")
220
+ assert without_element == expect
221
+
222
+ with_element = div.encode(formatter="html")
223
+ expect = b"<div>%s</div>" % output_element
224
+ assert with_element == expect
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_htmlparser.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests to ensure that the html.parser tree builder generates good
2
+ trees."""
3
+
4
+ from pdb import set_trace
5
+ import pickle
6
+ import pytest
7
+ import warnings
8
+ from bs4.builder import (
9
+ HTMLParserTreeBuilder,
10
+ ParserRejectedMarkup,
11
+ XMLParsedAsHTMLWarning,
12
+ )
13
+ from bs4.builder._htmlparser import BeautifulSoupHTMLParser
14
+ from . import SoupTest, HTMLTreeBuilderSmokeTest
15
+
16
+ class TestHTMLParserTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
17
+
18
+ default_builder = HTMLParserTreeBuilder
19
+
20
+ def test_rejected_input(self):
21
+ # Python's html.parser will occasionally reject markup,
22
+ # especially when there is a problem with the initial DOCTYPE
23
+ # declaration. Different versions of Python sound the alarm in
24
+ # different ways, but Beautiful Soup consistently raises
25
+ # errors as ParserRejectedMarkup exceptions.
26
+ bad_markup = [
27
+ # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28873
28
+ # https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/519e5b4269a01185a0d5e76295251921da2f0700
29
+ # https://github.com/python/cpython/issues/81928
30
+ b'\n<![\xff\xfe\xfe\xcd\x00',
31
+
32
+ #https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/de32aa55785be29bbc72a1a8e06b00611fb3d9f8
33
+ # https://github.com/python/cpython/issues/78661
34
+ #
35
+ b'<![n\x00',
36
+ b"<![UNKNOWN[]]>",
37
+ ]
38
+ for markup in bad_markup:
39
+ with pytest.raises(ParserRejectedMarkup):
40
+ soup = self.soup(markup)
41
+
42
+ def test_namespaced_system_doctype(self):
43
+ # html.parser can't handle namespaced doctypes, so skip this one.
44
+ pass
45
+
46
+ def test_namespaced_public_doctype(self):
47
+ # html.parser can't handle namespaced doctypes, so skip this one.
48
+ pass
49
+
50
+ def test_builder_is_pickled(self):
51
+ """Unlike most tree builders, HTMLParserTreeBuilder and will
52
+ be restored after pickling.
53
+ """
54
+ tree = self.soup("<a><b>foo</a>")
55
+ dumped = pickle.dumps(tree, 2)
56
+ loaded = pickle.loads(dumped)
57
+ assert isinstance(loaded.builder, type(tree.builder))
58
+
59
+ def test_redundant_empty_element_closing_tags(self):
60
+ self.assert_soup('<br></br><br></br><br></br>', "<br/><br/><br/>")
61
+ self.assert_soup('</br></br></br>', "")
62
+
63
+ def test_empty_element(self):
64
+ # This verifies that any buffered data present when the parser
65
+ # finishes working is handled.
66
+ self.assert_soup("foo &# bar", "foo &amp;# bar")
67
+
68
+ def test_tracking_line_numbers(self):
69
+ # The html.parser TreeBuilder keeps track of line number and
70
+ # position of each element.
71
+ markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
72
+ soup = self.soup(markup)
73
+ assert 2 == soup.p.sourceline
74
+ assert 3 == soup.p.sourcepos
75
+ assert "sourceline" == soup.p.find('sourceline').name
76
+
77
+ # You can deactivate this behavior.
78
+ soup = self.soup(markup, store_line_numbers=False)
79
+ assert "sourceline" == soup.p.sourceline.name
80
+ assert "sourcepos" == soup.p.sourcepos.name
81
+
82
+ def test_on_duplicate_attribute(self):
83
+ # The html.parser tree builder has a variety of ways of
84
+ # handling a tag that contains the same attribute multiple times.
85
+
86
+ markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">'
87
+
88
+ # If you don't provide any particular value for
89
+ # on_duplicate_attribute, later values replace earlier values.
90
+ soup = self.soup(markup)
91
+ assert "url3" == soup.a['href']
92
+ assert ["cls"] == soup.a['class']
93
+ assert "id" == soup.a['id']
94
+
95
+ # You can also get this behavior explicitly.
96
+ def assert_attribute(on_duplicate_attribute, expected):
97
+ soup = self.soup(
98
+ markup, on_duplicate_attribute=on_duplicate_attribute
99
+ )
100
+ assert expected == soup.a['href']
101
+
102
+ # Verify that non-duplicate attributes are treated normally.
103
+ assert ["cls"] == soup.a['class']
104
+ assert "id" == soup.a['id']
105
+ assert_attribute(None, "url3")
106
+ assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3")
107
+
108
+ # You can ignore subsequent values in favor of the first.
109
+ assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1")
110
+
111
+ # And you can pass in a callable that does whatever you want.
112
+ def accumulate(attrs, key, value):
113
+ if not isinstance(attrs[key], list):
114
+ attrs[key] = [attrs[key]]
115
+ attrs[key].append(value)
116
+ assert_attribute(accumulate, ["url1", "url2", "url3"])
117
+
118
+ def test_html5_attributes(self):
119
+ # The html.parser TreeBuilder can convert any entity named in
120
+ # the HTML5 spec to a sequence of Unicode characters, and
121
+ # convert those Unicode characters to a (potentially
122
+ # different) named entity on the way out.
123
+ for input_element, output_unicode, output_element in (
124
+ ("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
125
+ ('&models;', '\u22a7', b'&models;'),
126
+ ('&Nfr;', '\U0001d511', b'&Nfr;'),
127
+ ('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
128
+ ('&not;', '\xac', b'&not;'),
129
+ ('&Not;', '\u2aec', b'&Not;'),
130
+ ('&quot;', '"', b'"'),
131
+ ('&there4;', '\u2234', b'&there4;'),
132
+ ('&Therefore;', '\u2234', b'&there4;'),
133
+ ('&therefore;', '\u2234', b'&there4;'),
134
+ ("&fjlig;", 'fj', b'fj'),
135
+ ("&sqcup;", '\u2294', b'&sqcup;'),
136
+ ("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
137
+ ("&apos;", "'", b"'"),
138
+ ("&verbar;", "|", b"|"),
139
+ ):
140
+ markup = '<div>%s</div>' % input_element
141
+ div = self.soup(markup).div
142
+ without_element = div.encode()
143
+ expect = b"<div>%s</div>" % output_unicode.encode("utf8")
144
+ assert without_element == expect
145
+
146
+ with_element = div.encode(formatter="html")
147
+ expect = b"<div>%s</div>" % output_element
148
+ assert with_element == expect
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_lxml.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests to ensure that the lxml tree builder generates good trees."""
2
+
3
+ import pickle
4
+ import pytest
5
+ import re
6
+ import warnings
7
+ from . import LXML_PRESENT, LXML_VERSION
8
+
9
+ if LXML_PRESENT:
10
+ from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
11
+
12
+ from bs4 import (
13
+ BeautifulSoup,
14
+ BeautifulStoneSoup,
15
+ )
16
+ from bs4.element import Comment, Doctype, SoupStrainer
17
+ from . import (
18
+ HTMLTreeBuilderSmokeTest,
19
+ XMLTreeBuilderSmokeTest,
20
+ SOUP_SIEVE_PRESENT,
21
+ SoupTest,
22
+ )
23
+
24
+ @pytest.mark.skipif(
25
+ not LXML_PRESENT,
26
+ reason="lxml seems not to be present, not testing its tree builder."
27
+ )
28
+ class TestLXMLTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
29
+ """See ``HTMLTreeBuilderSmokeTest``."""
30
+
31
+ @property
32
+ def default_builder(self):
33
+ return LXMLTreeBuilder
34
+
35
+ def test_out_of_range_entity(self):
36
+ self.assert_soup(
37
+ "<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
38
+ self.assert_soup(
39
+ "<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
40
+ self.assert_soup(
41
+ "<p>foo&#1000000000;bar</p>", "<p>foobar</p>")
42
+
43
+ def test_entities_in_foreign_document_encoding(self):
44
+ # We can't implement this case correctly because by the time we
45
+ # hear about markup like "&#147;", it's been (incorrectly) converted into
46
+ # a string like u'\x93'
47
+ pass
48
+
49
+ # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
50
+ # test if an old version of lxml is installed.
51
+
52
+ @pytest.mark.skipif(
53
+ not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
54
+ reason="Skipping doctype test for old version of lxml to avoid segfault."
55
+ )
56
+ def test_empty_doctype(self):
57
+ soup = self.soup("<!DOCTYPE>")
58
+ doctype = soup.contents[0]
59
+ assert "" == doctype.strip()
60
+
61
+ def test_beautifulstonesoup_is_xml_parser(self):
62
+ # Make sure that the deprecated BSS class uses an xml builder
63
+ # if one is installed.
64
+ with warnings.catch_warnings(record=True) as w:
65
+ soup = BeautifulStoneSoup("<b />")
66
+ assert "<b/>" == str(soup.b)
67
+ [warning] = w
68
+ assert warning.filename == __file__
69
+ assert "BeautifulStoneSoup class is deprecated" in str(warning.message)
70
+
71
+ def test_tracking_line_numbers(self):
72
+ # The lxml TreeBuilder cannot keep track of line numbers from
73
+ # the original markup. Even if you ask for line numbers, we
74
+ # don't have 'em.
75
+ #
76
+ # This means that if you have a tag like <sourceline> or
77
+ # <sourcepos>, attribute access will find it rather than
78
+ # giving you a numeric answer.
79
+ soup = self.soup(
80
+ "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
81
+ store_line_numbers=True
82
+ )
83
+ assert "sourceline" == soup.p.sourceline.name
84
+ assert "sourcepos" == soup.p.sourcepos.name
85
+
86
+ @pytest.mark.skipif(
87
+ not LXML_PRESENT,
88
+ reason="lxml seems not to be present, not testing its XML tree builder."
89
+ )
90
+ class TestLXMLXMLTreeBuilder(SoupTest, XMLTreeBuilderSmokeTest):
91
+ """See ``HTMLTreeBuilderSmokeTest``."""
92
+
93
+ @property
94
+ def default_builder(self):
95
+ return LXMLTreeBuilderForXML
96
+
97
+ def test_namespace_indexing(self):
98
+ soup = self.soup(
99
+ '<?xml version="1.1"?>\n'
100
+ '<root>'
101
+ '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
102
+ '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
103
+ '<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
104
+ '<subtag xmlns="http://another-unprefixed-namespace.com">'
105
+ '<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
106
+ '</prefix2:tag3>'
107
+ '</root>'
108
+ )
109
+
110
+ # The BeautifulSoup object includes every namespace prefix
111
+ # defined in the entire document. This is the default set of
112
+ # namespaces used by soupsieve.
113
+ #
114
+ # Un-prefixed namespaces are not included, and if a given
115
+ # prefix is defined twice, only the first prefix encountered
116
+ # in the document shows up here.
117
+ assert soup._namespaces == {
118
+ 'xml': 'http://www.w3.org/XML/1998/namespace',
119
+ 'prefix': 'http://prefixed-namespace.com',
120
+ 'prefix2': 'http://another-namespace.com'
121
+ }
122
+
123
+ # A Tag object includes only the namespace prefixes
124
+ # that were in scope when it was parsed.
125
+
126
+ # We do not track un-prefixed namespaces as we can only hold
127
+ # one (the first one), and it will be recognized as the
128
+ # default namespace by soupsieve, even when operating from a
129
+ # tag with a different un-prefixed namespace.
130
+ assert soup.tag._namespaces == {
131
+ 'xml': 'http://www.w3.org/XML/1998/namespace',
132
+ }
133
+
134
+ assert soup.tag2._namespaces == {
135
+ 'prefix': 'http://prefixed-namespace.com',
136
+ 'xml': 'http://www.w3.org/XML/1998/namespace',
137
+ }
138
+
139
+ assert soup.subtag._namespaces == {
140
+ 'prefix2': 'http://another-namespace.com',
141
+ 'xml': 'http://www.w3.org/XML/1998/namespace',
142
+ }
143
+
144
+ assert soup.subsubtag._namespaces == {
145
+ 'prefix2': 'http://another-namespace.com',
146
+ 'xml': 'http://www.w3.org/XML/1998/namespace',
147
+ }
148
+
149
+
150
+ @pytest.mark.skipif(
151
+ not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed"
152
+ )
153
+ def test_namespace_interaction_with_select_and_find(self):
154
+ # Demonstrate how namespaces interact with select* and
155
+ # find* methods.
156
+
157
+ soup = self.soup(
158
+ '<?xml version="1.1"?>\n'
159
+ '<root>'
160
+ '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
161
+ '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</tag>'
162
+ '<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
163
+ '<prefix:tag3>'
164
+ '</subtag>'
165
+ '</root>'
166
+ )
167
+
168
+ # soupselect uses namespace URIs.
169
+ assert soup.select_one('tag').name == 'tag'
170
+ assert soup.select_one('prefix|tag2').name == 'tag2'
171
+
172
+ # If a prefix is declared more than once, only the first usage
173
+ # is registered with the BeautifulSoup object.
174
+ assert soup.select_one('prefix|tag3') is None
175
+
176
+ # But you can always explicitly specify a namespace dictionary.
177
+ assert soup.select_one(
178
+ 'prefix|tag3', namespaces=soup.subtag._namespaces
179
+ ).name == 'tag3'
180
+
181
+ # And a Tag (as opposed to the BeautifulSoup object) will
182
+ # have a set of default namespaces scoped to that Tag.
183
+ assert soup.subtag.select_one('prefix|tag3').name=='tag3'
184
+
185
+ # the find() methods aren't fully namespace-aware; they just
186
+ # look at prefixes.
187
+ assert soup.find('tag').name == 'tag'
188
+ assert soup.find('prefix:tag2').name == 'tag2'
189
+ assert soup.find('prefix:tag3').name == 'tag3'
190
+ assert soup.subtag.find('prefix:tag3').name == 'tag3'
191
+
192
+ def test_pickle_restores_builder(self):
193
+ # The lxml TreeBuilder is not picklable, so when unpickling
194
+ # a document created with it, a new TreeBuilder of the
195
+ # appropriate class is created.
196
+ soup = self.soup("<a>some markup</a>")
197
+ assert isinstance(soup.builder, self.default_builder)
198
+ pickled = pickle.dumps(soup)
199
+ unpickled = pickle.loads(pickled)
200
+
201
+ assert "some markup" == unpickled.a.string
202
+ assert unpickled.builder != soup.builder
203
+ assert isinstance(unpickled.builder, self.default_builder)
vlmpy310/lib/python3.10/site-packages/bs4/tests/test_navigablestring.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from bs4.element import (
4
+ CData,
5
+ Comment,
6
+ Declaration,
7
+ Doctype,
8
+ NavigableString,
9
+ RubyParenthesisString,
10
+ RubyTextString,
11
+ Script,
12
+ Stylesheet,
13
+ TemplateString,
14
+ )
15
+
16
+ from . import SoupTest
17
+
18
+ class TestNavigableString(SoupTest):
19
+
20
+ def test_text_acquisition_methods(self):
21
+ # These methods are intended for use against Tag, but they
22
+ # work on NavigableString as well,
23
+
24
+ s = NavigableString("fee ")
25
+ cdata = CData("fie ")
26
+ comment = Comment("foe ")
27
+
28
+ assert "fee " == s.get_text()
29
+ assert "fee" == s.get_text(strip=True)
30
+ assert ["fee "] == list(s.strings)
31
+ assert ["fee"] == list(s.stripped_strings)
32
+ assert ["fee "] == list(s._all_strings())
33
+
34
+ assert "fie " == cdata.get_text()
35
+ assert "fie" == cdata.get_text(strip=True)
36
+ assert ["fie "] == list(cdata.strings)
37
+ assert ["fie"] == list(cdata.stripped_strings)
38
+ assert ["fie "] == list(cdata._all_strings())
39
+
40
+ # Since a Comment isn't normally considered 'text',
41
+ # these methods generally do nothing.
42
+ assert "" == comment.get_text()
43
+ assert [] == list(comment.strings)
44
+ assert [] == list(comment.stripped_strings)
45
+ assert [] == list(comment._all_strings())
46
+
47
+ # Unless you specifically say that comments are okay.
48
+ assert "foe" == comment.get_text(strip=True, types=Comment)
49
+ assert "foe " == comment.get_text(types=(Comment, NavigableString))
50
+
51
+ def test_string_has_immutable_name_property(self):
52
+ # string.name is defined as None and can't be modified
53
+ string = self.soup("s").string
54
+ assert None == string.name
55
+ with pytest.raises(AttributeError):
56
+ string.name = 'foo'
57
+
58
+ class TestNavigableStringSubclasses(SoupTest):
59
+
60
+ def test_cdata(self):
61
+ # None of the current builders turn CDATA sections into CData
62
+ # objects, but you can create them manually.
63
+ soup = self.soup("")
64
+ cdata = CData("foo")
65
+ soup.insert(1, cdata)
66
+ assert str(soup) == "<![CDATA[foo]]>"
67
+ assert soup.find(string="foo") == "foo"
68
+ assert soup.contents[0] == "foo"
69
+
70
+ def test_cdata_is_never_formatted(self):
71
+ """Text inside a CData object is passed into the formatter.
72
+
73
+ But the return value is ignored.
74
+ """
75
+
76
+ self.count = 0
77
+ def increment(*args):
78
+ self.count += 1
79
+ return "BITTER FAILURE"
80
+
81
+ soup = self.soup("")
82
+ cdata = CData("<><><>")
83
+ soup.insert(1, cdata)
84
+ assert b"<![CDATA[<><><>]]>" == soup.encode(formatter=increment)
85
+ assert 1 == self.count
86
+
87
+ def test_doctype_ends_in_newline(self):
88
+ # Unlike other NavigableString subclasses, a DOCTYPE always ends
89
+ # in a newline.
90
+ doctype = Doctype("foo")
91
+ soup = self.soup("")
92
+ soup.insert(1, doctype)
93
+ assert soup.encode() == b"<!DOCTYPE foo>\n"
94
+
95
+ def test_declaration(self):
96
+ d = Declaration("foo")
97
+ assert "<?foo?>" == d.output_ready()
98
+
99
+ def test_default_string_containers(self):
100
+ # In some cases, we use different NavigableString subclasses for
101
+ # the same text in different tags.
102
+ soup = self.soup(
103
+ "<div>text</div><script>text</script><style>text</style>"
104
+ )
105
+ assert [NavigableString, Script, Stylesheet] == [
106
+ x.__class__ for x in soup.find_all(string=True)
107
+ ]
108
+
109
+ # The TemplateString is a little unusual because it's generally found
110
+ # _inside_ children of a <template> element, not a direct child of the
111
+ # <template> element.
112
+ soup = self.soup(
113
+ "<template>Some text<p>In a tag</p></template>Some text outside"
114
+ )
115
+ assert all(
116
+ isinstance(x, TemplateString)
117
+ for x in soup.template._all_strings(types=None)
118
+ )
119
+
120
+ # Once the <template> tag closed, we went back to using
121
+ # NavigableString.
122
+ outside = soup.template.next_sibling
123
+ assert isinstance(outside, NavigableString)
124
+ assert not isinstance(outside, TemplateString)
125
+
126
+ # The TemplateString is also unusual because it can contain
127
+ # NavigableString subclasses of _other_ types, such as
128
+ # Comment.
129
+ markup = b"<template>Some text<p>In a tag</p><!--with a comment--></template>"
130
+ soup = self.soup(markup)
131
+ assert markup == soup.template.encode("utf8")
132
+
133
+ def test_ruby_strings(self):
134
+ markup = "<ruby>漢 <rp>(</rp><rt>kan</rt><rp>)</rp> 字 <rp>(</rp><rt>ji</rt><rp>)</rp></ruby>"
135
+ soup = self.soup(markup)
136
+ assert isinstance(soup.rp.string, RubyParenthesisString)
137
+ assert isinstance(soup.rt.string, RubyTextString)
138
+
139
+ # Just as a demo, here's what this means for get_text usage.
140
+ assert "漢字" == soup.get_text(strip=True)
141
+ assert "漢(kan)字(ji)" == soup.get_text(
142
+ strip=True,
143
+ types=(NavigableString, RubyTextString, RubyParenthesisString)
144
+ )