diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/INSTALLER b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/LICENSE b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/LICENSE
@@ -0,0 +1,279 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see https://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations, which became
+Zope Corporation. In 2001, the Python Software Foundation (PSF, see
+https://www.python.org/psf/) was formed, a non-profit organization
+created specifically to own Python-related Intellectual Property.
+Zope Corporation was a sponsoring member of the PSF.
+
+All Python releases are Open Source (see https://opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2 and above 2.1.1 2001-now PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+Python software and documentation are licensed under the
+Python Software Foundation License Version 2.
+
+Starting with Python 3.8.6, examples, recipes, and other code in
+the documentation are dual licensed under the PSF License Version 2
+and the Zero-Clause BSD license.
+
+Some software incorporated into Python is under different licenses.
+The licenses are listed with code falling under that license.
+
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
+----------------------------------------------------------------------
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/METADATA b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..da971631f809cf56ca2e1cf5caafcfe1e78c19f3
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/METADATA
@@ -0,0 +1,126 @@
+Metadata-Version: 2.1
+Name: aiohappyeyeballs
+Version: 2.4.2
+Summary: Happy Eyeballs for asyncio
+Home-page: https://github.com/aio-libs/aiohappyeyeballs
+License: Python-2.0.1
+Author: J. Nick Koston
+Author-email: nick@koston.org
+Requires-Python: >=3.8
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Software Development :: Libraries
+Project-URL: Bug Tracker, https://github.com/aio-libs/aiohappyeyeballs/issues
+Project-URL: Changelog, https://github.com/aio-libs/aiohappyeyeballs/blob/main/CHANGELOG.md
+Project-URL: Documentation, https://aiohappyeyeballs.readthedocs.io
+Project-URL: Repository, https://github.com/aio-libs/aiohappyeyeballs
+Description-Content-Type: text/markdown
+
+# aiohappyeyeballs
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+---
+
+**Documentation**: https://aiohappyeyeballs.readthedocs.io
+
+**Source Code**: https://github.com/aio-libs/aiohappyeyeballs
+
+---
+
+[Happy Eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs)
+([RFC 8305](https://www.rfc-editor.org/rfc/rfc8305.html))
+
+## Use case
+
+This library exists to allow connecting with
+[Happy Eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs)
+([RFC 8305](https://www.rfc-editor.org/rfc/rfc8305.html))
+when you
+already have a list of addrinfo and not a DNS name.
+
+The stdlib version of `loop.create_connection()`
+will only work when you pass in an unresolved name which
+is not a good fit when using DNS caching or resolving
+names via another method such as `zeroconf`.
+
+## Installation
+
+Install this via pip (or your favourite package manager):
+
+`pip install aiohappyeyeballs`
+
+## License
+
+[aiohappyeyeballs is licensed under the same terms as cpython itself.](https://github.com/python/cpython/blob/main/LICENSE)
+
+## Example usage
+
+```python
+
+addr_infos = await loop.getaddrinfo("example.org", 80)
+
+socket = await start_connection(addr_infos)
+socket = await start_connection(addr_infos, local_addr_infos=local_addr_infos, happy_eyeballs_delay=0.2)
+
+transport, protocol = await loop.create_connection(
+ MyProtocol, sock=socket, ...)
+
+# Remove the first address for each family from addr_info
+pop_addr_infos_interleave(addr_info, 1)
+
+# Remove all matching address from addr_info
+remove_addr_infos(addr_info, "dead::beef::")
+
+# Convert a local_addr to local_addr_infos
+local_addr_infos = addr_to_addr_infos(("127.0.0.1",0))
+```
+
+## Credits
+
+This package contains code from cpython and is licensed under the same terms as cpython itself.
+
+This package was created with
+[Copier](https://copier.readthedocs.io/) and the
+[browniebroke/pypackage-template](https://github.com/browniebroke/pypackage-template)
+project template.
+
diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/RECORD b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..c6e8b66a4fd7b5d896a36386d8bb774873a023de
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/RECORD
@@ -0,0 +1,18 @@
+aiohappyeyeballs-2.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+aiohappyeyeballs-2.4.2.dist-info/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936
+aiohappyeyeballs-2.4.2.dist-info/METADATA,sha256=_ziodLe_RgvRdvRSJPZXCJ_yX5J6Phw8-mCHS4Mf__k,6038
+aiohappyeyeballs-2.4.2.dist-info/RECORD,,
+aiohappyeyeballs-2.4.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+aiohappyeyeballs-2.4.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+aiohappyeyeballs/__init__.py,sha256=Mh38VpQBOrri17V9BbQZlMA4wiAWECVXWRThzRStbaM,317
+aiohappyeyeballs/__pycache__/__init__.cpython-310.pyc,,
+aiohappyeyeballs/__pycache__/impl.cpython-310.pyc,,
+aiohappyeyeballs/__pycache__/staggered.cpython-310.pyc,,
+aiohappyeyeballs/__pycache__/types.cpython-310.pyc,,
+aiohappyeyeballs/__pycache__/utils.cpython-310.pyc,,
+aiohappyeyeballs/_staggered.py,sha256=IJaJ7byBVtgVwVzbcRluzXy4co3v3KdRDBIDXj6YVHE,3691
+aiohappyeyeballs/impl.py,sha256=f0gsqRwZI4EaBifuNnYuggQkubFOK7PO_jnvmZAdJyU,7252
+aiohappyeyeballs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+aiohappyeyeballs/staggered.py,sha256=jS92EwfrnDQKSK1gU5K_QCnA-m7jz1bf5e2PaI-OKVk,249
+aiohappyeyeballs/types.py,sha256=iYPiBTl5J7YEjnIqEOVUTRPzz2DwqSHBRhvbAlM0zv0,234
+aiohappyeyeballs/utils.py,sha256=W_Oaf1iP8wYRHo6B95eYx-ZxbjpxyWwYgTdkhWqGF5c,3026
diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/REQUESTED b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/WHEEL b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..d73ccaae8e0eea45949b0957a5af034099b36aa4
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/aiohappyeyeballs-2.4.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: poetry-core 1.9.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/__init__.py b/valley/lib/python3.10/site-packages/cpuinfo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0ae1cedef2202096faadc3ff0dd0614f8adf394
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/cpuinfo/__init__.py
@@ -0,0 +1,5 @@
+
+import sys
+from cpuinfo.cpuinfo import *
+
+
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/__main__.py b/valley/lib/python3.10/site-packages/cpuinfo/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..09ebf764121cc9bae49421535e56d0c5a9394290
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/cpuinfo/__main__.py
@@ -0,0 +1,5 @@
+
+import cpuinfo
+
+cpuinfo.main()
+
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dffd4a66ad59015fe8c1d32e981f62c6324e8163
Binary files /dev/null and b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__init__.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64715dcb4f2e0d5e1b8191d88c6c6e1011325fb2
Binary files /dev/null and b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/__main__.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f5b20c5e0b6330c807421040caf231be5919c60
Binary files /dev/null and b/valley/lib/python3.10/site-packages/cpuinfo/__pycache__/cpuinfo.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/cpuinfo/cpuinfo.py b/valley/lib/python3.10/site-packages/cpuinfo/cpuinfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea2f90e397404bb200d8c74a0331b9ede5c5b106
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/cpuinfo/cpuinfo.py
@@ -0,0 +1,2827 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2014-2022 Matthew Brennan Jones
+# Py-cpuinfo gets CPU info with pure Python
+# It uses the MIT License
+# It is hosted at: https://github.com/workhorsy/py-cpuinfo
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+CPUINFO_VERSION = (9, 0, 0)
+CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
+
+import os, sys
+import platform
+import multiprocessing
+import ctypes
+
+
+CAN_CALL_CPUID_IN_SUBPROCESS = True
+
+g_trace = None
+
+
+class Trace(object):
+ def __init__(self, is_active, is_stored_in_string):
+ self._is_active = is_active
+ if not self._is_active:
+ return
+
+ from datetime import datetime
+ from io import StringIO
+
+ if is_stored_in_string:
+ self._output = StringIO()
+ else:
+ date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
+ self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w')
+
+ self._stdout = StringIO()
+ self._stderr = StringIO()
+ self._err = None
+
+ def header(self, msg):
+ if not self._is_active: return
+
+ from inspect import stack
+ frame = stack()[1]
+ file = frame[1]
+ line = frame[2]
+ self._output.write("{0} ({1} {2})\n".format(msg, file, line))
+ self._output.flush()
+
+ def success(self):
+ if not self._is_active: return
+
+ from inspect import stack
+ frame = stack()[1]
+ file = frame[1]
+ line = frame[2]
+
+ self._output.write("Success ... ({0} {1})\n\n".format(file, line))
+ self._output.flush()
+
+ def fail(self, msg):
+ if not self._is_active: return
+
+ from inspect import stack
+ frame = stack()[1]
+ file = frame[1]
+ line = frame[2]
+
+ if isinstance(msg, str):
+ msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n'
+
+ self._output.write(msg)
+ self._output.write("Failed ... ({0} {1})\n\n".format(file, line))
+ self._output.flush()
+ elif isinstance(msg, Exception):
+ from traceback import format_exc
+ err_string = format_exc()
+ self._output.write("\tFailed ... ({0} {1})\n".format(file, line))
+ self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n')
+ self._output.flush()
+
+ def command_header(self, msg):
+ if not self._is_active: return
+
+ from inspect import stack
+ frame = stack()[3]
+ file = frame[1]
+ line = frame[2]
+ self._output.write("\t{0} ({1} {2})\n".format(msg, file, line))
+ self._output.flush()
+
+ def command_output(self, msg, output):
+ if not self._is_active: return
+
+ self._output.write("\t\t{0}\n".format(msg))
+ self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n')
+ self._output.flush()
+
+ def keys(self, keys, info, new_info):
+ if not self._is_active: return
+
+ from inspect import stack
+ frame = stack()[2]
+ file = frame[1]
+ line = frame[2]
+
+ # List updated keys
+ self._output.write("\tChanged keys ({0} {1})\n".format(file, line))
+ changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]]
+ if changed_keys:
+ for key in changed_keys:
+ self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key]))
+ else:
+ self._output.write('\t\tNone\n')
+
+ # List new keys
+ self._output.write("\tNew keys ({0} {1})\n".format(file, line))
+ new_keys = [key for key in keys if key in new_info and key not in info]
+ if new_keys:
+ for key in new_keys:
+ self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key]))
+ else:
+ self._output.write('\t\tNone\n')
+
+ self._output.write('\n')
+ self._output.flush()
+
+ def write(self, msg):
+ if not self._is_active: return
+
+ self._output.write(msg + '\n')
+ self._output.flush()
+
+ def to_dict(self, info, is_fail):
+ return {
+ 'output' : self._output.getvalue(),
+ 'stdout' : self._stdout.getvalue(),
+ 'stderr' : self._stderr.getvalue(),
+ 'info' : info,
+ 'err' : self._err,
+ 'is_fail' : is_fail
+ }
+
+class DataSource(object):
+ bits = platform.architecture()[0]
+ cpu_count = multiprocessing.cpu_count()
+ is_windows = platform.system().lower() == 'windows'
+ arch_string_raw = platform.machine()
+ uname_string_raw = platform.uname()[5]
+ can_cpuid = True
+
+ @staticmethod
+ def has_proc_cpuinfo():
+ return os.path.exists('/proc/cpuinfo')
+
+ @staticmethod
+ def has_dmesg():
+ return len(_program_paths('dmesg')) > 0
+
+ @staticmethod
+ def has_var_run_dmesg_boot():
+ uname = platform.system().strip().strip('"').strip("'").strip().lower()
+ return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
+
+ @staticmethod
+ def has_cpufreq_info():
+ return len(_program_paths('cpufreq-info')) > 0
+
+ @staticmethod
+ def has_sestatus():
+ return len(_program_paths('sestatus')) > 0
+
+ @staticmethod
+ def has_sysctl():
+ return len(_program_paths('sysctl')) > 0
+
+ @staticmethod
+ def has_isainfo():
+ return len(_program_paths('isainfo')) > 0
+
+ @staticmethod
+ def has_kstat():
+ return len(_program_paths('kstat')) > 0
+
+ @staticmethod
+ def has_sysinfo():
+ uname = platform.system().strip().strip('"').strip("'").strip().lower()
+ is_beos = 'beos' in uname or 'haiku' in uname
+ return is_beos and len(_program_paths('sysinfo')) > 0
+
+ @staticmethod
+ def has_lscpu():
+ return len(_program_paths('lscpu')) > 0
+
+ @staticmethod
+ def has_ibm_pa_features():
+ return len(_program_paths('lsprop')) > 0
+
+ @staticmethod
+ def has_wmic():
+ returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
+ return returncode == 0 and len(output) > 0
+
+ @staticmethod
+ def cat_proc_cpuinfo():
+ return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
+
+ @staticmethod
+ def cpufreq_info():
+ return _run_and_get_stdout(['cpufreq-info'])
+
+ @staticmethod
+ def sestatus_b():
+ return _run_and_get_stdout(['sestatus', '-b'])
+
+ @staticmethod
+ def dmesg_a():
+ return _run_and_get_stdout(['dmesg', '-a'])
+
+ @staticmethod
+ def cat_var_run_dmesg_boot():
+ return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
+
+ @staticmethod
+ def sysctl_machdep_cpu_hw_cpufrequency():
+ return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
+
+ @staticmethod
+ def isainfo_vb():
+ return _run_and_get_stdout(['isainfo', '-vb'])
+
+ @staticmethod
+ def kstat_m_cpu_info():
+ return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
+
+ @staticmethod
+ def sysinfo_cpu():
+ return _run_and_get_stdout(['sysinfo', '-cpu'])
+
+ @staticmethod
+ def lscpu():
+ return _run_and_get_stdout(['lscpu'])
+
+ @staticmethod
+ def ibm_pa_features():
+ import glob
+
+ ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
+ if ibm_features:
+ return _run_and_get_stdout(['lsprop', ibm_features[0]])
+
+ @staticmethod
+ def wmic_cpu():
+ return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
+
+ @staticmethod
+ def winreg_processor_brand():
+ processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString")
+ return processor_brand.strip()
+
+ @staticmethod
+ def winreg_vendor_id_raw():
+ vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier")
+ return vendor_id_raw
+
+ @staticmethod
+ def winreg_arch_string_raw():
+ arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE")
+ return arch_string_raw
+
+ @staticmethod
+ def winreg_hz_actual():
+ hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz")
+ hz_actual = _to_decimal_string(hz_actual)
+ return hz_actual
+
+ @staticmethod
+ def winreg_feature_bits():
+ feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet")
+ return feature_bits
+
+
+def _program_paths(program_name):
+ paths = []
+ exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
+ for p in os.environ['PATH'].split(os.pathsep):
+ p = os.path.join(p, program_name)
+ if os.access(p, os.X_OK):
+ paths.append(p)
+ for e in exts:
+ pext = p + e
+ if os.access(pext, os.X_OK):
+ paths.append(pext)
+ return paths
+
def _run_and_get_stdout(command, pipe_command=None):
	"""Execute *command*, optionally piping its stdout into *pipe_command*.

	Returns (returncode, stdout_text) of the last process in the chain.
	Both stdout and stderr are decoded as UTF-8.
	"""
	from subprocess import Popen, PIPE

	g_trace.command_header('Running command "' + ' '.join(command) + '" ...')

	if pipe_command:
		# Chain: command | pipe_command
		producer = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
		consumer = Popen(pipe_command, stdin=producer.stdout, stdout=PIPE, stderr=PIPE)
		# Close our handle so the producer sees SIGPIPE if the consumer exits
		producer.stdout.close()
	else:
		consumer = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)

	# Collect and decode the output
	raw_out, raw_err = consumer.communicate()
	out_text = raw_out.decode(encoding='UTF-8')
	err_text = raw_err.decode(encoding='UTF-8')

	# Send the result to the logger
	g_trace.command_output('return code:', str(consumer.returncode))
	g_trace.command_output('stdout:', out_text)

	return consumer.returncode, out_text
+
def _read_windows_registry_key(key_name, field_name):
	"""Read one value from HKEY_LOCAL_MACHINE in the Windows registry.

	*key_name* is the sub-key path and *field_name* the value name.
	Returns the stored value. Raises ImportError on platforms without a
	winreg module, or OSError if the key/value does not exist.
	"""
	g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name))

	try:
		import _winreg as winreg  # Python 2 name
	except ImportError:
		# FIX: this used to swallow the failure with "pass", which produced
		# a confusing NameError on "winreg" below when neither module was
		# importable. Let the ImportError propagate instead.
		import winreg  # Python 3 name

	key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
	value = winreg.QueryValueEx(key, field_name)[0]
	winreg.CloseKey(key)
	g_trace.command_output('value:', str(value))
	return value
+
+# Make sure we are running on a supported system
def _check_arch():
	"""Raise an Exception when the host CPU architecture is unsupported."""
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8',
		'PPC_64', 'S390X', 'MIPS_32', 'MIPS_64',
		"RISCV_32", "RISCV_64"]
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 "
			"and some ARM/PPC/S390X/MIPS/RISCV CPUs.")
+
+def _obj_to_b64(thing):
+ import pickle
+ import base64
+
+ a = thing
+ b = pickle.dumps(a)
+ c = base64.b64encode(b)
+ d = c.decode('utf8')
+ return d
+
+def _b64_to_obj(thing):
+ import pickle
+ import base64
+
+ try:
+ a = base64.b64decode(thing)
+ b = pickle.loads(a)
+ return b
+ except Exception:
+ return {}
+
+def _utf_to_str(input):
+ if isinstance(input, list):
+ return [_utf_to_str(element) for element in input]
+ elif isinstance(input, dict):
+ return {_utf_to_str(key): _utf_to_str(value)
+ for key, value in input.items()}
+ else:
+ return input
+
def _copy_new_fields(info, new_info):
	"""Merge recognised fields from *new_info* into *info* in place.

	An existing truthy value in *info* is never overwritten; the 'flags'
	lists are unioned and kept sorted.
	"""
	keys = [
		'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
		'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
		'arch_string_raw', 'uname_string_raw',
		'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
		'stepping', 'model', 'family',
		'processor_type', 'flags',
		'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
	]

	g_trace.keys(keys, info, new_info)

	for key in keys:
		incoming = new_info.get(key, None)
		if incoming and not info.get(key, None):
			# No usable value yet: take the new one
			info[key] = incoming
		elif key == 'flags' and new_info.get('flags'):
			# Both sides have flags: union and keep sorted
			for flag in new_info['flags']:
				if flag not in info['flags']:
					info['flags'].append(flag)
			info['flags'].sort()
+
+def _get_field_actual(cant_be_number, raw_string, field_names):
+ for line in raw_string.splitlines():
+ for field_name in field_names:
+ field_name = field_name.lower()
+ if ':' in line:
+ left, right = line.split(':', 1)
+ left = left.strip().lower()
+ right = right.strip()
+ if left == field_name and len(right) > 0:
+ if cant_be_number:
+ if not right.isdigit():
+ return right
+ else:
+ return right
+
+ return None
+
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
	"""Look up a field via _get_field_actual, convert it, or fall back.

	When *convert_to* is given and the conversion fails, or the field is
	absent, *default_value* is returned.
	"""
	value = _get_field_actual(cant_be_number, raw_string, field_names)

	if value and convert_to:
		try:
			value = convert_to(value)
		except Exception:
			value = default_value

	return default_value if value is None else value
+
+def _to_decimal_string(ticks):
+ try:
+ # Convert to string
+ ticks = '{0}'.format(ticks)
+ # Sometimes ',' is used as a decimal separator
+ ticks = ticks.replace(',', '.')
+
+ # Strip off non numbers and decimal places
+ ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
+ if ticks == '':
+ ticks = '0'
+
+ # Add decimal if missing
+ if '.' not in ticks:
+ ticks = '{0}.0'.format(ticks)
+
+ # Remove trailing zeros
+ ticks = ticks.rstrip('0')
+
+ # Add one trailing zero for empty right side
+ if ticks.endswith('.'):
+ ticks = '{0}0'.format(ticks)
+
+ # Make sure the number can be converted to a float
+ ticks = float(ticks)
+ ticks = '{0}'.format(ticks)
+ return ticks
+ except Exception:
+ return '0.0'
+
+def _hz_short_to_full(ticks, scale):
+ try:
+ # Make sure the number can be converted to a float
+ ticks = float(ticks)
+ ticks = '{0}'.format(ticks)
+
+ # Scale the numbers
+ hz = ticks.lstrip('0')
+ old_index = hz.index('.')
+ hz = hz.replace('.', '')
+ hz = hz.ljust(scale + old_index+1, '0')
+ new_index = old_index + scale
+ hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
+ left, right = hz.split('.')
+ left, right = int(left), int(right)
+ return (left, right)
+ except Exception:
+ return (0, 0)
+
def _hz_friendly_to_full(hz_string):
	"""Parse a human string like '2.80GHz' into full Hz via _hz_short_to_full.

	Returns (0, 0) when no recognised unit suffix or number is present.
	"""
	try:
		text = hz_string.strip().lower()

		# Pick the scale from the unit suffix (None when unrecognised,
		# which makes _hz_short_to_full return (0, 0))
		scale = None
		if text.endswith('ghz'):
			scale = 9
		elif text.endswith('mhz'):
			scale = 6
		elif text.endswith('hz'):
			scale = 0

		number = "".join(ch for ch in text if ch.isdigit() or ch == '.').strip()
		if '.' not in number:
			number += '.0'

		return _hz_short_to_full(number, scale)
	except Exception:
		return (0, 0)
+
def _hz_short_to_friendly(ticks, scale):
	"""Format ticks/scale as a human string such as '2.8000 GHz'.

	Returns '0.0000 Hz' on any failure.
	"""
	try:
		# Full Hz as a digit string with a known decimal position
		whole, frac = _hz_short_to_full(ticks, scale)
		digits = '{0}.{1}'.format(whole, frac)
		dot_index = digits.index('.')
		digits = digits.replace('.', '')

		# Pick the largest unit the magnitude supports
		symbol, unit_scale = "Hz", 0
		if dot_index > 9:
			symbol, unit_scale = "GHz", 9
		elif dot_index > 6:
			symbol, unit_scale = "MHz", 6
		elif dot_index > 3:
			symbol, unit_scale = "KHz", 3

		# Re-insert the decimal point at the scaled position
		digits = '{0}.{1}'.format(digits[:-unit_scale - 1], digits[-unit_scale - 1:])

		# Four decimal places, then strip superfluous trailing zeros
		# (a no-op when the string ends with the unit symbol)
		friendly = '{0:.4f} {1}'.format(float(digits), symbol)
		return friendly.rstrip('0')
	except Exception:
		return '0.0000 Hz'
+
+def _to_friendly_bytes(input):
+ import re
+
+ if not input:
+ return input
+ input = "{0}".format(input)
+
+ formats = {
+ r"^[0-9]+B$" : 'B',
+ r"^[0-9]+K$" : 'KB',
+ r"^[0-9]+M$" : 'MB',
+ r"^[0-9]+G$" : 'GB'
+ }
+
+ for pattern, friendly_size in formats.items():
+ if re.match(pattern, input):
+ return "{0} {1}".format(input[ : -1].strip(), friendly_size)
+
+ return input
+
+def _friendly_bytes_to_int(friendly_bytes):
+ input = friendly_bytes.lower()
+
+ formats = [
+ {'gib' : 1024 * 1024 * 1024},
+ {'mib' : 1024 * 1024},
+ {'kib' : 1024},
+
+ {'gb' : 1024 * 1024 * 1024},
+ {'mb' : 1024 * 1024},
+ {'kb' : 1024},
+
+ {'g' : 1024 * 1024 * 1024},
+ {'m' : 1024 * 1024},
+ {'k' : 1024},
+ {'b' : 1},
+ ]
+
+ try:
+ for entry in formats:
+ pattern = list(entry.keys())[0]
+ multiplier = list(entry.values())[0]
+ if input.endswith(pattern):
+ return int(input.split(pattern)[0].strip()) * multiplier
+
+ except Exception as err:
+ pass
+
+ return friendly_bytes
+
def _parse_cpu_brand_string(cpu_string):
	"""Pull (hz_string, scale) out of a brand string like 'Intel ... @ 2.80GHz'.

	Returns ('0.0', 0) when the brand carries no Hz figure at all.
	"""
	lowered = cpu_string.lower()

	# Bail out early when no frequency is present
	if 'hz' not in lowered:
		return ('0.0', 0)

	scale = 0
	if lowered.endswith('mhz'):
		scale = 6
	elif lowered.endswith('ghz'):
		scale = 9

	# The figure follows '@' when present, otherwise it is the last token
	if '@' in lowered:
		hz = lowered.split('@')[1]
	else:
		hz = lowered.rsplit(None, 1)[1]

	hz = hz.rstrip('mhz').rstrip('ghz').strip()
	hz = _to_decimal_string(hz)

	return (hz, scale)
+
def _parse_cpu_brand_string_dx(cpu_string):
	"""Parse an extended (dmesg-style) brand string into its components.

	Returns a 7-tuple:
	(hz_brand, scale, brand, vendor_id, stepping, model, family)
	where hz_brand is a decimal string like '2.8' ('0.0' when absent) and
	scale is its power-of-ten exponent; unfound fields are None.
	"""
	import re

	# Find all the strings inside brackets ()
	# NOTE(review): pairing '(' and ')' positions by zip assumes brackets
	# are never nested -- TODO confirm for all brand strings encountered.
	starts = [m.start() for m in re.finditer(r"\(", cpu_string)]
	ends = [m.start() for m in re.finditer(r"\)", cpu_string)]
	insides = {k: v for k, v in zip(starts, ends)}
	insides = [cpu_string[start+1 : end] for start, end in insides.items()]

	# Find all the fields
	vendor_id, stepping, model, family = (None, None, None, None)
	for inside in insides:
		for pair in inside.split(','):
			pair = [n.strip() for n in pair.split(':')]
			if len(pair) > 1:
				name, value = pair[0], pair[1]
				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# NOTE(review): lstrip('0x') strips leading '0'/'x'
					# *characters*, not the literal prefix; a value of
					# '0x0' becomes '' and would raise here -- TODO confirm.
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

	# Find the Processor Brand
	# Strip off extra strings in brackets at end
	brand = cpu_string.strip()
	is_working = True
	while is_working:
		is_working = False
		for inside in insides:
			full = "({0})".format(inside)
			if brand.endswith(full):
				brand = brand[ :-len(full)].strip()
				is_working = True

	# Find the Hz in the brand string
	hz_brand, scale = _parse_cpu_brand_string(brand)

	# Find Hz inside brackets () after the brand string
	if hz_brand == '0.0':
		for inside in insides:
			hz = inside
			for entry in ['GHz', 'MHz', 'Hz']:
				if entry in hz:
					# Synthesize a '... @ <number><unit>' string so the
					# regular brand parser can extract the figure
					hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
					hz_brand, scale = _parse_cpu_brand_string(hz)
					break

	return (hz_brand, scale, brand, vendor_id, stepping, model, family)
+
def _parse_dmesg_output(output):
	"""Extract CPU fields from BSD-style dmesg output.

	Returns a dict that may contain: vendor_id_raw, brand_raw, stepping,
	model, family, flags, hz_advertised(_friendly), hz_actual(_friendly).
	Empty values are dropped; {} is returned on any parse failure.
	"""
	try:
		# Get all the dmesg lines that might contain a CPU string
		lines = output.split(' CPU0:')[1:] + \
				output.split(' CPU1:')[1:] + \
				output.split(' CPU:')[1:] + \
				output.split('\nCPU0:')[1:] + \
				output.split('\nCPU1:')[1:] + \
				output.split('\nCPU:')[1:]
		lines = [l.split('\n')[0].strip() for l in lines]

		# Convert the lines to CPU strings
		cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]

		# Find the CPU string that has the most fields
		best_string = None
		highest_count = 0
		for cpu_string in cpu_strings:
			count = sum([n is not None for n in cpu_string])
			if count > highest_count:
				highest_count = count
				best_string = cpu_string

		# If no CPU string was found, return {}
		if not best_string:
			return {}

		hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string

		# Origin
		# Example line: ' Origin="GenuineIntel" Id=0x306a9 Family=0x6 ...'
		if ' Origin=' in output:
			fields = output[output.find(' Origin=') : ].split('\n')[0]
			fields = fields.strip().split()
			fields = [n.strip().split('=') for n in fields]
			fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

			for field in fields:
				name = list(field.keys())[0]
				value = list(field.values())[0]

				if name == 'origin':
					vendor_id = value.strip('"')
				elif name == 'stepping':
					# NOTE(review): lstrip('0x') strips leading '0'/'x'
					# characters, not the literal prefix -- TODO confirm
					# an all-zero value never occurs here.
					stepping = int(value.lstrip('0x'), 16)
				elif name == 'model':
					model = int(value.lstrip('0x'), 16)
				elif name in ['fam', 'family']:
					family = int(value.lstrip('0x'), 16)

		# Features
		# Collect the '<...>' flag lists from each Features category line
		flag_lines = []
		for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
			if category in output:
				flag_lines.append(output.split(category)[1].split('\n')[0])

		flags = []
		for line in flag_lines:
			line = line.split('<')[1].split('>')[0].lower()
			for flag in line.split(','):
				flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)

		if hz_advertised and hz_advertised != '0.0':
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
			info['hz_actual'] = _hz_short_to_full(hz_actual, scale)

		# Drop falsy entries before returning
		return {k: v for k, v in info.items() if v}
	except Exception as err:
		g_trace.fail(err)
		#raise

	return {}
+
+def _parse_arch(arch_string_raw):
+ import re
+
+ arch, bits = None, None
+ arch_string_raw = arch_string_raw.lower()
+
+ # X86
+ if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
+ arch = 'X86_32'
+ bits = 32
+ elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
+ arch = 'X86_64'
+ bits = 64
+ # ARM
+ elif re.match(r'^armv8-a|aarch64|arm64$', arch_string_raw):
+ arch = 'ARM_8'
+ bits = 64
+ elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
+ arch = 'ARM_7'
+ bits = 32
+ elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
+ arch = 'ARM_8'
+ bits = 32
+ # PPC
+ elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
+ arch = 'PPC_32'
+ bits = 32
+ elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
+ arch = 'PPC_64'
+ bits = 64
+ # SPARC
+ elif re.match(r'^sparc32$|^sparc$', arch_string_raw):
+ arch = 'SPARC_32'
+ bits = 32
+ elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
+ arch = 'SPARC_64'
+ bits = 64
+ # S390X
+ elif re.match(r'^s390x$', arch_string_raw):
+ arch = 'S390X'
+ bits = 64
+ elif arch_string_raw == 'mips':
+ arch = 'MIPS_32'
+ bits = 32
+ elif arch_string_raw == 'mips64':
+ arch = 'MIPS_64'
+ bits = 64
+ # RISCV
+ elif re.match(r'^riscv$|^riscv32$|^riscv32be$', arch_string_raw):
+ arch = 'RISCV_32'
+ bits = 32
+ elif re.match(r'^riscv64$|^riscv64be$', arch_string_raw):
+ arch = 'RISCV_64'
+ bits = 64
+
+ return (arch, bits)
+
+def _is_bit_set(reg, bit):
+ mask = 1 << bit
+ is_set = reg & mask > 0
+ return is_set
+
+
def _is_selinux_enforcing(trace):
	"""Return True when SE Linux appears to block executable heap/memory.

	Uses the sestatus tool via DataSource; returns False when the tool is
	missing or fails to run.
	"""
	# Just return if the SE Linux Status Tool is not installed
	if not DataSource.has_sestatus():
		trace.fail('Failed to find sestatus.')
		return False

	# Run the sestatus, and just return if it failed to run
	returncode, output = DataSource.sestatus_b()
	if returncode != 0:
		trace.fail('Failed to run sestatus. Skipping ...')
		return False

	# An explicit "current mode:" line settles the question directly
	for raw_line in output.splitlines():
		entry = raw_line.strip().lower()
		if entry.startswith("current mode:"):
			return entry.endswith("enforcing")

	# Otherwise infer from whether heap/memory execution is allowed
	exec_heap = False
	exec_memory = False
	for raw_line in output.splitlines():
		entry = raw_line.strip().lower()
		if entry.startswith("allow_execheap") and entry.endswith("on"):
			exec_heap = True
		elif entry.startswith("allow_execmem") and entry.endswith("on"):
			exec_memory = True

	trace.command_output('can_selinux_exec_heap:', exec_heap)
	trace.command_output('can_selinux_exec_memory:', exec_memory)

	return not (exec_heap and exec_memory)
+
+def _filter_dict_keys_with_empty_values(info, acceptable_values = {}):
+ filtered_info = {}
+ for key in info:
+ value = info[key]
+
+ # Keep if value is acceptable
+ if key in acceptable_values:
+ if acceptable_values[key] == value:
+ filtered_info[key] = value
+ continue
+
+ # Filter out None, 0, "", (), {}, []
+ if not value:
+ continue
+
+ # Filter out (0, 0)
+ if value == (0, 0):
+ continue
+
+ # Filter out -1
+ if value == -1:
+ continue
+
+ # Filter out strings that start with "0.0"
+ if type(value) == str and value.startswith('0.0'):
+ continue
+
+ filtered_info[key] = value
+
+ return filtered_info
+
class ASM(object):
	"""Maps raw machine code into executable memory and exposes it as a
	ctypes function.

	Lifecycle: construct -> compile() -> run() -> free().
	Uses VirtualAlloc/VirtualProtect on Windows, an executable mmap
	segment elsewhere.
	"""

	def __init__(self, restype=None, argtypes=(), machine_code=[]):
		# NOTE(review): machine_code defaults to a shared mutable list --
		# harmless as long as callers never mutate it in place.
		self.restype = restype          # ctypes return type of the compiled function
		self.argtypes = argtypes        # ctypes argument types
		self.machine_code = machine_code  # list of bytes objects, concatenated by compile()
		self.prochandle = None          # Windows process handle (for the cache flush)
		self.mm = None                  # mmap segment (non-Windows only)
		self.func = None                # ctypes callable built by compile()
		self.address = None             # address of the executable memory
		self.size = 0                   # size of the machine code in bytes

	def compile(self):
		"""Copy the machine code into executable memory and build self.func."""
		machine_code = bytes.join(b'', self.machine_code)
		self.size = ctypes.c_size_t(len(machine_code))

		if DataSource.is_windows:
			# Allocate a memory segment the size of the machine code, and make it executable
			size = len(machine_code)
			# Alloc at least 1 page to ensure we own all pages that we want to change protection on
			if size < 0x1000: size = 0x1000
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_READWRITE = ctypes.c_ulong(0x4)
			pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
			pfnVirtualAlloc.restype = ctypes.c_void_p
			self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
			if not self.address:
				raise Exception("Failed to VirtualAlloc")

			# Copy the machine code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(self.address, machine_code, size) < 0:
				raise Exception("Failed to memmove")

			# Enable execute permissions
			PAGE_EXECUTE = ctypes.c_ulong(0x10)
			old_protect = ctypes.c_ulong(0)
			pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
			res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
			if not res:
				raise Exception("Failed VirtualProtect")

			# Flush Instruction Cache
			# First, get process Handle
			if not self.prochandle:
				pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
				pfnGetCurrentProcess.restype = ctypes.c_void_p
				self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
			# Actually flush cache
			res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size))
			if not res:
				raise Exception("Failed FlushInstructionCache")
		else:
			from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC

			# Allocate a private and executable memory segment the size of the machine code
			machine_code = bytes.join(b'', self.machine_code)
			self.size = len(machine_code)
			self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC)

			# Copy the machine code into the memory segment
			self.mm.write(machine_code)
			self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))

		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes)
		self.func = functype(self.address)

	def run(self):
		"""Invoke the compiled machine code and return its result."""
		# Call the machine code like a function
		retval = self.func()

		return retval

	def free(self):
		"""Release the executable memory and reset all state."""
		# Free the function memory segment
		if DataSource.is_windows:
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE)
		else:
			self.mm.close()

		self.prochandle = None
		self.mm = None
		self.func = None
		self.address = None
		self.size = 0
+
+
class CPUID(object):
	"""Queries the CPU directly by assembling and executing CPUID (and
	RDTSC) machine code via the ASM helper.

	Warning: executing raw machine code can crash the process; callers in
	this module run it behind a safety barrier (see
	_get_cpu_info_from_cpuid_actual's docstring).
	"""

	def __init__(self, trace=None):
		# A disabled Trace keeps the logging calls harmless when no trace is supplied
		if trace is None:
			trace = Trace(False, False)

		# Figure out if SE Linux is on and in enforcing mode
		self.is_selinux_enforcing = _is_selinux_enforcing(trace)

	def _asm_func(self, restype=None, argtypes=(), machine_code=[]):
		"""Compile *machine_code* into a callable ASM object (caller frees it)."""
		asm = ASM(restype, argtypes, machine_code)
		asm.compile()
		return asm

	def _run_asm(self, *machine_code):
		"""Compile, run once, and free; returns the code's uint32 result."""
		asm = ASM(ctypes.c_uint32, (), machine_code)
		asm.compile()
		retval = asm.run()
		asm.free()
		return retval

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
	def get_vendor_id(self):
		"""Return the 12-character vendor ID string from CPUID leaf 0."""
		# EBX
		ebx = self._run_asm(
			b"\x31\xC0",        # xor eax,eax
			b"\x0F\xA2"         # cpuid
			b"\x89\xD8"         # mov ax,bx
			b"\xC3"             # ret
		)

		# ECX
		ecx = self._run_asm(
			b"\x31\xC0",        # xor eax,eax
			b"\x0f\xa2"         # cpuid
			b"\x89\xC8"         # mov ax,cx
			b"\xC3"             # ret
		)

		# EDX
		edx = self._run_asm(
			b"\x31\xC0",        # xor eax,eax
			b"\x0f\xa2"         # cpuid
			b"\x89\xD0"         # mov ax,dx
			b"\xC3"             # ret
		)

		# Each byte of EBX, EDX, ECX (in that order) is one ASCII character of the name
		vendor_id = []
		for reg in [ebx, edx, ecx]:
			for n in [0, 8, 16, 24]:
				vendor_id.append(chr((reg >> n) & 0xFF))
		vendor_id = ''.join(vendor_id)

		return vendor_id

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_info(self):
		"""Return stepping/model/family/processor_type decoded from CPUID leaf 1 EAX."""
		# EAX
		eax = self._run_asm(
			b"\xB8\x01\x00\x00\x00",   # mov eax,0x1"
			b"\x0f\xa2"                # cpuid
			b"\xC3"                    # ret
		)

		# Get the CPU info
		stepping_id = (eax >> 0) & 0xF # 4 bits
		model = (eax >> 4) & 0xF # 4 bits
		family_id = (eax >> 8) & 0xF # 4 bits
		processor_type = (eax >> 12) & 0x3 # 2 bits
		extended_model_id = (eax >> 16) & 0xF # 4 bits
		extended_family_id = (eax >> 20) & 0xFF # 8 bits
		family = 0

		# Family/model use the extended fields only for the reserved encodings
		if family_id in [15]:
			family = extended_family_id + family_id
		else:
			family = family_id

		if family_id in [6, 15]:
			model = (extended_model_id << 4) + model

		return {
			'stepping' : stepping_id,
			'model' : model,
			'family' : family,
			'processor_type' : processor_type
		}

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
	def get_max_extension_support(self):
		"""Return the highest supported extended CPUID function number."""
		# Check for extension support
		max_extension_support = self._run_asm(
			b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
			b"\x0f\xa2"             # cpuid
			b"\xC3"                 # ret
		)

		return max_extension_support

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
	def get_flags(self, max_extension_support):
		"""Return a sorted list of CPU feature flag names.

		Reads leaf 1 (EDX/ECX), leaf 7 (EBX/ECX) when supported, and the
		extended leaf 0x80000001 (EBX/ECX) when supported.
		"""
		# EDX
		edx = self._run_asm(
			b"\xB8\x01\x00\x00\x00",   # mov eax,0x1"
			b"\x0f\xa2"                # cpuid
			b"\x89\xD0"                # mov ax,dx
			b"\xC3"                    # ret
		)

		# ECX
		ecx = self._run_asm(
			b"\xB8\x01\x00\x00\x00",   # mov eax,0x1"
			b"\x0f\xa2"                # cpuid
			b"\x89\xC8"                # mov ax,cx
			b"\xC3"                    # ret
		)

		# Get the CPU flags
		flags = {
			'fpu' : _is_bit_set(edx, 0),
			'vme' : _is_bit_set(edx, 1),
			'de' : _is_bit_set(edx, 2),
			'pse' : _is_bit_set(edx, 3),
			'tsc' : _is_bit_set(edx, 4),
			'msr' : _is_bit_set(edx, 5),
			'pae' : _is_bit_set(edx, 6),
			'mce' : _is_bit_set(edx, 7),
			'cx8' : _is_bit_set(edx, 8),
			'apic' : _is_bit_set(edx, 9),
			#'reserved1' : _is_bit_set(edx, 10),
			'sep' : _is_bit_set(edx, 11),
			'mtrr' : _is_bit_set(edx, 12),
			'pge' : _is_bit_set(edx, 13),
			'mca' : _is_bit_set(edx, 14),
			'cmov' : _is_bit_set(edx, 15),
			'pat' : _is_bit_set(edx, 16),
			'pse36' : _is_bit_set(edx, 17),
			'pn' : _is_bit_set(edx, 18),
			'clflush' : _is_bit_set(edx, 19),
			#'reserved2' : _is_bit_set(edx, 20),
			'dts' : _is_bit_set(edx, 21),
			'acpi' : _is_bit_set(edx, 22),
			'mmx' : _is_bit_set(edx, 23),
			'fxsr' : _is_bit_set(edx, 24),
			'sse' : _is_bit_set(edx, 25),
			'sse2' : _is_bit_set(edx, 26),
			'ss' : _is_bit_set(edx, 27),
			'ht' : _is_bit_set(edx, 28),
			'tm' : _is_bit_set(edx, 29),
			'ia64' : _is_bit_set(edx, 30),
			'pbe' : _is_bit_set(edx, 31),

			'pni' : _is_bit_set(ecx, 0),
			'pclmulqdq' : _is_bit_set(ecx, 1),
			'dtes64' : _is_bit_set(ecx, 2),
			'monitor' : _is_bit_set(ecx, 3),
			'ds_cpl' : _is_bit_set(ecx, 4),
			'vmx' : _is_bit_set(ecx, 5),
			'smx' : _is_bit_set(ecx, 6),
			'est' : _is_bit_set(ecx, 7),
			'tm2' : _is_bit_set(ecx, 8),
			'ssse3' : _is_bit_set(ecx, 9),
			'cid' : _is_bit_set(ecx, 10),
			#'reserved3' : _is_bit_set(ecx, 11),
			'fma' : _is_bit_set(ecx, 12),
			'cx16' : _is_bit_set(ecx, 13),
			'xtpr' : _is_bit_set(ecx, 14),
			'pdcm' : _is_bit_set(ecx, 15),
			#'reserved4' : _is_bit_set(ecx, 16),
			'pcid' : _is_bit_set(ecx, 17),
			'dca' : _is_bit_set(ecx, 18),
			'sse4_1' : _is_bit_set(ecx, 19),
			'sse4_2' : _is_bit_set(ecx, 20),
			'x2apic' : _is_bit_set(ecx, 21),
			'movbe' : _is_bit_set(ecx, 22),
			'popcnt' : _is_bit_set(ecx, 23),
			'tscdeadline' : _is_bit_set(ecx, 24),
			'aes' : _is_bit_set(ecx, 25),
			'xsave' : _is_bit_set(ecx, 26),
			'osxsave' : _is_bit_set(ecx, 27),
			'avx' : _is_bit_set(ecx, 28),
			'f16c' : _is_bit_set(ecx, 29),
			'rdrnd' : _is_bit_set(ecx, 30),
			'hypervisor' : _is_bit_set(ecx, 31)
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
		if max_extension_support >= 7:
			# EBX
			ebx = self._run_asm(
				b"\x31\xC9",            # xor ecx,ecx
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"             # cpuid
				b"\x89\xD8"             # mov ax,bx
				b"\xC3"                 # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\x31\xC9",            # xor ecx,ecx
				b"\xB8\x07\x00\x00\x00" # mov eax,7
				b"\x0f\xa2"             # cpuid
				b"\x89\xC8"             # mov ax,cx
				b"\xC3"                 # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				#'fsgsbase' : _is_bit_set(ebx, 0),
				#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
				'sgx' : _is_bit_set(ebx, 2),
				'bmi1' : _is_bit_set(ebx, 3),
				'hle' : _is_bit_set(ebx, 4),
				'avx2' : _is_bit_set(ebx, 5),
				#'reserved' : _is_bit_set(ebx, 6),
				'smep' : _is_bit_set(ebx, 7),
				'bmi2' : _is_bit_set(ebx, 8),
				'erms' : _is_bit_set(ebx, 9),
				'invpcid' : _is_bit_set(ebx, 10),
				'rtm' : _is_bit_set(ebx, 11),
				'pqm' : _is_bit_set(ebx, 12),
				#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
				'mpx' : _is_bit_set(ebx, 14),
				'pqe' : _is_bit_set(ebx, 15),
				'avx512f' : _is_bit_set(ebx, 16),
				'avx512dq' : _is_bit_set(ebx, 17),
				'rdseed' : _is_bit_set(ebx, 18),
				'adx' : _is_bit_set(ebx, 19),
				'smap' : _is_bit_set(ebx, 20),
				'avx512ifma' : _is_bit_set(ebx, 21),
				'pcommit' : _is_bit_set(ebx, 22),
				'clflushopt' : _is_bit_set(ebx, 23),
				'clwb' : _is_bit_set(ebx, 24),
				'intel_pt' : _is_bit_set(ebx, 25),
				'avx512pf' : _is_bit_set(ebx, 26),
				'avx512er' : _is_bit_set(ebx, 27),
				'avx512cd' : _is_bit_set(ebx, 28),
				'sha' : _is_bit_set(ebx, 29),
				'avx512bw' : _is_bit_set(ebx, 30),
				'avx512vl' : _is_bit_set(ebx, 31),

				'prefetchwt1' : _is_bit_set(ecx, 0),
				'avx512vbmi' : _is_bit_set(ecx, 1),
				'umip' : _is_bit_set(ecx, 2),
				'pku' : _is_bit_set(ecx, 3),
				'ospke' : _is_bit_set(ecx, 4),
				#'reserved' : _is_bit_set(ecx, 5),
				'avx512vbmi2' : _is_bit_set(ecx, 6),
				#'reserved' : _is_bit_set(ecx, 7),
				'gfni' : _is_bit_set(ecx, 8),
				'vaes' : _is_bit_set(ecx, 9),
				'vpclmulqdq' : _is_bit_set(ecx, 10),
				'avx512vnni' : _is_bit_set(ecx, 11),
				'avx512bitalg' : _is_bit_set(ecx, 12),
				#'reserved' : _is_bit_set(ecx, 13),
				'avx512vpopcntdq' : _is_bit_set(ecx, 14),
				#'reserved' : _is_bit_set(ecx, 15),
				#'reserved' : _is_bit_set(ecx, 16),
				#'mpx0' : _is_bit_set(ecx, 17),
				#'mpx1' : _is_bit_set(ecx, 18),
				#'mpx2' : _is_bit_set(ecx, 19),
				#'mpx3' : _is_bit_set(ecx, 20),
				#'mpx4' : _is_bit_set(ecx, 21),
				'rdpid' : _is_bit_set(ecx, 22),
				#'reserved' : _is_bit_set(ecx, 23),
				#'reserved' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				#'reserved' : _is_bit_set(ecx, 26),
				#'reserved' : _is_bit_set(ecx, 27),
				#'reserved' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				'sgx_lc' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
		if max_extension_support >= 0x80000001:
			# EBX
			ebx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"             # cpuid
				b"\x89\xD8"             # mov ax,bx
				b"\xC3"                 # ret
			)

			# ECX
			ecx = self._run_asm(
				b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
				b"\x0f\xa2"             # cpuid
				b"\x89\xC8"             # mov ax,cx
				b"\xC3"                 # ret
			)

			# Get the extended CPU flags
			extended_flags = {
				'fpu' : _is_bit_set(ebx, 0),
				'vme' : _is_bit_set(ebx, 1),
				'de' : _is_bit_set(ebx, 2),
				'pse' : _is_bit_set(ebx, 3),
				'tsc' : _is_bit_set(ebx, 4),
				'msr' : _is_bit_set(ebx, 5),
				'pae' : _is_bit_set(ebx, 6),
				'mce' : _is_bit_set(ebx, 7),
				'cx8' : _is_bit_set(ebx, 8),
				'apic' : _is_bit_set(ebx, 9),
				#'reserved' : _is_bit_set(ebx, 10),
				'syscall' : _is_bit_set(ebx, 11),
				'mtrr' : _is_bit_set(ebx, 12),
				'pge' : _is_bit_set(ebx, 13),
				'mca' : _is_bit_set(ebx, 14),
				'cmov' : _is_bit_set(ebx, 15),
				'pat' : _is_bit_set(ebx, 16),
				'pse36' : _is_bit_set(ebx, 17),
				#'reserved' : _is_bit_set(ebx, 18),
				'mp' : _is_bit_set(ebx, 19),
				'nx' : _is_bit_set(ebx, 20),
				#'reserved' : _is_bit_set(ebx, 21),
				'mmxext' : _is_bit_set(ebx, 22),
				'mmx' : _is_bit_set(ebx, 23),
				'fxsr' : _is_bit_set(ebx, 24),
				'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gp' : _is_bit_set(ebx, 26),
				'rdtscp' : _is_bit_set(ebx, 27),
				#'reserved' : _is_bit_set(ebx, 28),
				'lm' : _is_bit_set(ebx, 29),
				'3dnowext' : _is_bit_set(ebx, 30),
				'3dnow' : _is_bit_set(ebx, 31),

				'lahf_lm' : _is_bit_set(ecx, 0),
				'cmp_legacy' : _is_bit_set(ecx, 1),
				'svm' : _is_bit_set(ecx, 2),
				'extapic' : _is_bit_set(ecx, 3),
				'cr8_legacy' : _is_bit_set(ecx, 4),
				'abm' : _is_bit_set(ecx, 5),
				'sse4a' : _is_bit_set(ecx, 6),
				'misalignsse' : _is_bit_set(ecx, 7),
				'3dnowprefetch' : _is_bit_set(ecx, 8),
				'osvw' : _is_bit_set(ecx, 9),
				'ibs' : _is_bit_set(ecx, 10),
				'xop' : _is_bit_set(ecx, 11),
				'skinit' : _is_bit_set(ecx, 12),
				'wdt' : _is_bit_set(ecx, 13),
				#'reserved' : _is_bit_set(ecx, 14),
				'lwp' : _is_bit_set(ecx, 15),
				'fma4' : _is_bit_set(ecx, 16),
				'tce' : _is_bit_set(ecx, 17),
				#'reserved' : _is_bit_set(ecx, 18),
				'nodeid_msr' : _is_bit_set(ecx, 19),
				#'reserved' : _is_bit_set(ecx, 20),
				'tbm' : _is_bit_set(ecx, 21),
				'topoext' : _is_bit_set(ecx, 22),
				'perfctr_core' : _is_bit_set(ecx, 23),
				'perfctr_nb' : _is_bit_set(ecx, 24),
				#'reserved' : _is_bit_set(ecx, 25),
				'dbx' : _is_bit_set(ecx, 26),
				'perftsc' : _is_bit_set(ecx, 27),
				'pci_l2i' : _is_bit_set(ecx, 28),
				#'reserved' : _is_bit_set(ecx, 29),
				#'reserved' : _is_bit_set(ecx, 30),
				#'reserved' : _is_bit_set(ecx, 31)
			}

			# Get a list of only the flags that are true
			extended_flags = [k for k, v in extended_flags.items() if v]
			flags += extended_flags

		flags.sort()
		return flags

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
	def get_processor_brand(self, max_extension_support):
		"""Return the 48-byte brand string from leaves 0x80000002-4 ('' if unsupported)."""
		processor_brand = ""

		# Processor brand string
		if max_extension_support >= 0x80000004:
			instructions = [
				b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
				b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
				b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
			]
			for instruction in instructions:
				# EAX
				eax = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC0"   # mov ax,ax
					b"\xC3"       # ret
				)

				# EBX
				ebx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD8"   # mov ax,bx
					b"\xC3"       # ret
				)

				# ECX
				ecx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xC8"   # mov ax,cx
					b"\xC3"       # ret
				)

				# EDX
				edx = self._run_asm(
					instruction,  # mov ax,0x8000000?
					b"\x0f\xa2"   # cpuid
					b"\x89\xD0"   # mov ax,dx
					b"\xC3"       # ret
				)

				# Combine each of the 4 bytes in each register into the string
				for reg in [eax, ebx, ecx, edx]:
					for n in [0, 8, 16, 24]:
						processor_brand += chr((reg >> n) & 0xFF)

		# Strip off any trailing NULL terminators and white space
		processor_brand = processor_brand.strip("\0").strip()

		return processor_brand

	# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
	def get_cache(self, max_extension_support):
		"""Return L2 cache size/associativity/line-size from leaf 0x80000006.

		Returns {} when the extended cache leaf is not supported.
		"""
		cache_info = {}

		# Just return if the cache feature is not supported
		if max_extension_support < 0x80000006:
			return cache_info

		# ECX
		ecx = self._run_asm(
			b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
			b"\x0f\xa2"             # cpuid
			b"\x89\xC8"             # mov ax,cx
			b"\xC3"                 # ret
		)

		# Decode the packed ECX fields
		cache_info = {
			'size_b' : (ecx & 0xFF) * 1024,
			'associativity' : (ecx >> 12) & 0xF,
			'line_size_b' : (ecx >> 16) & 0xFFFF
		}

		return cache_info

	def get_ticks_func(self):
		"""Build and return an ASM object whose func() reads the TSC (RDTSC).

		Chooses a 32-bit or 64-bit code path based on DataSource.bits; the
		32-bit variant writes the high/low halves through pointer arguments
		and is monkey-patched to combine them into one uint64.
		Returns None when DataSource.bits is neither '32bit' nor '64bit'.
		The caller owns the ASM object and must call free() on it.
		"""
		retval = None

		if DataSource.bits == '32bit':
			# Works on x86_32
			restype = None
			argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
			get_ticks_x86_32 = self._asm_func(restype, argtypes,
				[
				b"\x55",         # push bp
				b"\x89\xE5",     # mov bp,sp
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x8B\x5D\x08", # mov bx,[di+0x8]
				b"\x8B\x4D\x0C", # mov cx,[di+0xc]
				b"\x89\x13",     # mov [bp+di],dx
				b"\x89\x01",     # mov [bx+di],ax
				b"\x5D",         # pop bp
				b"\xC3"          # ret
				]
			)

			# Monkey patch func to combine high and low args into one return
			old_func = get_ticks_x86_32.func
			def new_func():
				# Pass two uint32s into function
				high = ctypes.c_uint32(0)
				low = ctypes.c_uint32(0)
				old_func(ctypes.byref(high), ctypes.byref(low))

				# Shift the two uint32s into one uint64
				retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
				return retval
			get_ticks_x86_32.func = new_func

			retval = get_ticks_x86_32
		elif DataSource.bits == '64bit':
			# Works on x86_64
			restype = ctypes.c_uint64
			argtypes = ()
			get_ticks_x86_64 = self._asm_func(restype, argtypes,
				[
				b"\x48",         # dec ax
				b"\x31\xC0",     # xor ax,ax
				b"\x0F\xA2",     # cpuid
				b"\x0F\x31",     # rdtsc
				b"\x48",         # dec ax
				b"\xC1\xE2\x20", # shl dx,byte 0x20
				b"\x48",         # dec ax
				b"\x09\xD0",     # or ax,dx
				b"\xC3",         # ret
				]
			)

			retval = get_ticks_x86_64
		return retval

	def get_raw_hz(self):
		"""Count TSC ticks elapsed over a one-second sleep.

		NOTE: blocks the caller for roughly one second.
		"""
		from time import sleep

		ticks_fn = self.get_ticks_func()

		start = ticks_fn.func()
		sleep(1)
		end = ticks_fn.func()

		ticks = (end - start)
		ticks_fn.free()

		return ticks
+
def _get_cpu_info_from_cpuid_actual():
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Returns the dict built by trace.to_dict(info, is_fail): the gathered
	CPU fields plus the captured trace/stdout/stderr/error text.
	'''

	from io import StringIO
	# NOTE(review): StringIO appears unused in this function — presumably
	# Trace owns the capture buffers; confirm before removing the import.

	trace = Trace(True, True)
	info = {}

	# Pipe stdout and stderr to strings
	sys.stdout = trace._stdout
	sys.stderr = trace._stderr

	try:
		# Get the CPU arch and bits
		arch, bits = _parse_arch(DataSource.arch_string_raw)

		# Return none if this is not an X86 CPU
		if not arch in ['X86_32', 'X86_64']:
			trace.fail('Not running on X86_32 or X86_64. Skipping ...')
			return trace.to_dict(info, True)

		# Return none if SE Linux is in enforcing mode
		cpuid = CPUID(trace)
		if cpuid.is_selinux_enforcing:
			trace.fail('SELinux is enforcing. Skipping ...')
			return trace.to_dict(info, True)

		# Get the cpu info from the CPUID register
		max_extension_support = cpuid.get_max_extension_support()
		cache_info = cpuid.get_cache(max_extension_support)
		info = cpuid.get_info()

		processor_brand = cpuid.get_processor_brand(max_extension_support)

		# Get the Hz and scale
		# (hz_actual is measured via RDTSC, so scale 0: it is already in Hz)
		hz_actual = cpuid.get_raw_hz()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the Hz and scale
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		info = {
		'vendor_id_raw' : cpuid.get_vendor_id(),
		'hardware_raw' : '',
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		'l2_cache_size' : cache_info['size_b'],
		'l2_cache_line_size' : cache_info['line_size_b'],
		'l2_cache_associativity' : cache_info['associativity'],

		'stepping' : info['stepping'],
		'model' : info['model'],
		'family' : info['family'],
		'processor_type' : info['processor_type'],
		'flags' : cpuid.get_flags(max_extension_support)
		}

		info = _filter_dict_keys_with_empty_values(info)
		trace.success()
	except Exception as err:
		from traceback import format_exc
		err_string = format_exc()
		# Indent the traceback so it nests under the rest of the trace output
		trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n'
		return trace.to_dict(info, True)

	return trace.to_dict(info, False)
+
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
	'''
	Entry point of the CPUID child process.

	Runs _get_cpu_info_from_cpuid_actual (which redirects sys.stdout and
	sys.stderr into its trace) and sends the resulting dict back to the
	parent on *queue*, base64-encoded via _obj_to_b64.
	'''
	orig_stdout = sys.stdout
	orig_stderr = sys.stderr

	try:
		output = _get_cpu_info_from_cpuid_actual()
	finally:
		# Restore the streams even if the call raises, so the child's
		# stdout/stderr are never left pointing at the trace buffers.
		sys.stdout = orig_stdout
		sys.stderr = orig_stderr

	queue.put(_obj_to_b64(output))
+
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''

	g_trace.header('Tying to get info from CPUID ...')

	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		g_trace.fail('Can\'t CPUID. Skipping ...')
		return {}

	# Get the CPU arch and bits
	arch, bits = _parse_arch(DataSource.arch_string_raw)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		g_trace.fail('Not running on X86_32 or X86_64. Skipping ...')
		return {}

	try:
		if CAN_CALL_CPUID_IN_SUBPROCESS:
			# Start running the function in a subprocess, so a crash in the
			# raw CPUID code cannot take down this interpreter
			queue = Queue()
			p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
			p.start()

			# Wait for the process to end, while it is still alive
			while p.is_alive():
				p.join(0)

			# Return {} if it failed
			if p.exitcode != 0:
				g_trace.fail('Failed to run CPUID in process. Skipping ...')
				return {}

			# Return {} if no results
			if queue.empty():
				g_trace.fail('Failed to get anything from CPUID process. Skipping ...')
				return {}
			# Return the result, only if there is something to read
			else:
				# The child sends its trace dict base64-encoded
				output = _b64_to_obj(queue.get())

				# Forward the child's trace, stdout, and stderr output
				if 'output' in output and output['output']:
					g_trace.write(output['output'])

				if 'stdout' in output and output['stdout']:
					sys.stdout.write('{0}\n'.format(output['stdout']))
					sys.stdout.flush()

				if 'stderr' in output and output['stderr']:
					sys.stderr.write('{0}\n'.format(output['stderr']))
					sys.stderr.flush()

				if 'is_fail' not in output:
					g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...')
					return {}

				# Fail if there was an exception
				if 'err' in output and output['err']:
					g_trace.fail('Failed to run CPUID in process. Skipping ...')
					g_trace.write(output['err'])
					g_trace.write('Failed ...')
					return {}

				if 'is_fail' in output and output['is_fail']:
					g_trace.write('Failed ...')
					return {}

				if 'info' not in output or not output['info']:
					g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
					return {}

				return output['info']
		else:
			# FIXME: This should write the values like in the above call to actual
			orig_stdout = sys.stdout
			orig_stderr = sys.stderr

			output = _get_cpu_info_from_cpuid_actual()

			sys.stdout = orig_stdout
			sys.stderr = orig_stderr

			g_trace.success()
			return output['info']
	except Exception as err:
		g_trace.fail(err)

	# Return {} if everything failed
	return {}
+
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.
	'''

	g_trace.header('Tying to get info from /proc/cpuinfo ...')

	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name', 'cpu', 'processor', 'uarch')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, -1, 'stepping')
		model = _get_field(False, output, int, -1, 'model')
		family = _get_field(False, output, int, -1, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
		if flags:
			flags = flags.split()
			flags.sort()

		# Check for other cache format
		# (some kernels report "cacheN: level=... size=..." lines instead;
		# only a level-3 entry is taken as the cache size here)
		if not cache_size:
			try:
				for i in range(0, 10):
					name = "cache{0}".format(i)
					value = _get_field(False, output, None, None, name)
					if value:
						value = [entry.split('=') for entry in value.split(' ')]
						value = dict(value)
						if 'level' in value and value['level'] == '3' and 'size' in value:
							cache_size = value['size']
							break
			except Exception:
				pass

		# Convert from MHz string to Hz
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = _to_decimal_string(hz_actual)

		# Convert from GHz/MHz string to Hz
		# (best effort: an unparseable brand string leaves hz_advertised None)
		hz_advertised, scale = (None, 0)
		try:
			hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		except Exception:
			pass

		info = {
		'hardware_raw' : hardware,
		'brand_raw' : processor_brand,

		'l3_cache_size' : _friendly_bytes_to_int(cache_size),
		'flags' : flags,
		'vendor_id_raw' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		# NOTE(review): hz_actual is always converted with scale 6 (MHz)
		# below, even though the guard uses `scale` — confirm intended.
		if _hz_short_to_full(hz_advertised, scale) > (0, 0):
			info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
			info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
		if _hz_short_to_full(hz_actual, scale) > (0, 0):
			info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
			info['hz_actual'] = _hz_short_to_full(hz_actual, 6)

		# Keep zero-valued stepping/model/family, drop other empty values
		info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0})
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''

	g_trace.header('Tying to get info from cpufreq-info ...')

	try:
		hz_brand, scale = '0.0', 0

		if not DataSource.has_cpufreq_info():
			g_trace.fail('Failed to find cpufreq-info. Skipping ...')
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			g_trace.fail('Failed to run cpufreq-info. Skipping ...')
			return {}

		# Isolate the "current CPU frequency is N.NN XHz" clause
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		if i == -1:
			# Explicit check instead of assert: asserts are stripped
			# under `python -O`, which would let a bad parse through
			g_trace.fail('Failed to find Hz in cpufreq-info output. Skipping ...')
			return {}
		hz_brand = hz_brand[0 : i+2].strip().lower()

		# Determine the scale from the unit, then cut the unit suffix off.
		# NOTE: str.rstrip('mhz') strips a *character set*, not a suffix,
		# so explicit slicing is used here instead.
		if hz_brand.endswith('mhz'):
			scale = 6
			hz_brand = hz_brand[:-3]
		elif hz_brand.endswith('ghz'):
			scale = 9
			hz_brand = hz_brand[:-3]
		elif hz_brand.endswith('hz'):
			# Plain Hz: scale stays 0
			hz_brand = hz_brand[:-2]
		hz_brand = _to_decimal_string(hz_brand.strip())

		info = {
			'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
			'hz_advertised' : _hz_short_to_full(hz_brand, scale),
			'hz_actual' : _hz_short_to_full(hz_brand, scale),
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.
	'''

	g_trace.header('Tying to get info from lscpu ...')

	try:
		if not DataSource.has_lscpu():
			g_trace.fail('Failed to find lscpu. Skipping ...')
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			g_trace.fail('Failed to run lscpu. Skipping ...')
			return {}

		info = {}

		# Hz fields, in MHz (scale 6). Each group that matches overwrites
		# the previous one, so dynamic/static values win when present.
		for hz_fields in (('CPU max MHz', 'CPU MHz'), ('CPU dynamic MHz', 'CPU static MHz')):
			new_hz = _get_field(False, output, None, None, *hz_fields)
			if new_hz:
				new_hz = _to_decimal_string(new_hz)
				scale = 6
				info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
				info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
				info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
				info['hz_actual'] = _hz_short_to_full(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id_raw'] = vendor_id

		# Prefer "Model name"; fall back to "Model" only when it is not a
		# bare number (a bare number there is the CPU model, handled below)
		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand_raw'] = brand
		else:
			brand = _get_field(False, output, None, None, 'Model')
			if brand and not brand.isdigit():
				info['brand_raw'] = brand

		# Integer identification fields
		for field_name, key in (('CPU family', 'family'), ('Stepping', 'stepping'), ('Model', 'model')):
			number = _get_field(False, output, None, None, field_name)
			if number and number.isdigit():
				info[key] = int(number)

		# Cache sizes: strip any trailing "(N instances)" annotation, then
		# convert the human readable size to bytes
		for cache_fields, key in (
			(('L1d cache',), 'l1_data_cache_size'),
			(('L1i cache',), 'l1_instruction_cache_size'),
			(('L2 cache', 'L2d cache'), 'l2_cache_size'),
			(('L3 cache',), 'l3_cache_size'),
		):
			cache_size = _get_field(False, output, None, None, *cache_fields)
			if cache_size:
				cache_size = cache_size.split('(')[0].strip()
				info[key] = _friendly_bytes_to_int(cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features', 'ASEs implemented')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		info = _filter_dict_keys_with_empty_values(info, {'stepping':0, 'model':0, 'family':0})
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	g_trace.header('Tying to get info from the dmesg ...')

	# The dmesg log is known to be unreliable on some architectures
	arch, bits = _parse_arch(DataSource.arch_string_raw)
	if arch in ['S390X']:
		g_trace.fail('Running on S390X. Skipping ...')
		return {}

	# Bail out unless a dmesg binary is available
	if not DataSource.has_dmesg():
		g_trace.fail('Failed to find dmesg. Skipping ...')
		return {}

	# Bail out if running dmesg failed or produced nothing
	returncode, output = DataSource.dmesg_a()
	if output is None or returncode != 0:
		g_trace.fail('Failed to run \"dmesg -a\". Skipping ...')
		return {}

	parsed = _parse_dmesg_output(output)
	g_trace.success()
	return parsed
+
+
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
	'''

	g_trace.header('Tying to get info from lsprop ...')

	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			g_trace.fail('Failed to find lsprop. Skipping ...')
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output is None or returncode != 0:
			g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...')
			return {}

		# Keep only the hex digits from the property dump
		# (single pass with string membership, instead of rebuilding a
		# list of allowed characters for every character tested)
		value = output.split("ibm,pa-features")[1].lower()
		value = ''.join([s for s in value if s in '0123456789abcdef'])

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags
		flags = {
			# Byte 0
			'mmu' : _is_bit_set(left, 0),
			'fpu' : _is_bit_set(left, 1),
			'slb' : _is_bit_set(left, 2),
			'run' : _is_bit_set(left, 3),
			#'reserved' : _is_bit_set(left, 4),
			'dabr' : _is_bit_set(left, 5),
			'ne' : _is_bit_set(left, 6),
			'wtr' : _is_bit_set(left, 7),

			# Byte 1
			'mcr' : _is_bit_set(left, 8),
			'dsisr' : _is_bit_set(left, 9),
			'lp' : _is_bit_set(left, 10),
			'ri' : _is_bit_set(left, 11),
			'dabrx' : _is_bit_set(left, 12),
			'sprg3' : _is_bit_set(left, 13),
			'rislb' : _is_bit_set(left, 14),
			'pp' : _is_bit_set(left, 15),

			# Byte 2
			'vpm' : _is_bit_set(left, 16),
			'dss_2.05' : _is_bit_set(left, 17),
			#'reserved' : _is_bit_set(left, 18),
			'dar' : _is_bit_set(left, 19),
			#'reserved' : _is_bit_set(left, 20),
			'ppr' : _is_bit_set(left, 21),
			'dss_2.02' : _is_bit_set(left, 22),
			'dss_2.06' : _is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : _is_bit_set(left, 24),
			'ugr_in_dscr' : _is_bit_set(left, 25),
			#'reserved' : _is_bit_set(left, 26),
			#'reserved' : _is_bit_set(left, 27),
			#'reserved' : _is_bit_set(left, 28),
			#'reserved' : _is_bit_set(left, 29),
			#'reserved' : _is_bit_set(left, 30),
			#'reserved' : _is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : _is_bit_set(right, 0),
			#'reserved' : _is_bit_set(right, 1),
			#'reserved' : _is_bit_set(right, 2),
			#'reserved' : _is_bit_set(right, 3),
			#'reserved' : _is_bit_set(right, 4),
			#'reserved' : _is_bit_set(right, 5),
			#'reserved' : _is_bit_set(right, 6),
			#'reserved' : _is_bit_set(right, 7),

			# Byte 5
			'le' : _is_bit_set(right, 8),
			'cfar' : _is_bit_set(right, 9),
			'eb' : _is_bit_set(right, 10),
			'lsq_2.07' : _is_bit_set(right, 11),
			#'reserved' : _is_bit_set(right, 12),
			#'reserved' : _is_bit_set(right, 13),
			#'reserved' : _is_bit_set(right, 14),
			#'reserved' : _is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : _is_bit_set(right, 16),
			#'reserved' : _is_bit_set(right, 17),
			#'reserved' : _is_bit_set(right, 18),
			#'reserved' : _is_bit_set(right, 19),
			#'reserved' : _is_bit_set(right, 20),
			#'reserved' : _is_bit_set(right, 21),
			#'reserved' : _is_bit_set(right, 22),
			#'reserved' : _is_bit_set(right, 23),

			# Byte 7
			#'reserved' : _is_bit_set(right, 24),
			#'reserved' : _is_bit_set(right, 25),
			#'reserved' : _is_bit_set(right, 26),
			#'reserved' : _is_bit_set(right, 27),
			#'reserved' : _is_bit_set(right, 28),
			#'reserved' : _is_bit_set(right, 29),
			#'reserved' : _is_bit_set(right, 30),
			#'reserved' : _is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}
+
+
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''

	g_trace.header('Tying to get info from the /var/run/dmesg.boot log ...')

	# Bail out unless the BSD boot log file exists
	if not DataSource.has_var_run_dmesg_boot():
		g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
		return {}

	# Bail out if reading the log failed or produced nothing
	returncode, output = DataSource.cat_var_run_dmesg_boot()
	if output is None or returncode != 0:
		g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
		return {}

	parsed = _parse_dmesg_output(output)
	g_trace.success()
	return parsed
+
+
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.
	'''

	g_trace.header('Tying to get info from sysctl ...')

	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			g_trace.fail('Failed to find sysctl. Skipping ...')
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, int, 0, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags, merged from the three machdep feature lists
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (hw.cpufrequency is converted with scale 0: treated as already in Hz)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = _to_decimal_string(hz_actual)

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 0),

		# machdep.cpu.cache.size is reported in KB
		'l2_cache_size' : int(cache_size) * 1024,

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}
+
+
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''

	# Merge both sysinfo output formats; keys found by the v2 parser
	# take precedence over the v1 parser's
	return {**_get_cpu_info_from_sysinfo_v1(), **_get_cpu_info_from_sysinfo_v2()}
+
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the older (v1) Haiku `sysinfo -cpu` output format; any parse
	failure falls through to the except and returns {}.
	'''

	g_trace.header('Tying to get info from sysinfo version 1 ...')

	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			g_trace.fail('Failed to find sysinfo. Skipping ...')
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
			return {}

		# Various fields, sliced positionally out of the raw text
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags are listed on double-tab indented lines
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (no separate actual-frequency reading here, so reuse advertised)
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),

		'l2_cache_size' : _to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the newer (v2) Haiku `sysinfo -cpu` output format; any parse
	failure falls through to the except and returns {}.
	'''

	g_trace.header('Tying to get info from sysinfo version 2 ...')

	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			g_trace.fail('Failed to find sysinfo. Skipping ...')
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
			return {}

		# Various fields, sliced positionally out of the raw text
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		def get_subsection_flags(output):
			# Collect flag tokens from the indented lines that follow a
			# section header, stopping at the first non-indented line
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('    ') and not line.startswith('	'): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz
		# (taken from the "running at NNNmhz" header line; reused for
		# both advertised and actual since there is no separate reading)
		lines = [n for n in output.split('\n') if n]
		raw_hz = lines[0].split('running at ')[1].strip().lower()
		hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
		hz_advertised = _to_decimal_string(hz_advertised)
		hz_actual = hz_advertised

		scale = 0
		if raw_hz.endswith('mhz'):
			scale = 6
		elif raw_hz.endswith('ghz'):
			scale = 9

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),

		'l2_cache_size' : _to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	g_trace.header('Tying to get info from wmic ...')

	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output is None or returncode != 0:
			g_trace.fail('Failed to run wmic. Skipping ...')
			return {}

		# Break the list into key values pairs.
		# Split on the first '=' only: a value containing '=' would
		# otherwise raise in the dict build and discard everything.
		value = output.split("\n")
		value = [s.rstrip().split('=', 1) for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz
		processor_brand = value.get('Name')
		hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)

		# Get the actual MHz (CurrentClockSpeed is reported in MHz)
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = _to_decimal_string(hz_actual)

		# Get cache sizes
		l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
		if l2_cache_size:
			l2_cache_size = int(l2_cache_size) * 1024

		l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
		if l3_cache_size:
			l3_cache_size = int(l3_cache_size) * 1024

		# Get family, model, and stepping from a description like
		# "Intel64 Family 6 Model 158 Stepping 10".
		# Guard against both fields being missing (previously a None here
		# raised and threw away the whole result).
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption') or ''
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id_raw' : value.get('Manufacturer'),
			'brand_raw' : processor_brand,

			'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
			'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
			'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
			'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
+
def _get_cpu_info_from_registry():
	'''
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.
	'''

	g_trace.header('Tying to get info from Windows registry ...')

	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			g_trace.fail('Not running on Windows. Skipping ...')
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand().strip()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id_raw()

		# Get the CPU arch and bits
		arch_string_raw = DataSource.winreg_arch_string_raw()
		arch, bits = _parse_arch(arch_string_raw)

		# Get the actual CPU Hz
		# (treated as MHz below: converted with scale 6)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = _to_decimal_string(hz_actual)

		# Get the advertised CPU Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

		# If advertised hz not found, use the actual hz
		if hz_advertised == '0.0':
			scale = 6
			hz_advertised = _to_decimal_string(hz_actual)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# Bit 0 is the most significant bit of the 32-bit feature mask
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,

		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, 6),

		'flags' : flags
		}

		info = _filter_dict_keys_with_empty_values(info)
		g_trace.success()
		return info
	except Exception as err:
		g_trace.fail(err)
		return {}
+
+def _get_cpu_info_from_kstat():
+ '''
+ Returns the CPU info gathered from isainfo and kstat.
+ Returns {} if isainfo or kstat are not found.
+ '''
+
+ g_trace.header('Tying to get info from kstat ...')
+
+ try:
+ # Just return {} if there is no isainfo or kstat
+ if not DataSource.has_isainfo() or not DataSource.has_kstat():
+ g_trace.fail('Failed to find isinfo or kstat. Skipping ...')
+ return {}
+
+ # If isainfo fails return {}
+ returncode, flag_output = DataSource.isainfo_vb()
+ if flag_output is None or returncode != 0:
+ g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...')
+ return {}
+
+ # If kstat fails return {}
+ returncode, kstat = DataSource.kstat_m_cpu_info()
+ if kstat is None or returncode != 0:
+ g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...')
+ return {}
+
+ # Various fields
+ vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
+ processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
+ stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
+ model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
+ family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
+
+ # Flags
+ flags = flag_output.strip().split('\n')[-1].strip().lower().split()
+ flags.sort()
+
+ # Convert from GHz/MHz string to Hz
+ scale = 6
+ hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
+ hz_advertised = _to_decimal_string(hz_advertised)
+
+ # Convert from GHz/MHz string to Hz
+ hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
+ hz_actual = _to_decimal_string(hz_actual)
+
+ info = {
+ 'vendor_id_raw' : vendor_id,
+ 'brand_raw' : processor_brand,
+
+ 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
+ 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
+ 'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
+ 'hz_actual' : _hz_short_to_full(hz_actual, 0),
+
+ 'stepping' : stepping,
+ 'model' : model,
+ 'family' : family,
+ 'flags' : flags
+ }
+
+ info = _filter_dict_keys_with_empty_values(info)
+ g_trace.success()
+ return info
+ except Exception as err:
+ g_trace.fail(err)
+ return {}
+
+def _get_cpu_info_from_platform_uname():
+
+ g_trace.header('Tying to get info from platform.uname ...')
+
+ try:
+ uname = DataSource.uname_string_raw.split(',')[0]
+
+ family, model, stepping = (None, None, None)
+ entries = uname.split(' ')
+
+ if 'Family' in entries and entries.index('Family') < len(entries)-1:
+ i = entries.index('Family')
+ family = int(entries[i + 1])
+
+ if 'Model' in entries and entries.index('Model') < len(entries)-1:
+ i = entries.index('Model')
+ model = int(entries[i + 1])
+
+ if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
+ i = entries.index('Stepping')
+ stepping = int(entries[i + 1])
+
+ info = {
+ 'family' : family,
+ 'model' : model,
+ 'stepping' : stepping
+ }
+ info = _filter_dict_keys_with_empty_values(info)
+ g_trace.success()
+ return info
+ except Exception as err:
+ g_trace.fail(err)
+ return {}
+
+def _get_cpu_info_internal():
+ '''
+ Returns the CPU info by using the best sources of information for your OS.
+ Returns {} if nothing is found.
+ '''
+
+ g_trace.write('!' * 80)
+
+ # Get the CPU arch and bits
+ arch, bits = _parse_arch(DataSource.arch_string_raw)
+
+ friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
+ friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
+ PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
+
+ info = {
+ 'python_version' : PYTHON_VERSION,
+ 'cpuinfo_version' : CPUINFO_VERSION,
+ 'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
+ 'arch' : arch,
+ 'bits' : bits,
+ 'count' : DataSource.cpu_count,
+ 'arch_string_raw' : DataSource.arch_string_raw,
+ }
+
+ g_trace.write("python_version: {0}".format(info['python_version']))
+ g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
+ g_trace.write("arch: {0}".format(info['arch']))
+ g_trace.write("bits: {0}".format(info['bits']))
+ g_trace.write("count: {0}".format(info['count']))
+ g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))
+
+ # Try the Windows wmic
+ _copy_new_fields(info, _get_cpu_info_from_wmic())
+
+ # Try the Windows registry
+ _copy_new_fields(info, _get_cpu_info_from_registry())
+
+ # Try /proc/cpuinfo
+ _copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
+
+ # Try cpufreq-info
+ _copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
+
+ # Try LSCPU
+ _copy_new_fields(info, _get_cpu_info_from_lscpu())
+
+ # Try sysctl
+ _copy_new_fields(info, _get_cpu_info_from_sysctl())
+
+ # Try kstat
+ _copy_new_fields(info, _get_cpu_info_from_kstat())
+
+ # Try dmesg
+ _copy_new_fields(info, _get_cpu_info_from_dmesg())
+
+ # Try /var/run/dmesg.boot
+ _copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
+
+ # Try lsprop ibm,pa-features
+ _copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
+
+ # Try sysinfo
+ _copy_new_fields(info, _get_cpu_info_from_sysinfo())
+
+ # Try querying the CPU cpuid register
+ # FIXME: This should print stdout and stderr to trace log
+ _copy_new_fields(info, _get_cpu_info_from_cpuid())
+
+ # Try platform.uname
+ _copy_new_fields(info, _get_cpu_info_from_platform_uname())
+
+ g_trace.write('!' * 80)
+
+ return info
+
+def get_cpu_info_json():
+ '''
+ Returns the CPU info by using the best sources of information for your OS.
+ Returns the result in a json string
+ '''
+
+ import json
+
+ output = None
+
+ # If running under pyinstaller, run normally
+ if getattr(sys, 'frozen', False):
+ info = _get_cpu_info_internal()
+ output = json.dumps(info)
+ output = "{0}".format(output)
+ # if not running under pyinstaller, run in another process.
+ # This is done because multiprocesing has a design flaw that
+ # causes non main programs to run multiple times on Windows.
+ else:
+ from subprocess import Popen, PIPE
+
+ command = [sys.executable, __file__, '--json']
+ p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ output = p1.communicate()[0]
+
+ if p1.returncode != 0:
+ return "{}"
+
+ output = output.decode(encoding='UTF-8')
+
+ return output
+
+def get_cpu_info():
+ '''
+ Returns the CPU info by using the best sources of information for your OS.
+ Returns the result in a dict
+ '''
+
+ import json
+
+ output = get_cpu_info_json()
+
+ # Convert JSON to Python with non unicode strings
+ output = json.loads(output, object_hook = _utf_to_str)
+
+ return output
+
+def main():
+ from argparse import ArgumentParser
+ import json
+
+ # Parse args
+ parser = ArgumentParser(description='Gets CPU info with pure Python')
+ parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
+ parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
+ parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
+ args = parser.parse_args()
+
+ global g_trace
+ g_trace = Trace(args.trace, False)
+
+ try:
+ _check_arch()
+ except Exception as err:
+ sys.stderr.write(str(err) + "\n")
+ sys.exit(1)
+
+ info = _get_cpu_info_internal()
+
+ if not info:
+ sys.stderr.write("Failed to find cpu info\n")
+ sys.exit(1)
+
+ if args.json:
+ print(json.dumps(info))
+ elif args.version:
+ print(CPUINFO_VERSION_STRING)
+ else:
+ print('Python Version: {0}'.format(info.get('python_version', '')))
+ print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
+ print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
+ print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
+ print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
+ print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
+ print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
+ print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
+ print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
+ print('Arch: {0}'.format(info.get('arch', '')))
+ print('Bits: {0}'.format(info.get('bits', '')))
+ print('Count: {0}'.format(info.get('count', '')))
+ print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
+ print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
+ print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
+ print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
+ print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
+ print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
+ print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
+ print('Stepping: {0}'.format(info.get('stepping', '')))
+ print('Model: {0}'.format(info.get('model', '')))
+ print('Family: {0}'.format(info.get('family', '')))
+ print('Processor Type: {0}'.format(info.get('processor_type', '')))
+ print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
+
+
+if __name__ == '__main__':
+ main()
+else:
+ g_trace = Trace(False, False)
+ _check_arch()
diff --git a/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/INSTALLER b/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/RECORD b/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..85f9bb29a2ef044cd70f5abb790b1b4565a6458c
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+__pycache__/ffmpy.cpython-310.pyc,,
+ffmpy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ffmpy-0.4.0.dist-info/LICENSE,sha256=Ge5d77thSMVMLWBa3du0njex83-_GOW0Xk4frpS-UzE,1059
+ffmpy-0.4.0.dist-info/METADATA,sha256=CIiGN8oUQb-wE_xqitX7ozL1YdE7fnxp0Cw8Pi9bzSE,2923
+ffmpy-0.4.0.dist-info/RECORD,,
+ffmpy-0.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ffmpy-0.4.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ffmpy.py,sha256=W_mYavEP_zT8_-jSmAWEJYJJMNpRz-7iXPqBsK-LhPs,9591
+py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/REQUESTED b/valley/lib/python3.10/site-packages/ffmpy-0.4.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/AUTHORS b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..eeed7887264f0537b6e71c0d9c9c93ea682d4352
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/AUTHORS
@@ -0,0 +1,98 @@
+James Robert
+ github: jiaaro
+ twitter: @jiaaro
+ web: jiaaro.com
+ email: pydub@jiaaro.com
+
+Marc Webbie
+ github: marcwebbie
+
+Jean-philippe Serafin
+ github: jeanphix
+
+Anurag Ramdasan
+ github: AnuragRamdasan
+
+Choongmin Lee
+ github: clee704
+
+Patrick Pittman
+ github: ptpittman
+
+Hunter Lang
+ github: hunterlang
+
+Alexey
+ github: nihisil
+
+Jaymz Campbell
+ github: jaymzcd
+
+Ross McFarland
+ github: ross
+
+John McMellen
+ github: jmcmellen
+
+Johan Lövgren
+ github: dashj
+
+Joachim Krüger
+ github: jkrgr
+
+Shichao An
+ github: shichao-an
+
+Michael Bortnyck
+ github: mbortnyck
+
+André Cloete
+ github: aj-cloete
+
+David Acacio
+ github: dacacioa
+
+Thiago Abdnur
+ github: bolaum
+
+Aurélien Ooms
+ github: aureooms
+
+Mike Mattozzi
+ github: mmattozzi
+
+Marcio Mazza
+ github: marciomazza
+
+Sungsu Lim
+ github: proflim
+
+Evandro Myller
+ github: emyller
+
+Sérgio Agostinho
+ github: SergioRAgostinho
+
+Antonio Larrosa
+ github: antlarr
+
+Aaron Craig
+ github: craigthelinguist
+
+Carlos del Castillo
+ github: greyalien502
+
+Yudong Sun
+ github: sunjerry019
+
+Jorge Perianez
+ github: JPery
+
+Chendi Luo
+ github: Creonalia
+
+Daniel Lefevre
+ gitHub: dplefevre
+
+Grzegorz Kotfis
+ github: gkotfis
\ No newline at end of file
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/INSTALLER b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/LICENSE b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0cb49b7455d113ec0d0a2347150243888cdb2d00
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2011 James Robert, http://jiaaro.com
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/METADATA b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..a34f12ad2da48c7b325b06a87e4c5bb6e852cbec
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/METADATA
@@ -0,0 +1,37 @@
+Metadata-Version: 2.1
+Name: pydub
+Version: 0.25.1
+Summary: Manipulate audio with an simple and easy high level interface
+Home-page: http://pydub.com
+Author: James Robert
+Author-email: jiaaro@gmail.com
+License: MIT
+Keywords: audio sound high-level
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Multimedia :: Sound/Audio
+Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis
+Classifier: Topic :: Multimedia :: Sound/Audio :: Conversion
+Classifier: Topic :: Multimedia :: Sound/Audio :: Editors
+Classifier: Topic :: Multimedia :: Sound/Audio :: Mixers
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+
+
+Manipulate audio with an simple and easy high level interface.
+
+See the README file for details, usage info, and a list of gotchas.
+
+
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/RECORD b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..65d48ca3003a15696842b836e38cae27a592b6fc
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/RECORD
@@ -0,0 +1,30 @@
+pydub-0.25.1.dist-info/AUTHORS,sha256=AyY2PS9I2enOyBnUnxcpeAX-NnMNWLQT4yDtg8IIy78,1250
+pydub-0.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pydub-0.25.1.dist-info/LICENSE,sha256=roVlNiJMx6OJ6Wh3H8XyWYFL3Q2mNTnPcigq2672iXo,1074
+pydub-0.25.1.dist-info/METADATA,sha256=f0M8_ZVtbiYoUI9ejXIeJ03Jo9A5Nbi-0V1bVqs5iYk,1406
+pydub-0.25.1.dist-info/RECORD,,
+pydub-0.25.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pydub-0.25.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+pydub-0.25.1.dist-info/top_level.txt,sha256=PHhiDCQVZdycZxfKL2lQozruBT6ZhvyZAwqjRrw3t0w,6
+pydub/__init__.py,sha256=w1Xv1awbaR3fMhTNE1-grnfswgARTNQrKpBzfZ--VBA,39
+pydub/__pycache__/__init__.cpython-310.pyc,,
+pydub/__pycache__/audio_segment.cpython-310.pyc,,
+pydub/__pycache__/effects.cpython-310.pyc,,
+pydub/__pycache__/exceptions.cpython-310.pyc,,
+pydub/__pycache__/generators.cpython-310.pyc,,
+pydub/__pycache__/logging_utils.cpython-310.pyc,,
+pydub/__pycache__/playback.cpython-310.pyc,,
+pydub/__pycache__/pyaudioop.cpython-310.pyc,,
+pydub/__pycache__/scipy_effects.cpython-310.pyc,,
+pydub/__pycache__/silence.cpython-310.pyc,,
+pydub/__pycache__/utils.cpython-310.pyc,,
+pydub/audio_segment.py,sha256=Nf5VkHGY1v9Jqb7NtEYfwRpLrfqusfBdPGOZsi7R5Cg,49185
+pydub/effects.py,sha256=1HUMzhefrwG_E1rTnzvbl-P0-KNuwHklCnu8QCGS7jA,11507
+pydub/exceptions.py,sha256=osgXoUujwpH8K6hr80iYpW30CMBDFwqyaRD-5d7ZpKs,455
+pydub/generators.py,sha256=u6q7J8JLOY-uEZqMPUTzakxyua3XNQcPiDsuiK2-lLA,4045
+pydub/logging_utils.py,sha256=WuSqfzn4zyT7PxXHGV-PXMDynufeM6sC6eSmVlGX2RU,374
+pydub/playback.py,sha256=zFngVclUL_7oDipjzKC8b7jToPNV11DV28rGyH8pio0,1987
+pydub/pyaudioop.py,sha256=Dp_cQgAyYjD4OV2ZHuxtKI2KABuPi9YYNRUF8giR80Q,13094
+pydub/scipy_effects.py,sha256=U2p8AQuVreTp5MrtUAzRbWgOHUc6Dwq0TAG_RtEg-7g,6637
+pydub/silence.py,sha256=F6MV0VlaO6mtuisjLGks_UR-GVmzO1v87_NKvzwRc30,6457
+pydub/utils.py,sha256=W71pgJFbbNP3adH63yn0Eo0CLLVgzXG7WHYSXpWvdyc,12368
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/REQUESTED b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/WHEEL b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..01b8fc7d4a10cb8b4f1d21f11d3398d07d6b3478
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/top_level.txt b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a0320efe3ed055145a3d827228b3451e8fbb938
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/pydub-0.25.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pydub
diff --git a/valley/lib/python3.10/site-packages/referencing/__init__.py b/valley/lib/python3.10/site-packages/referencing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e09207d7e4b90aba221181d87886fd4f54038abf
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/__init__.py
@@ -0,0 +1,7 @@
+"""
+Cross-specification, implementation-agnostic JSON referencing.
+"""
+
+from referencing._core import Anchor, Registry, Resource, Specification
+
+__all__ = ["Anchor", "Registry", "Resource", "Specification"]
diff --git a/valley/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc b/valley/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10923f2ce8baedc149dfe152b5ed9d96be4b7373
Binary files /dev/null and b/valley/lib/python3.10/site-packages/referencing/__pycache__/jsonschema.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/referencing/__pycache__/typing.cpython-310.pyc b/valley/lib/python3.10/site-packages/referencing/__pycache__/typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb4fa71c9e2d843d216eead92686f81d4d790eac
Binary files /dev/null and b/valley/lib/python3.10/site-packages/referencing/__pycache__/typing.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/referencing/_attrs.py b/valley/lib/python3.10/site-packages/referencing/_attrs.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae85b865fed622afe83e8d6b7b17a1f0d174aba3
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/_attrs.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from typing import NoReturn, TypeVar
+
+from attrs import define as _define, frozen as _frozen
+
+_T = TypeVar("_T")
+
+
+def define(cls: type[_T]) -> type[_T]: # pragma: no cover
+ cls.__init_subclass__ = _do_not_subclass
+ return _define(cls)
+
+
+def frozen(cls: type[_T]) -> type[_T]:
+ cls.__init_subclass__ = _do_not_subclass
+ return _frozen(cls)
+
+
+class UnsupportedSubclassing(Exception):
+ def __str__(self):
+ return (
+ "Subclassing is not part of referencing's public API. "
+ "If no other suitable API exists for what you're trying to do, "
+ "feel free to file an issue asking for one."
+ )
+
+
+@staticmethod
+def _do_not_subclass() -> NoReturn: # pragma: no cover
+ raise UnsupportedSubclassing()
diff --git a/valley/lib/python3.10/site-packages/referencing/_attrs.pyi b/valley/lib/python3.10/site-packages/referencing/_attrs.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..278e4109b622dc3ecab7e3e0d0562ba594b80a33
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/_attrs.pyi
@@ -0,0 +1,20 @@
+from typing import Any, Callable, TypeVar, Union
+
+from attr import attrib, field
+
+class UnsupportedSubclassing(Exception): ...
+
+_T = TypeVar("_T")
+
+def __dataclass_transform__(
+ *,
+ frozen_default: bool = False,
+ field_descriptors: tuple[Union[type, Callable[..., Any]], ...] = ...,
+) -> Callable[[_T], _T]: ...
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(cls: type[_T]) -> type[_T]: ...
+@__dataclass_transform__(
+ frozen_default=True,
+ field_descriptors=(attrib, field),
+)
+def frozen(cls: type[_T]) -> type[_T]: ...
diff --git a/valley/lib/python3.10/site-packages/referencing/_core.py b/valley/lib/python3.10/site-packages/referencing/_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..28e3d524b523259c8e3cfa4e00d8a0e32c0da4a9
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/_core.py
@@ -0,0 +1,729 @@
+from __future__ import annotations
+
+from collections.abc import Iterable, Iterator, Sequence
+from enum import Enum
+from typing import Any, Callable, ClassVar, Generic, Protocol, TypeVar
+from urllib.parse import unquote, urldefrag, urljoin
+
+from attrs import evolve, field
+from rpds import HashTrieMap, HashTrieSet, List
+
+from referencing import exceptions
+from referencing._attrs import frozen
+from referencing.typing import URI, Anchor as AnchorType, D, Mapping, Retrieve
+
+EMPTY_UNCRAWLED: HashTrieSet[URI] = HashTrieSet()
+EMPTY_PREVIOUS_RESOLVERS: List[URI] = List()
+
+
+class _Unset(Enum):
+ """
+ What sillyness...
+ """
+
+ SENTINEL = 1
+
+
+_UNSET = _Unset.SENTINEL
+
+
+class _MaybeInSubresource(Protocol[D]):
+ def __call__(
+ self,
+ segments: Sequence[int | str],
+ resolver: Resolver[D],
+ subresource: Resource[D],
+ ) -> Resolver[D]: ...
+
+
+def _detect_or_error(contents: D) -> Specification[D]:
+ if not isinstance(contents, Mapping):
+ raise exceptions.CannotDetermineSpecification(contents)
+
+ jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType]
+ if not isinstance(jsonschema_dialect_id, str):
+ raise exceptions.CannotDetermineSpecification(contents)
+
+ from referencing.jsonschema import specification_with
+
+ return specification_with(jsonschema_dialect_id)
+
+
+def _detect_or_default(
+ default: Specification[D],
+) -> Callable[[D], Specification[D]]:
+ def _detect(contents: D) -> Specification[D]:
+ if not isinstance(contents, Mapping):
+ return default
+
+ jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType]
+ if jsonschema_dialect_id is None:
+ return default
+
+ from referencing.jsonschema import specification_with
+
+ return specification_with(
+ jsonschema_dialect_id, # type: ignore[reportUnknownArgumentType]
+ default=default,
+ )
+
+ return _detect
+
+
+class _SpecificationDetector:
+ def __get__(
+ self,
+ instance: Specification[D] | None,
+ cls: type[Specification[D]],
+ ) -> Callable[[D], Specification[D]]:
+ if instance is None:
+ return _detect_or_error
+ else:
+ return _detect_or_default(instance)
+
+
+@frozen
+class Specification(Generic[D]):
+ """
+ A specification which defines referencing behavior.
+
+ The various methods of a `Specification` allow for varying referencing
+ behavior across JSON Schema specification versions, etc.
+ """
+
+ #: A short human-readable name for the specification, used for debugging.
+ name: str
+
+ #: Find the ID of a given document.
+ id_of: Callable[[D], URI | None]
+
+ #: Retrieve the subresources of the given document (without traversing into
+ #: the subresources themselves).
+ subresources_of: Callable[[D], Iterable[D]]
+
+ #: While resolving a JSON pointer, conditionally enter a subresource
+ #: (if e.g. we have just entered a keyword whose value is a subresource)
+ maybe_in_subresource: _MaybeInSubresource[D]
+
+ #: Retrieve the anchors contained in the given document.
+ _anchors_in: Callable[
+ [Specification[D], D],
+ Iterable[AnchorType[D]],
+ ] = field(alias="anchors_in")
+
+ #: An opaque specification where resources have no subresources
+ #: nor internal identifiers.
+ OPAQUE: ClassVar[Specification[Any]]
+
+ #: Attempt to discern which specification applies to the given contents.
+ #:
+ #: May be called either as an instance method or as a class method, with
+ #: slightly different behavior in the following case:
+ #:
+ #: Recall that not all contents contains enough internal information about
+ #: which specification it is written for -- the JSON Schema ``{}``,
+ #: for instance, is valid under many different dialects and may be
+ #: interpreted as any one of them.
+ #:
+ #: When this method is used as an instance method (i.e. called on a
+ #: specific specification), that specification is used as the default
+ #: if the given contents are unidentifiable.
+ #:
+ #: On the other hand when called as a class method, an error is raised.
+ #:
+ #: To reiterate, ``DRAFT202012.detect({})`` will return ``DRAFT202012``
+ #: whereas the class method ``Specification.detect({})`` will raise an
+ #: error.
+ #:
+ #: (Note that of course ``DRAFT202012.detect(...)`` may return some other
+ #: specification when given a schema which *does* identify as being for
+ #: another version).
+ #:
+ #: Raises:
+ #:
+ #: `CannotDetermineSpecification`
+ #:
+ #: if the given contents don't have any discernible
+ #: information which could be used to guess which
+ #: specification they identify as
+ detect = _SpecificationDetector()
+
+ def __repr__(self) -> str:
+ return f""
+
+ def anchors_in(self, contents: D):
+ """
+ Retrieve the anchors contained in the given document.
+ """
+ return self._anchors_in(self, contents)
+
+ def create_resource(self, contents: D) -> Resource[D]:
+ """
+ Create a resource which is interpreted using this specification.
+ """
+ return Resource(contents=contents, specification=self)
+
+
+Specification.OPAQUE = Specification(
+ name="opaque",
+ id_of=lambda contents: None,
+ subresources_of=lambda contents: [],
+ anchors_in=lambda specification, contents: [],
+ maybe_in_subresource=lambda segments, resolver, subresource: resolver,
+)
+
+
+@frozen
+class Resource(Generic[D]):
+ r"""
+ A document (deserialized JSON) with a concrete interpretation under a spec.
+
+ In other words, a Python object, along with an instance of `Specification`
+ which describes how the document interacts with referencing -- both
+ internally (how it refers to other `Resource`\ s) and externally (how it
+ should be identified such that it is referenceable by other documents).
+ """
+
+ contents: D
+ _specification: Specification[D] = field(alias="specification")
+
+ @classmethod
+ def from_contents(
+ cls,
+ contents: D,
+ default_specification: (
+ type[Specification[D]] | Specification[D]
+ ) = Specification,
+ ) -> Resource[D]:
+ """
+ Create a resource guessing which specification applies to the contents.
+
+ Raises:
+
+ `CannotDetermineSpecification`
+
+ if the given contents don't have any discernible
+ information which could be used to guess which
+ specification they identify as
+
+ """
+ specification = default_specification.detect(contents)
+ return specification.create_resource(contents=contents)
+
+ @classmethod
+ def opaque(cls, contents: D) -> Resource[D]:
+ """
+ Create an opaque `Resource` -- i.e. one with opaque specification.
+
+ See `Specification.OPAQUE` for details.
+ """
+ return Specification.OPAQUE.create_resource(contents=contents)
+
+ def id(self) -> URI | None:
+ """
+ Retrieve this resource's (specification-specific) identifier.
+ """
+ id = self._specification.id_of(self.contents)
+ if id is None:
+ return
+ return id.rstrip("#")
+
+ def subresources(self) -> Iterable[Resource[D]]:
+ """
+ Retrieve this resource's subresources.
+ """
+ return (
+ Resource.from_contents(
+ each,
+ default_specification=self._specification,
+ )
+ for each in self._specification.subresources_of(self.contents)
+ )
+
+ def anchors(self) -> Iterable[AnchorType[D]]:
+ """
+ Retrieve this resource's (specification-specific) identifier.
+ """
+ return self._specification.anchors_in(self.contents)
+
    def pointer(self, pointer: str, resolver: Resolver[D]) -> Resolved[D]:
        """
        Resolve the given JSON pointer.

        Raises:

            `exceptions.PointerToNowhere`

                if the pointer points to a location not present in the document

        """
        # An empty pointer refers to this whole resource.
        if not pointer:
            return Resolved(contents=self.contents, resolver=resolver)

        contents = self.contents
        # Segments walked since the resolver last entered a subresource;
        # passed to the specification so it can decide whether the current
        # location is itself inside a subresource.
        segments: list[int | str] = []
        for segment in unquote(pointer[1:]).split("/"):
            if isinstance(contents, Sequence):
                # Array locations are indexed numerically.
                segment = int(segment)
            else:
                # Un-escape per RFC 6901: ~1 -> / first, then ~0 -> ~.
                segment = segment.replace("~1", "/").replace("~0", "~")
            try:
                contents = contents[segment]  # type: ignore[reportUnknownArgumentType]
            except LookupError as lookup_error:
                error = exceptions.PointerToNowhere(ref=pointer, resource=self)
                raise error from lookup_error

            segments.append(segment)
            last = resolver
            resolver = self._specification.maybe_in_subresource(
                segments=segments,
                resolver=resolver,
                subresource=self._specification.create_resource(contents),
            )
            if resolver is not last:
                # We entered a subresource: restart segment tracking there.
                segments = []
        return Resolved(contents=contents, resolver=resolver)  # type: ignore[reportUnknownArgumentType]
+
+
def _fail_to_retrieve(uri: URI):
    """Default ``retrieve`` callable: treat every URI as nonexistent."""
    error = exceptions.NoSuchResource(ref=uri)
    raise error
+
+
@frozen
class Registry(Mapping[URI, Resource[D]]):
    r"""
    A registry of `Resource`\ s, each identified by their canonical URIs.

    Registries store a collection of in-memory resources, and optionally
    enable additional resources which may be stored elsewhere (e.g. in a
    database, a separate set of files, over the network, etc.).

    They also lazily walk their known resources, looking for subresources
    within them. In other words, subresources contained within any added
    resources will be retrievable via their own IDs (though this discovery of
    subresources will be delayed until necessary).

    Registries are immutable, and their methods return new instances of the
    registry with the additional resources added to them.

    The ``retrieve`` argument can be used to configure retrieval of resources
    dynamically, either over the network, from a database, or the like.
    Pass it a callable which will be called if any URI not present in the
    registry is accessed. It must either return a `Resource` or else raise a
    `NoSuchResource` exception indicating that the resource does not exist
    even according to the retrieval logic.
    """

    #: in-memory resources, keyed by (fragment-stripped) URI
    _resources: HashTrieMap[URI, Resource[D]] = field(
        default=HashTrieMap(),
        converter=HashTrieMap.convert,  # type: ignore[reportGeneralTypeIssues]
        alias="resources",
    )
    #: anchors, keyed by (resource URI, anchor name)
    _anchors: HashTrieMap[tuple[URI, str], AnchorType[D]] = HashTrieMap()
    #: URIs of added resources whose subresources are not yet discovered
    _uncrawled: HashTrieSet[URI] = EMPTY_UNCRAWLED
    #: dynamic retrieval callable for URIs not present in the registry
    _retrieve: Retrieve[D] = field(default=_fail_to_retrieve, alias="retrieve")

    def __getitem__(self, uri: URI) -> Resource[D]:
        """
        Return the (already crawled) `Resource` identified by the given URI.
        """
        try:
            # An empty fragment is equivalent to the fragment-less URI.
            return self._resources[uri.rstrip("#")]
        except KeyError:
            raise exceptions.NoSuchResource(ref=uri) from None

    def __iter__(self) -> Iterator[URI]:
        """
        Iterate over all crawled URIs in the registry.
        """
        return iter(self._resources)

    def __len__(self) -> int:
        """
        Count the total number of fully crawled resources in this registry.
        """
        return len(self._resources)

    def __rmatmul__(
        self,
        new: Resource[D] | Iterable[Resource[D]],
    ) -> Registry[D]:
        """
        Create a new registry with resource(s) added using their internal IDs.

        Resources must have a internal IDs (e.g. the :kw:`$id` keyword in
        modern JSON Schema versions), otherwise an error will be raised.

        Both a single resource as well as an iterable of resources works, i.e.:

            * ``resource @ registry`` or

            * ``[iterable, of, multiple, resources] @ registry``

        which -- again, assuming the resources have internal IDs -- is
        equivalent to calling `Registry.with_resources` as such:

        .. code:: python

            registry.with_resources(
                (resource.id(), resource) for resource in new_resources
            )

        Raises:

            `NoInternalID`

                if the resource(s) in fact do not have IDs

        """
        if isinstance(new, Resource):
            new = (new,)

        resources = self._resources
        uncrawled = self._uncrawled
        for resource in new:
            id = resource.id()
            if id is None:
                raise exceptions.NoInternalID(resource=resource)
            uncrawled = uncrawled.insert(id)
            resources = resources.insert(id, resource)
        return evolve(self, resources=resources, uncrawled=uncrawled)

    def __repr__(self) -> str:
        size = len(self)
        pluralized = "resource" if size == 1 else "resources"
        if self._uncrawled:
            uncrawled = len(self._uncrawled)
            if uncrawled == size:
                summary = f"uncrawled {pluralized}"
            else:
                summary = f"{pluralized}, {uncrawled} uncrawled"
        else:
            summary = pluralized
        # BUG FIX: this previously returned an empty f-string (`f""`),
        # discarding the computed size/summary entirely.
        return f"<Registry ({size} {summary})>"

    def get_or_retrieve(self, uri: URI) -> Retrieved[D, Resource[D]]:
        """
        Get a resource from the registry, crawling or retrieving if necessary.

        May involve crawling to find the given URI if it is not already known,
        so the returned object is a `Retrieved` object which contains both the
        resource value as well as the registry which ultimately contained it.
        """
        resource = self._resources.get(uri)
        if resource is not None:
            return Retrieved(registry=self, value=resource)

        # Not directly known -- crawl for subresources first.
        registry = self.crawl()
        resource = registry._resources.get(uri)
        if resource is not None:
            return Retrieved(registry=registry, value=resource)

        # Still unknown -- fall back to dynamic retrieval.
        try:
            resource = registry._retrieve(uri)
        except (
            exceptions.CannotDetermineSpecification,
            exceptions.NoSuchResource,
        ):
            raise
        except Exception as error:
            # Any other failure from the retrieval callable is wrapped so
            # callers see a single retrieval-failure exception type.
            raise exceptions.Unretrievable(ref=uri) from error
        else:
            registry = registry.with_resource(uri, resource)
            return Retrieved(registry=registry, value=resource)

    def remove(self, uri: URI):
        """
        Return a registry with the resource identified by a given URI removed.

        Raises:

            `NoSuchResource`

                if the URI is not present in the registry

        """
        if uri not in self._resources:
            raise exceptions.NoSuchResource(ref=uri)

        return evolve(
            self,
            resources=self._resources.remove(uri),
            uncrawled=self._uncrawled.discard(uri),
            # Also drop any anchors which lived in the removed resource.
            anchors=HashTrieMap(
                (k, v) for k, v in self._anchors.items() if k[0] != uri
            ),
        )

    def anchor(self, uri: URI, name: str):
        """
        Retrieve a given anchor from a resource which must already be crawled.

        Raises:

            `NoSuchAnchor` / `InvalidAnchor`

                if the anchor isn't present (or could never be)

        """
        value = self._anchors.get((uri, name))
        if value is not None:
            return Retrieved(value=value, registry=self)

        registry = self.crawl()
        value = registry._anchors.get((uri, name))
        if value is not None:
            return Retrieved(value=value, registry=registry)

        resource = self[uri]
        # The anchor may have been indexed under the resource's own
        # (canonical) URI rather than the URI it was requested at.
        canonical_uri = resource.id()
        if canonical_uri is not None:
            value = registry._anchors.get((canonical_uri, name))
            if value is not None:
                return Retrieved(value=value, registry=registry)

        if "/" in name:
            # A slash can never appear in a plain-name anchor; the caller
            # likely meant a JSON pointer.
            raise exceptions.InvalidAnchor(
                ref=uri,
                resource=resource,
                anchor=name,
            )
        raise exceptions.NoSuchAnchor(ref=uri, resource=resource, anchor=name)

    def contents(self, uri: URI) -> D:
        """
        Retrieve the (already crawled) contents identified by the given URI.
        """
        return self[uri].contents

    def crawl(self) -> Registry[D]:
        """
        Crawl all added resources, discovering subresources.
        """
        resources = self._resources
        anchors = self._anchors
        uncrawled = [(uri, resources[uri]) for uri in self._uncrawled]
        while uncrawled:
            uri, resource = uncrawled.pop()

            id = resource.id()
            if id is not None:
                # A resource declaring its own ID is also registered under
                # it, resolved against the URI it was found at.
                uri = urljoin(uri, id)
                resources = resources.insert(uri, resource)
            for each in resource.anchors():
                anchors = anchors.insert((uri, each.name), each)
            uncrawled.extend((uri, each) for each in resource.subresources())
        return evolve(
            self,
            resources=resources,
            anchors=anchors,
            uncrawled=EMPTY_UNCRAWLED,
        )

    def with_resource(self, uri: URI, resource: Resource[D]):
        """
        Add the given `Resource` to the registry, without crawling it.
        """
        return self.with_resources([(uri, resource)])

    def with_resources(
        self,
        pairs: Iterable[tuple[URI, Resource[D]]],
    ) -> Registry[D]:
        r"""
        Add the given `Resource`\ s to the registry, without crawling them.
        """
        resources = self._resources
        uncrawled = self._uncrawled
        for uri, resource in pairs:
            # Empty fragment URIs are equivalent to URIs without the fragment.
            # TODO: Is this true for non JSON Schema resources? Probably not.
            uri = uri.rstrip("#")
            uncrawled = uncrawled.insert(uri)
            resources = resources.insert(uri, resource)
        return evolve(self, resources=resources, uncrawled=uncrawled)

    def with_contents(
        self,
        pairs: Iterable[tuple[URI, D]],
        **kwargs: Any,
    ) -> Registry[D]:
        r"""
        Add the given contents to the registry, autodetecting when necessary.
        """
        return self.with_resources(
            (uri, Resource.from_contents(each, **kwargs))
            for uri, each in pairs
        )

    def combine(self, *registries: Registry[D]) -> Registry[D]:
        """
        Combine together one or more other registries, producing a unified one.

        Raises:

            `ValueError`

                if two combined registries have conflicting (distinct,
                non-default) retrieval callables

        """
        if registries == (self,):
            return self
        resources = self._resources
        anchors = self._anchors
        uncrawled = self._uncrawled
        retrieve = self._retrieve
        for registry in registries:
            resources = resources.update(registry._resources)
            anchors = anchors.update(registry._anchors)
            uncrawled = uncrawled.update(registry._uncrawled)

            if registry._retrieve is not _fail_to_retrieve:
                # Conflict only if we already adopted a *different*
                # non-default retrieve callable.
                if registry._retrieve is not retrieve is not _fail_to_retrieve:
                    raise ValueError(  # noqa: TRY003
                        "Cannot combine registries with conflicting retrieval "
                        "functions.",
                    )
                retrieve = registry._retrieve
        return evolve(
            self,
            anchors=anchors,
            resources=resources,
            uncrawled=uncrawled,
            retrieve=retrieve,
        )

    def resolver(self, base_uri: URI = "") -> Resolver[D]:
        """
        Return a `Resolver` which resolves references against this registry.
        """
        return Resolver(base_uri=base_uri, registry=self)

    def resolver_with_root(self, resource: Resource[D]) -> Resolver[D]:
        """
        Return a `Resolver` with a specific root resource.
        """
        uri = resource.id() or ""
        return Resolver(
            base_uri=uri,
            registry=self.with_resource(uri, resource),
        )
+
+
+#: An anchor or resource.
+AnchorOrResource = TypeVar("AnchorOrResource", AnchorType[Any], Resource[Any])
+
+
@frozen
class Retrieved(Generic[D, AnchorOrResource]):
    """
    A value retrieved from a `Registry`.
    """

    #: the retrieved anchor or resource
    value: AnchorOrResource
    #: the registry which ultimately contained the value (it may have been
    #: crawled or had the value dynamically retrieved into it)
    registry: Registry[D]
+
+
@frozen
class Resolved(Generic[D]):
    """
    A reference resolved to its contents by a `Resolver`.
    """

    #: the contents the reference pointed to
    contents: D
    #: a resolver for resolving further references relative to the contents
    resolver: Resolver[D]
+
+
@frozen
class Resolver(Generic[D]):
    """
    A reference resolver.

    Resolvers help resolve references (including relative ones) by
    pairing a fixed base URI with a `Registry`.

    This object, under normal circumstances, is expected to be used by
    *implementers of libraries* built on top of `referencing` (e.g. JSON Schema
    implementations or other libraries resolving JSON references),
    not directly by end-users populating registries or while writing
    schemas or other resources.

    References are resolved against the base URI, and the combined URI
    is then looked up within the registry.

    The process of resolving a reference may itself involve calculating
    a *new* base URI for future reference resolution (e.g. if an
    intermediate resource sets a new base URI), or may involve encountering
    additional subresources and adding them to a new registry.
    """

    #: the base URI references are resolved against
    _base_uri: URI = field(alias="base_uri")
    #: the registry in which resolved URIs are looked up
    _registry: Registry[D] = field(alias="registry")
    #: previously-seen base URIs, newest first (the dynamic scope)
    _previous: List[URI] = field(default=List(), repr=False, alias="previous")

    def lookup(self, ref: URI) -> Resolved[D]:
        """
        Resolve the given reference to the resource it points to.

        Raises:

            `exceptions.Unresolvable`

                or a subclass thereof (see below) if the reference isn't
                resolvable

            `exceptions.NoSuchAnchor`

                if the reference is to a URI where a resource exists but
                contains a plain name fragment which does not exist within
                the resource

            `exceptions.PointerToNowhere`

                if the reference is to a URI where a resource exists but
                contains a JSON pointer to a location within the resource
                that does not exist

        """
        if ref.startswith("#"):
            # A fragment-only reference stays within the current base URI.
            uri, fragment = self._base_uri, ref[1:]
        else:
            uri, fragment = urldefrag(urljoin(self._base_uri, ref))
        try:
            retrieved = self._registry.get_or_retrieve(uri)
        except exceptions.NoSuchResource:
            raise exceptions.Unresolvable(ref=ref) from None
        except exceptions.Unretrievable as error:
            raise exceptions.Unresolvable(ref=ref) from error

        # A fragment beginning with / is a JSON pointer...
        if fragment.startswith("/"):
            resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
            return retrieved.value.pointer(pointer=fragment, resolver=resolver)

        # ...any other non-empty fragment is a plain-name anchor...
        if fragment:
            retrieved = retrieved.registry.anchor(uri, fragment)
            resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
            return retrieved.value.resolve(resolver=resolver)

        # ...and no fragment at all refers to the whole resource.
        resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
        return Resolved(contents=retrieved.value.contents, resolver=resolver)

    def in_subresource(self, subresource: Resource[D]) -> Resolver[D]:
        """
        Create a resolver for a subresource (which may have a new base URI).
        """
        id = subresource.id()
        if id is None:
            # No new base URI was declared; the current resolver suffices.
            return self
        return evolve(self, base_uri=urljoin(self._base_uri, id))

    def dynamic_scope(self) -> Iterable[tuple[URI, Registry[D]]]:
        """
        In specs with such a notion, return the URIs in the dynamic scope.
        """
        for uri in self._previous:
            yield uri, self._registry

    def _evolve(self, base_uri: URI, **kwargs: Any):
        """
        Evolve, appending to the dynamic scope.
        """
        previous = self._previous
        # Only record the old base URI when it is non-empty and we are
        # actually moving to a different base (or the scope is empty).
        if self._base_uri and (not previous or base_uri != self._base_uri):
            previous = previous.push_front(self._base_uri)
        return evolve(self, base_uri=base_uri, previous=previous, **kwargs)
+
+
@frozen
class Anchor(Generic[D]):
    """
    A simple anchor in a `Resource`.
    """

    #: the name of the anchor
    name: str
    #: the resource the anchor refers to (in its entirety)
    resource: Resource[D]

    def resolve(self, resolver: Resolver[D]):
        """
        Return the resource for this anchor.
        """
        return Resolved(contents=self.resource.contents, resolver=resolver)
diff --git a/valley/lib/python3.10/site-packages/referencing/exceptions.py b/valley/lib/python3.10/site-packages/referencing/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..3267fc70732e73c0a888d9f60551ad9373ed6d16
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/exceptions.py
@@ -0,0 +1,165 @@
+"""
+Errors, oh no!
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+import attrs
+
+from referencing._attrs import frozen
+
+if TYPE_CHECKING:
+ from referencing import Resource
+ from referencing.typing import URI
+
+
@frozen
class NoSuchResource(KeyError):
    """
    The given URI is not present in a registry.

    Unlike most exceptions, this class *is* intended to be publicly
    instantiable and *is* part of the public API of the package.
    """

    #: the URI which was not present
    ref: URI

    def __eq__(self, other: object) -> bool:
        # Field-wise comparison, but only between the exact same class.
        if self.__class__ is other.__class__:
            return attrs.astuple(self) == attrs.astuple(other)
        return NotImplemented

    def __hash__(self) -> int:
        return hash(attrs.astuple(self))
+
+
@frozen
class NoInternalID(Exception):
    """
    A resource has no internal ID, but one is needed.

    E.g. in modern JSON Schema drafts, this is the :kw:`$id` keyword.

    One might be needed if a resource was to-be added to a registry but no
    other URI is available, and the resource doesn't declare its canonical URI.
    """

    #: the resource which lacks an internal ID
    resource: Resource[Any]

    def __eq__(self, other: object) -> bool:
        # Field-wise comparison, but only between the exact same class.
        if self.__class__ is other.__class__:
            return attrs.astuple(self) == attrs.astuple(other)
        return NotImplemented

    def __hash__(self) -> int:
        return hash(attrs.astuple(self))
+
+
@frozen
class Unretrievable(KeyError):
    """
    The given URI is not present in a registry, and retrieving it failed.
    """

    #: the URI whose retrieval failed
    ref: URI

    def __eq__(self, other: object) -> bool:
        # Field-wise comparison, but only between the exact same class.
        if self.__class__ is other.__class__:
            return attrs.astuple(self) == attrs.astuple(other)
        return NotImplemented

    def __hash__(self) -> int:
        return hash(attrs.astuple(self))
+
+
@frozen
class CannotDetermineSpecification(Exception):
    """
    Attempting to detect the appropriate `Specification` failed.

    This happens if no discernible information is found in the contents of the
    new resource which would help identify it.
    """

    #: the contents for which no specification could be detected
    contents: Any

    def __eq__(self, other: object) -> bool:
        # Field-wise comparison, but only between the exact same class.
        if self.__class__ is other.__class__:
            return attrs.astuple(self) == attrs.astuple(other)
        return NotImplemented

    def __hash__(self) -> int:
        return hash(attrs.astuple(self))
+
+
@attrs.frozen  # not our own `frozen`, as subclasses below extend this one
class Unresolvable(Exception):
    """
    A reference was unresolvable.
    """

    #: the unresolvable reference
    ref: URI

    def __eq__(self, other: object) -> bool:
        # Field-wise comparison, but only between the exact same class.
        if self.__class__ is other.__class__:
            return attrs.astuple(self) == attrs.astuple(other)
        return NotImplemented

    def __hash__(self) -> int:
        return hash(attrs.astuple(self))
+
+
@frozen
class PointerToNowhere(Unresolvable):
    """
    A JSON Pointer leads to a part of a document that does not exist.
    """

    #: the resource the pointer was resolved against
    resource: Resource[Any]

    def __str__(self) -> str:
        message = f"{self.ref!r} does not exist within {self.resource.contents!r}"
        if self.ref != "/":
            return message
        # '/' is a common mistake for 'the whole document'; explain it.
        return message + (
            ". The pointer '/' is a valid JSON Pointer but it points to "
            "an empty string property ''. If you intended to point "
            "to the entire resource, you should use '#'."
        )
+
+
@frozen
class NoSuchAnchor(Unresolvable):
    """
    An anchor does not exist within a particular resource.
    """

    #: the resource which was searched
    resource: Resource[Any]
    #: the name of the missing anchor
    anchor: str

    def __str__(self) -> str:
        contents = self.resource.contents
        return f"{self.anchor!r} does not exist within {contents!r}"
+
+
@frozen
class InvalidAnchor(Unresolvable):
    """
    An anchor which could never exist in a resource was dereferenced.

    It is somehow syntactically invalid.
    """

    #: the resource which was searched
    resource: Resource[Any]
    #: the syntactically invalid anchor name
    anchor: str

    def __str__(self) -> str:
        anchor = self.anchor
        return (
            f"'#{anchor}' is not a valid anchor, neither as a "
            "plain name anchor nor as a JSON Pointer. You may have intended "
            f"to use '#/{anchor}', as the slash is required *before each "
            "segment* of a JSON pointer."
        )
diff --git a/valley/lib/python3.10/site-packages/referencing/jsonschema.py b/valley/lib/python3.10/site-packages/referencing/jsonschema.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb8668a40ede0917f4d53f341d94e0dbf7eebbf6
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/jsonschema.py
@@ -0,0 +1,642 @@
+"""
+Referencing implementations for JSON Schema specs (historic & current).
+"""
+
+from __future__ import annotations
+
+from collections.abc import Sequence, Set
+from typing import Any, Iterable, Union
+
+from referencing import Anchor, Registry, Resource, Specification, exceptions
+from referencing._attrs import frozen
+from referencing._core import (
+ _UNSET, # type: ignore[reportPrivateUsage]
+ Resolved as _Resolved,
+ Resolver as _Resolver,
+ _Unset, # type: ignore[reportPrivateUsage]
+)
+from referencing.typing import URI, Anchor as AnchorType, Mapping
+
+#: A JSON Schema which is a JSON object
+ObjectSchema = Mapping[str, Any]
+
+#: A JSON Schema of any kind
+Schema = Union[bool, ObjectSchema]
+
+#: A Resource whose contents are JSON Schemas
+SchemaResource = Resource[Schema]
+
+#: A JSON Schema Registry
+SchemaRegistry = Registry[Schema]
+
+#: The empty JSON Schema Registry
+EMPTY_REGISTRY: SchemaRegistry = Registry()
+
+
@frozen
class UnknownDialect(Exception):
    """
    A dialect identifier was found for a dialect unknown by this library.

    If it's a custom ("unofficial") dialect, be sure you've registered it.
    """

    #: the dialect identifier which was not recognized
    uri: URI
+
+
+def _dollar_id(contents: Schema) -> URI | None:
+ if isinstance(contents, bool):
+ return
+ return contents.get("$id")
+
+
+def _legacy_dollar_id(contents: Schema) -> URI | None:
+ if isinstance(contents, bool) or "$ref" in contents:
+ return
+ id = contents.get("$id")
+ if id is not None and not id.startswith("#"):
+ return id
+
+
+def _legacy_id(contents: ObjectSchema) -> URI | None:
+ if "$ref" in contents:
+ return
+ id = contents.get("id")
+ if id is not None and not id.startswith("#"):
+ return id
+
+
def _anchor(
    specification: Specification[Schema],
    contents: Schema,
) -> Iterable[AnchorType[Schema]]:
    """Yield the ``$anchor`` / ``$dynamicAnchor`` anchors of a 2020-12 schema."""
    if isinstance(contents, bool):
        return

    plain = contents.get("$anchor")
    if plain is not None:
        yield Anchor(
            name=plain,
            resource=specification.create_resource(contents),
        )

    dynamic = contents.get("$dynamicAnchor")
    if dynamic is not None:
        yield DynamicAnchor(
            name=dynamic,
            resource=specification.create_resource(contents),
        )
+
+
def _anchor_2019(
    specification: Specification[Schema],
    contents: Schema,
) -> Iterable[Anchor[Schema]]:
    """Return the ``$anchor`` anchor of a 2019-09 schema (no dynamic ones)."""
    if isinstance(contents, bool):
        return []
    name = contents.get("$anchor")
    if name is None:
        return []
    resource = specification.create_resource(contents)
    return [Anchor(name=name, resource=resource)]
+
+
def _legacy_anchor_in_dollar_id(
    specification: Specification[Schema],
    contents: Schema,
) -> Iterable[Anchor[Schema]]:
    """Return an anchor for a draft 6/7 ``$id`` of the form ``#name``."""
    if isinstance(contents, bool):
        return []
    declared = contents.get("$id", "")
    if not declared.startswith("#"):
        return []
    resource = specification.create_resource(contents)
    return [Anchor(name=declared[1:], resource=resource)]
+
+
def _legacy_anchor_in_id(
    specification: Specification[ObjectSchema],
    contents: ObjectSchema,
) -> Iterable[Anchor[ObjectSchema]]:
    """Return an anchor for a draft 3/4 ``id`` of the form ``#name``."""
    declared = contents.get("id", "")
    if not declared.startswith("#"):
        return []
    resource = specification.create_resource(contents)
    return [Anchor(name=declared[1:], resource=resource)]
+
+
+def _subresources_of(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ """
+ Create a callable returning JSON Schema specification-style subschemas.
+
+ Relies on specifying the set of keywords containing subschemas in their
+ values, in a subobject's values, or in a subarray.
+ """
+
+ def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
+ if isinstance(contents, bool):
+ return
+ for each in in_value:
+ if each in contents:
+ yield contents[each]
+ for each in in_subarray:
+ if each in contents:
+ yield from contents[each]
+ for each in in_subvalues:
+ if each in contents:
+ yield from contents[each].values()
+
+ return subresources_of
+
+
+def _subresources_of_with_crazy_items(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ """
+ Specifically handle older drafts where there are some funky keywords.
+ """
+
+ def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
+ if isinstance(contents, bool):
+ return
+ for each in in_value:
+ if each in contents:
+ yield contents[each]
+ for each in in_subarray:
+ if each in contents:
+ yield from contents[each]
+ for each in in_subvalues:
+ if each in contents:
+ yield from contents[each].values()
+
+ items = contents.get("items")
+ if items is not None:
+ if isinstance(items, Sequence):
+ yield from items
+ else:
+ yield items
+
+ return subresources_of
+
+
+def _subresources_of_with_crazy_items_dependencies(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ """
+ Specifically handle older drafts where there are some funky keywords.
+ """
+
+ def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
+ if isinstance(contents, bool):
+ return
+ for each in in_value:
+ if each in contents:
+ yield contents[each]
+ for each in in_subarray:
+ if each in contents:
+ yield from contents[each]
+ for each in in_subvalues:
+ if each in contents:
+ yield from contents[each].values()
+
+ items = contents.get("items")
+ if items is not None:
+ if isinstance(items, Sequence):
+ yield from items
+ else:
+ yield items
+ dependencies = contents.get("dependencies")
+ if dependencies is not None:
+ values = iter(dependencies.values())
+ value = next(values, None)
+ if isinstance(value, Mapping):
+ yield value
+ yield from values
+
+ return subresources_of
+
+
+def _subresources_of_with_crazy_aP_items_dependencies(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ """
+ Specifically handle even older drafts where there are some funky keywords.
+ """
+
+ def subresources_of(contents: ObjectSchema) -> Iterable[ObjectSchema]:
+ for each in in_value:
+ if each in contents:
+ yield contents[each]
+ for each in in_subarray:
+ if each in contents:
+ yield from contents[each]
+ for each in in_subvalues:
+ if each in contents:
+ yield from contents[each].values()
+
+ items = contents.get("items")
+ if items is not None:
+ if isinstance(items, Sequence):
+ yield from items
+ else:
+ yield items
+ dependencies = contents.get("dependencies")
+ if dependencies is not None:
+ values = iter(dependencies.values())
+ value = next(values, None)
+ if isinstance(value, Mapping):
+ yield value
+ yield from values
+
+ for each in "additionalItems", "additionalProperties":
+ value = contents.get(each)
+ if isinstance(value, Mapping):
+ yield value
+
+ return subresources_of
+
+
+def _maybe_in_subresource(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ in_child = in_subvalues | in_subarray
+
+ def maybe_in_subresource(
+ segments: Sequence[int | str],
+ resolver: _Resolver[Any],
+ subresource: Resource[Any],
+ ) -> _Resolver[Any]:
+ _segments = iter(segments)
+ for segment in _segments:
+ if segment not in in_value and (
+ segment not in in_child or next(_segments, None) is None
+ ):
+ return resolver
+ return resolver.in_subresource(subresource)
+
+ return maybe_in_subresource
+
+
+def _maybe_in_subresource_crazy_items(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ in_child = in_subvalues | in_subarray
+
+ def maybe_in_subresource(
+ segments: Sequence[int | str],
+ resolver: _Resolver[Any],
+ subresource: Resource[Any],
+ ) -> _Resolver[Any]:
+ _segments = iter(segments)
+ for segment in _segments:
+ if segment == "items" and isinstance(
+ subresource.contents,
+ Mapping,
+ ):
+ return resolver.in_subresource(subresource)
+ if segment not in in_value and (
+ segment not in in_child or next(_segments, None) is None
+ ):
+ return resolver
+ return resolver.in_subresource(subresource)
+
+ return maybe_in_subresource
+
+
+def _maybe_in_subresource_crazy_items_dependencies(
+ in_value: Set[str] = frozenset(),
+ in_subvalues: Set[str] = frozenset(),
+ in_subarray: Set[str] = frozenset(),
+):
+ in_child = in_subvalues | in_subarray
+
+ def maybe_in_subresource(
+ segments: Sequence[int | str],
+ resolver: _Resolver[Any],
+ subresource: Resource[Any],
+ ) -> _Resolver[Any]:
+ _segments = iter(segments)
+ for segment in _segments:
+ if segment in {"items", "dependencies"} and isinstance(
+ subresource.contents,
+ Mapping,
+ ):
+ return resolver.in_subresource(subresource)
+ if segment not in in_value and (
+ segment not in in_child or next(_segments, None) is None
+ ):
+ return resolver
+ return resolver.in_subresource(subresource)
+
+ return maybe_in_subresource
+
+
+#: JSON Schema draft 2020-12
+DRAFT202012 = Specification(
+ name="draft2020-12",
+ id_of=_dollar_id,
+ subresources_of=_subresources_of(
+ in_value={
+ "additionalProperties",
+ "contains",
+ "contentSchema",
+ "else",
+ "if",
+ "items",
+ "not",
+ "propertyNames",
+ "then",
+ "unevaluatedItems",
+ "unevaluatedProperties",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"},
+ in_subvalues={
+ "$defs",
+ "definitions",
+ "dependentSchemas",
+ "patternProperties",
+ "properties",
+ },
+ ),
+ anchors_in=_anchor,
+ maybe_in_subresource=_maybe_in_subresource(
+ in_value={
+ "additionalProperties",
+ "contains",
+ "contentSchema",
+ "else",
+ "if",
+ "items",
+ "not",
+ "propertyNames",
+ "then",
+ "unevaluatedItems",
+ "unevaluatedProperties",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"},
+ in_subvalues={
+ "$defs",
+ "definitions",
+ "dependentSchemas",
+ "patternProperties",
+ "properties",
+ },
+ ),
+)
+#: JSON Schema draft 2019-09
+DRAFT201909 = Specification(
+ name="draft2019-09",
+ id_of=_dollar_id,
+ subresources_of=_subresources_of_with_crazy_items(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "contentSchema",
+ "else",
+ "if",
+ "not",
+ "propertyNames",
+ "then",
+ "unevaluatedItems",
+ "unevaluatedProperties",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={
+ "$defs",
+ "definitions",
+ "dependentSchemas",
+ "patternProperties",
+ "properties",
+ },
+ ),
+ anchors_in=_anchor_2019, # type: ignore[reportGeneralTypeIssues] # TODO: check whether this is real
+ maybe_in_subresource=_maybe_in_subresource_crazy_items(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "contentSchema",
+ "else",
+ "if",
+ "not",
+ "propertyNames",
+ "then",
+ "unevaluatedItems",
+ "unevaluatedProperties",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={
+ "$defs",
+ "definitions",
+ "dependentSchemas",
+ "patternProperties",
+ "properties",
+ },
+ ),
+)
+#: JSON Schema draft 7
+DRAFT7 = Specification(
+ name="draft-07",
+ id_of=_legacy_dollar_id,
+ subresources_of=_subresources_of_with_crazy_items_dependencies(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "else",
+ "if",
+ "not",
+ "propertyNames",
+ "then",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+ anchors_in=_legacy_anchor_in_dollar_id, # type: ignore[reportGeneralTypeIssues] # TODO: check whether this is real
+ maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "else",
+ "if",
+ "not",
+ "propertyNames",
+ "then",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+)
+#: JSON Schema draft 6
+DRAFT6 = Specification(
+ name="draft-06",
+ id_of=_legacy_dollar_id,
+ subresources_of=_subresources_of_with_crazy_items_dependencies(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "not",
+ "propertyNames",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+ anchors_in=_legacy_anchor_in_dollar_id, # type: ignore[reportGeneralTypeIssues] # TODO: check whether this is real
+ maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
+ in_value={
+ "additionalItems",
+ "additionalProperties",
+ "contains",
+ "not",
+ "propertyNames",
+ },
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+)
+#: JSON Schema draft 4
+DRAFT4 = Specification(
+ name="draft-04",
+ id_of=_legacy_id,
+ subresources_of=_subresources_of_with_crazy_aP_items_dependencies(
+ in_value={"not"},
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+ anchors_in=_legacy_anchor_in_id,
+ maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
+ in_value={"additionalItems", "additionalProperties", "not"},
+ in_subarray={"allOf", "anyOf", "oneOf"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+)
+#: JSON Schema draft 3
+DRAFT3 = Specification(
+ name="draft-03",
+ id_of=_legacy_id,
+ subresources_of=_subresources_of_with_crazy_aP_items_dependencies(
+ in_subarray={"extends"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+ anchors_in=_legacy_anchor_in_id,
+ maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
+ in_value={"additionalItems", "additionalProperties"},
+ in_subarray={"extends"},
+ in_subvalues={"definitions", "patternProperties", "properties"},
+ ),
+)
+
+
+_SPECIFICATIONS: Registry[Specification[Schema]] = Registry(
+ { # type: ignore[reportGeneralTypeIssues] # :/ internal vs external types
+ dialect_id: Resource.opaque(specification)
+ for dialect_id, specification in [
+ ("https://json-schema.org/draft/2020-12/schema", DRAFT202012),
+ ("https://json-schema.org/draft/2019-09/schema", DRAFT201909),
+ ("http://json-schema.org/draft-07/schema", DRAFT7),
+ ("http://json-schema.org/draft-06/schema", DRAFT6),
+ ("http://json-schema.org/draft-04/schema", DRAFT4),
+ ("http://json-schema.org/draft-03/schema", DRAFT3),
+ ]
+ },
+)
+
+
def specification_with(
    dialect_id: URI,
    default: Specification[Any] | _Unset = _UNSET,
) -> Specification[Any]:
    """
    Retrieve the `Specification` with the given dialect identifier.

    Raises:

        `UnknownDialect`

            if the given ``dialect_id`` isn't known

    """
    # An empty fragment is equivalent to the fragment-less dialect URI.
    normalized = dialect_id.rstrip("#")
    resource = _SPECIFICATIONS.get(normalized)
    if resource is None:
        if default is _UNSET:
            raise UnknownDialect(dialect_id)
        return default
    return resource.contents
+
+
@frozen
class DynamicAnchor:
    """
    Dynamic anchors, introduced in draft 2020.
    """

    #: the anchor name (the value of ``$dynamicAnchor``)
    name: str
    #: the resource in which this anchor was declared
    resource: SchemaResource

    def resolve(self, resolver: _Resolver[Schema]) -> _Resolved[Schema]:
        """
        Resolve this anchor dynamically.

        Walks the resolver's dynamic scope, and keeps updating the candidate
        whenever a scope also declares a `DynamicAnchor` of the same name --
        so the final matching scope in the iteration wins, falling back to
        this anchor's own resource when none match.
        """
        last = self.resource
        for uri, registry in resolver.dynamic_scope():
            try:
                anchor = registry.anchor(uri, self.name).value
            except exceptions.NoSuchAnchor:
                # Scopes without the anchor simply don't participate.
                continue
            if isinstance(anchor, DynamicAnchor):
                # Only *dynamic* anchors override the candidate; a plain
                # `Anchor` of the same name is ignored here.
                last = anchor.resource
        return _Resolved(
            contents=last.contents,
            resolver=resolver.in_subresource(last),
        )
+
+
def lookup_recursive_ref(resolver: _Resolver[Schema]) -> _Resolved[Schema]:
    """
    Recursive references (via recursive anchors), present only in draft 2019.

    As per the 2019 specification (§ 8.2.4.2.1), only the ``#`` recursive
    reference is supported (and is therefore assumed to be the relevant
    reference).
    """
    resolved = resolver.lookup("#")
    if isinstance(resolved.contents, Mapping) and resolved.contents.get(
        "$recursiveAnchor",
    ):
        # Walk the dynamic scope for as long as each schema there also sets
        # `$recursiveAnchor`; the last such schema seen is the resolution.
        for uri, _ in resolver.dynamic_scope():
            next_resolved = resolver.lookup(uri)
            if not isinstance(
                next_resolved.contents,
                Mapping,
            ) or not next_resolved.contents.get("$recursiveAnchor"):
                break
            resolved = next_resolved
    return resolved
diff --git a/valley/lib/python3.10/site-packages/referencing/py.typed b/valley/lib/python3.10/site-packages/referencing/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/referencing/retrieval.py b/valley/lib/python3.10/site-packages/referencing/retrieval.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3719cf342b920685175aed8d67936ab65b6801e
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/retrieval.py
@@ -0,0 +1,87 @@
+"""
+Helpers related to (dynamic) resource retrieval.
+"""
+
+from __future__ import annotations
+
+from functools import lru_cache
+from typing import TYPE_CHECKING, Callable, TypeVar
+import json
+
+from referencing import Resource
+
+if TYPE_CHECKING:
+ from referencing.typing import URI, D, Retrieve
+
+#: A serialized document (e.g. a JSON string)
+_T = TypeVar("_T")
+
+
+def to_cached_resource(
+ cache: Callable[[Retrieve[D]], Retrieve[D]] | None = None,
+ loads: Callable[[_T], D] = json.loads,
+ from_contents: Callable[[D], Resource[D]] = Resource.from_contents,
+) -> Callable[[Callable[[URI], _T]], Retrieve[D]]:
+ """
+ Create a retriever which caches its return values from a simpler callable.
+
+ Takes a function which returns things like serialized JSON (strings) and
+ returns something suitable for passing to `Registry` as a retrieve
+ function.
+
+ This decorator both reduces a small bit of boilerplate for a common case
+ (deserializing JSON from strings and creating `Resource` objects from the
+ result) as well as makes the probable need for caching a bit easier.
+ Retrievers which otherwise do expensive operations (like hitting the
+ network) might otherwise be called repeatedly.
+
+ Examples
+ --------
+
+ .. testcode::
+
+ from referencing import Registry
+ from referencing.typing import URI
+ import referencing.retrieval
+
+
+ @referencing.retrieval.to_cached_resource()
+ def retrieve(uri: URI):
+ print(f"Retrieved {uri}")
+
+ # Normally, go get some expensive JSON from the network, a file ...
+ return '''
+ {
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "foo": "bar"
+ }
+ '''
+
+ one = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo")
+ print(one.value.contents["foo"])
+
+ # Retrieving the same URI again reuses the same value (and thus doesn't
+ # print another retrieval message here)
+ two = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo")
+ print(two.value.contents["foo"])
+
+ .. testoutput::
+
+ Retrieved urn:example:foo
+ bar
+ bar
+
+ """
+ if cache is None:
+ cache = lru_cache(maxsize=None)
+
+ def decorator(retrieve: Callable[[URI], _T]):
+ @cache
+ def cached_retrieve(uri: URI):
+ response = retrieve(uri)
+ contents = loads(response)
+ return from_contents(contents)
+
+ return cached_retrieve
+
+ return decorator
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/__init__.py b/valley/lib/python3.10/site-packages/referencing/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc b/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f04185e5ff8158c1074b7b43e42322b59ae271a
Binary files /dev/null and b/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_core.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc b/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d11664d71418fd15222cb6ad2078fadc723ea53
Binary files /dev/null and b/valley/lib/python3.10/site-packages/referencing/tests/__pycache__/test_jsonschema.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/test_core.py b/valley/lib/python3.10/site-packages/referencing/tests/test_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..3edddbc3d96581e1c74069baa873900495366bab
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/tests/test_core.py
@@ -0,0 +1,1057 @@
+from rpds import HashTrieMap
+import pytest
+
+from referencing import Anchor, Registry, Resource, Specification, exceptions
+from referencing.jsonschema import DRAFT202012
+
+ID_AND_CHILDREN = Specification(
+ name="id-and-children",
+ id_of=lambda contents: contents.get("ID"),
+ subresources_of=lambda contents: contents.get("children", []),
+ anchors_in=lambda specification, contents: [
+ Anchor(
+ name=name,
+ resource=specification.create_resource(contents=each),
+ )
+ for name, each in contents.get("anchors", {}).items()
+ ],
+ maybe_in_subresource=lambda segments, resolver, subresource: (
+ resolver.in_subresource(subresource)
+ if not len(segments) % 2
+ and all(each == "children" for each in segments[::2])
+ else resolver
+ ),
+)
+
+
+def blow_up(uri): # pragma: no cover
+ """
+ A retriever suitable for use in tests which expect it never to be used.
+ """
+ raise RuntimeError("This retrieve function expects to never be called!")
+
+
+class TestRegistry:
+ def test_with_resource(self):
+ """
+ Adding a resource to the registry then allows re-retrieving it.
+ """
+
+ resource = Resource.opaque(contents={"foo": "bar"})
+ uri = "urn:example"
+ registry = Registry().with_resource(uri=uri, resource=resource)
+ assert registry[uri] is resource
+
+ def test_with_resources(self):
+ """
+ Adding multiple resources to the registry is like adding each one.
+ """
+
+ one = Resource.opaque(contents={})
+ two = Resource(contents={"foo": "bar"}, specification=ID_AND_CHILDREN)
+ registry = Registry().with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/foo/bar", two),
+ ],
+ )
+ assert registry == Registry().with_resource(
+ uri="http://example.com/1",
+ resource=one,
+ ).with_resource(
+ uri="http://example.com/foo/bar",
+ resource=two,
+ )
+
+ def test_matmul_resource(self):
+ uri = "urn:example:resource"
+ resource = ID_AND_CHILDREN.create_resource({"ID": uri, "foo": 12})
+ registry = resource @ Registry()
+ assert registry == Registry().with_resource(uri, resource)
+
+ def test_matmul_many_resources(self):
+ one_uri = "urn:example:one"
+ one = ID_AND_CHILDREN.create_resource({"ID": one_uri, "foo": 12})
+
+ two_uri = "urn:example:two"
+ two = ID_AND_CHILDREN.create_resource({"ID": two_uri, "foo": 12})
+
+ registry = [one, two] @ Registry()
+ assert registry == Registry().with_resources(
+ [(one_uri, one), (two_uri, two)],
+ )
+
+ def test_matmul_resource_without_id(self):
+ resource = Resource.opaque(contents={"foo": "bar"})
+ with pytest.raises(exceptions.NoInternalID) as e:
+ resource @ Registry()
+ assert e.value == exceptions.NoInternalID(resource=resource)
+
+ def test_with_contents_from_json_schema(self):
+ uri = "urn:example"
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ registry = Registry().with_contents([(uri, schema)])
+
+ expected = Resource(contents=schema, specification=DRAFT202012)
+ assert registry[uri] == expected
+
+ def test_with_contents_and_default_specification(self):
+ uri = "urn:example"
+ registry = Registry().with_contents(
+ [(uri, {"foo": "bar"})],
+ default_specification=Specification.OPAQUE,
+ )
+ assert registry[uri] == Resource.opaque({"foo": "bar"})
+
+ def test_len(self):
+ total = 5
+ registry = Registry().with_contents(
+ [(str(i), {"foo": "bar"}) for i in range(total)],
+ default_specification=Specification.OPAQUE,
+ )
+ assert len(registry) == total
+
+ def test_bool_empty(self):
+ assert not Registry()
+
+ def test_bool_not_empty(self):
+ registry = Registry().with_contents(
+ [(str(i), {"foo": "bar"}) for i in range(3)],
+ default_specification=Specification.OPAQUE,
+ )
+ assert registry
+
+ def test_iter(self):
+ registry = Registry().with_contents(
+ [(str(i), {"foo": "bar"}) for i in range(8)],
+ default_specification=Specification.OPAQUE,
+ )
+ assert set(registry) == {str(i) for i in range(8)}
+
+ def test_crawl_still_has_top_level_resource(self):
+ resource = Resource.opaque({"foo": "bar"})
+ uri = "urn:example"
+ registry = Registry({uri: resource}).crawl()
+ assert registry[uri] is resource
+
+ def test_crawl_finds_a_subresource(self):
+ child_id = "urn:child"
+ root = ID_AND_CHILDREN.create_resource(
+ {"ID": "urn:root", "children": [{"ID": child_id, "foo": 12}]},
+ )
+ registry = root @ Registry()
+ with pytest.raises(LookupError):
+ registry[child_id]
+
+ expected = ID_AND_CHILDREN.create_resource({"ID": child_id, "foo": 12})
+ assert registry.crawl()[child_id] == expected
+
+ def test_crawl_finds_anchors_with_id(self):
+ resource = ID_AND_CHILDREN.create_resource(
+ {"ID": "urn:bar", "anchors": {"foo": 12}},
+ )
+ registry = resource @ Registry()
+
+ assert registry.crawl().anchor(resource.id(), "foo").value == Anchor(
+ name="foo",
+ resource=ID_AND_CHILDREN.create_resource(12),
+ )
+
+ def test_crawl_finds_anchors_no_id(self):
+ resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
+ registry = Registry().with_resource("urn:root", resource)
+
+ assert registry.crawl().anchor("urn:root", "foo").value == Anchor(
+ name="foo",
+ resource=ID_AND_CHILDREN.create_resource(12),
+ )
+
+ def test_contents(self):
+ resource = Resource.opaque({"foo": "bar"})
+ uri = "urn:example"
+ registry = Registry().with_resource(uri, resource)
+ assert registry.contents(uri) == {"foo": "bar"}
+
+ def test_getitem_strips_empty_fragments(self):
+ uri = "http://example.com/"
+ resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
+ registry = resource @ Registry()
+ assert registry[uri] == registry[uri + "#"] == resource
+
+ def test_contents_strips_empty_fragments(self):
+ uri = "http://example.com/"
+ resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
+ registry = resource @ Registry()
+ assert (
+ registry.contents(uri)
+ == registry.contents(uri + "#")
+ == {"ID": uri + "#"}
+ )
+
+ def test_contents_nonexistent_resource(self):
+ registry = Registry()
+ with pytest.raises(exceptions.NoSuchResource) as e:
+ registry.contents("urn:example")
+ assert e.value == exceptions.NoSuchResource(ref="urn:example")
+
+ def test_crawled_anchor(self):
+ resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}})
+ registry = Registry().with_resource("urn:example", resource)
+ retrieved = registry.anchor("urn:example", "foo")
+ assert retrieved.value == Anchor(
+ name="foo",
+ resource=ID_AND_CHILDREN.create_resource("bar"),
+ )
+ assert retrieved.registry == registry.crawl()
+
+ def test_anchor_in_nonexistent_resource(self):
+ registry = Registry()
+ with pytest.raises(exceptions.NoSuchResource) as e:
+ registry.anchor("urn:example", "foo")
+ assert e.value == exceptions.NoSuchResource(ref="urn:example")
+
+ def test_init(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = Registry(
+ {
+ "http://example.com/1": one,
+ "http://example.com/foo/bar": two,
+ },
+ )
+ assert (
+ registry
+ == Registry()
+ .with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/foo/bar", two),
+ ],
+ )
+ .crawl()
+ )
+
+ def test_dict_conversion(self):
+ """
+ Passing a `dict` to `Registry` gets converted to a `HashTrieMap`.
+
+ So continuing to use the registry works.
+ """
+
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = Registry(
+ {"http://example.com/1": one},
+ ).with_resource("http://example.com/foo/bar", two)
+ assert (
+ registry.crawl()
+ == Registry()
+ .with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/foo/bar", two),
+ ],
+ )
+ .crawl()
+ )
+
+ def test_no_such_resource(self):
+ registry = Registry()
+ with pytest.raises(exceptions.NoSuchResource) as e:
+ registry["urn:bigboom"]
+ assert e.value == exceptions.NoSuchResource(ref="urn:bigboom")
+
+ def test_combine(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
+ four = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
+
+ first = Registry({"http://example.com/1": one})
+ second = Registry().with_resource("http://example.com/foo/bar", two)
+ third = Registry(
+ {
+ "http://example.com/1": one,
+ "http://example.com/baz": three,
+ },
+ )
+ fourth = (
+ Registry()
+ .with_resource(
+ "http://example.com/foo/quux",
+ four,
+ )
+ .crawl()
+ )
+ assert first.combine(second, third, fourth) == Registry(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/baz", three),
+ ("http://example.com/foo/quux", four),
+ ],
+ anchors=HashTrieMap(
+ {
+ ("http://example.com/foo/quux", "foo"): Anchor(
+ name="foo",
+ resource=ID_AND_CHILDREN.create_resource(12),
+ ),
+ },
+ ),
+ ).with_resource("http://example.com/foo/bar", two)
+
+ def test_combine_self(self):
+ """
+ Combining a registry with itself short-circuits.
+
+ This is a performance optimization -- otherwise we do lots more work
+ (in jsonschema this seems to correspond to making the test suite take
+ *3x* longer).
+ """
+
+ registry = Registry({"urn:foo": "bar"})
+ assert registry.combine(registry) is registry
+
+ def test_combine_with_uncrawled_resources(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
+
+ first = Registry().with_resource("http://example.com/1", one)
+ second = Registry().with_resource("http://example.com/foo/bar", two)
+ third = Registry(
+ {
+ "http://example.com/1": one,
+ "http://example.com/baz": three,
+ },
+ )
+ expected = Registry(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/foo/bar", two),
+ ("http://example.com/baz", three),
+ ],
+ )
+ combined = first.combine(second, third)
+ assert combined != expected
+ assert combined.crawl() == expected
+
+ def test_combine_with_single_retrieve(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
+
+ def retrieve(uri): # pragma: no cover
+ pass
+
+ first = Registry().with_resource("http://example.com/1", one)
+ second = Registry(
+ retrieve=retrieve,
+ ).with_resource("http://example.com/2", two)
+ third = Registry().with_resource("http://example.com/3", three)
+
+ assert first.combine(second, third) == Registry(
+ retrieve=retrieve,
+ ).with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/2", two),
+ ("http://example.com/3", three),
+ ],
+ )
+ assert second.combine(first, third) == Registry(
+ retrieve=retrieve,
+ ).with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/2", two),
+ ("http://example.com/3", three),
+ ],
+ )
+
+ def test_combine_with_common_retrieve(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
+
+ def retrieve(uri): # pragma: no cover
+ pass
+
+ first = Registry(retrieve=retrieve).with_resource(
+ "http://example.com/1",
+ one,
+ )
+ second = Registry(
+ retrieve=retrieve,
+ ).with_resource("http://example.com/2", two)
+ third = Registry(retrieve=retrieve).with_resource(
+ "http://example.com/3",
+ three,
+ )
+
+ assert first.combine(second, third) == Registry(
+ retrieve=retrieve,
+ ).with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/2", two),
+ ("http://example.com/3", three),
+ ],
+ )
+ assert second.combine(first, third) == Registry(
+ retrieve=retrieve,
+ ).with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/2", two),
+ ("http://example.com/3", three),
+ ],
+ )
+
+ def test_combine_conflicting_retrieve(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
+
+ def foo_retrieve(uri): # pragma: no cover
+ pass
+
+ def bar_retrieve(uri): # pragma: no cover
+ pass
+
+ first = Registry(retrieve=foo_retrieve).with_resource(
+ "http://example.com/1",
+ one,
+ )
+ second = Registry().with_resource("http://example.com/2", two)
+ third = Registry(retrieve=bar_retrieve).with_resource(
+ "http://example.com/3",
+ three,
+ )
+
+ with pytest.raises(Exception, match="conflict.*retriev"):
+ first.combine(second, third)
+
+ def test_remove(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = Registry({"urn:foo": one, "urn:bar": two})
+ assert registry.remove("urn:foo") == Registry({"urn:bar": two})
+
+ def test_remove_uncrawled(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = Registry().with_resources(
+ [("urn:foo", one), ("urn:bar", two)],
+ )
+ assert registry.remove("urn:foo") == Registry().with_resource(
+ "urn:bar",
+ two,
+ )
+
+ def test_remove_with_anchors(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}})
+ registry = (
+ Registry()
+ .with_resources(
+ [("urn:foo", one), ("urn:bar", two)],
+ )
+ .crawl()
+ )
+ assert (
+ registry.remove("urn:bar")
+ == Registry()
+ .with_resource(
+ "urn:foo",
+ one,
+ )
+ .crawl()
+ )
+
+ def test_remove_nonexistent_uri(self):
+ with pytest.raises(exceptions.NoSuchResource) as e:
+ Registry().remove("urn:doesNotExist")
+ assert e.value == exceptions.NoSuchResource(ref="urn:doesNotExist")
+
+ def test_retrieve(self):
+ foo = Resource.opaque({"foo": "bar"})
+ registry = Registry(retrieve=lambda uri: foo)
+ assert registry.get_or_retrieve("urn:example").value == foo
+
+ def test_retrieve_arbitrary_exception(self):
+ foo = Resource.opaque({"foo": "bar"})
+
+ def retrieve(uri):
+ if uri == "urn:succeed":
+ return foo
+ raise Exception("Oh no!")
+
+ registry = Registry(retrieve=retrieve)
+ assert registry.get_or_retrieve("urn:succeed").value == foo
+ with pytest.raises(exceptions.Unretrievable):
+ registry.get_or_retrieve("urn:uhoh")
+
+ def test_retrieve_no_such_resource(self):
+ foo = Resource.opaque({"foo": "bar"})
+
+ def retrieve(uri):
+ if uri == "urn:succeed":
+ return foo
+ raise exceptions.NoSuchResource(ref=uri)
+
+ registry = Registry(retrieve=retrieve)
+ assert registry.get_or_retrieve("urn:succeed").value == foo
+ with pytest.raises(exceptions.NoSuchResource):
+ registry.get_or_retrieve("urn:uhoh")
+
+ def test_retrieve_cannot_determine_specification(self):
+ def retrieve(uri):
+ return Resource.from_contents({})
+
+ registry = Registry(retrieve=retrieve)
+ with pytest.raises(exceptions.CannotDetermineSpecification):
+ registry.get_or_retrieve("urn:uhoh")
+
+ def test_retrieve_already_available_resource(self):
+ foo = Resource.opaque({"foo": "bar"})
+ registry = Registry({"urn:example": foo}, retrieve=blow_up)
+ assert registry["urn:example"] == foo
+ assert registry.get_or_retrieve("urn:example").value == foo
+
+ def test_retrieve_first_checks_crawlable_resource(self):
+ child = ID_AND_CHILDREN.create_resource({"ID": "urn:child", "foo": 12})
+ root = ID_AND_CHILDREN.create_resource({"children": [child.contents]})
+ registry = Registry(retrieve=blow_up).with_resource("urn:root", root)
+ assert registry.crawl()["urn:child"] == child
+
+ def test_resolver(self):
+ one = Resource.opaque(contents={})
+ registry = Registry({"http://example.com": one})
+ resolver = registry.resolver(base_uri="http://example.com")
+ assert resolver.lookup("#").contents == {}
+
+ def test_resolver_with_root_identified(self):
+ root = ID_AND_CHILDREN.create_resource({"ID": "http://example.com"})
+ resolver = Registry().resolver_with_root(root)
+ assert resolver.lookup("http://example.com").contents == root.contents
+ assert resolver.lookup("#").contents == root.contents
+
+ def test_resolver_with_root_unidentified(self):
+ root = Resource.opaque(contents={})
+ resolver = Registry().resolver_with_root(root)
+ assert resolver.lookup("#").contents == root.contents
+
+ def test_repr(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = Registry().with_resources(
+ [
+ ("http://example.com/1", one),
+ ("http://example.com/foo/bar", two),
+ ],
+ )
+ assert repr(registry) == ""
+ assert repr(registry.crawl()) == ""
+
+ def test_repr_mixed_crawled(self):
+ one = Resource.opaque(contents={})
+ two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
+ registry = (
+ Registry(
+ {"http://example.com/1": one},
+ )
+ .crawl()
+ .with_resource(uri="http://example.com/foo/bar", resource=two)
+ )
+ assert repr(registry) == ""
+
+ def test_repr_one_resource(self):
+ registry = Registry().with_resource(
+ uri="http://example.com/1",
+ resource=Resource.opaque(contents={}),
+ )
+ assert repr(registry) == ""
+
+ def test_repr_empty(self):
+ assert repr(Registry()) == ""
+
+
+class TestResource:
+ def test_from_contents_from_json_schema(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ resource = Resource.from_contents(schema)
+ assert resource == Resource(contents=schema, specification=DRAFT202012)
+
+ def test_from_contents_with_no_discernible_information(self):
+ """
+ Creating a resource with no discernible way to see what
+ specification it belongs to (e.g. no ``$schema`` keyword for JSON
+ Schema) raises an error.
+ """
+
+ with pytest.raises(exceptions.CannotDetermineSpecification):
+ Resource.from_contents({"foo": "bar"})
+
+ def test_from_contents_with_no_discernible_information_and_default(self):
+ resource = Resource.from_contents(
+ {"foo": "bar"},
+ default_specification=Specification.OPAQUE,
+ )
+ assert resource == Resource.opaque(contents={"foo": "bar"})
+
+ def test_from_contents_unneeded_default(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ resource = Resource.from_contents(
+ schema,
+ default_specification=Specification.OPAQUE,
+ )
+ assert resource == Resource(
+ contents=schema,
+ specification=DRAFT202012,
+ )
+
+ def test_non_mapping_from_contents(self):
+ resource = Resource.from_contents(
+ True,
+ default_specification=ID_AND_CHILDREN,
+ )
+ assert resource == Resource(
+ contents=True,
+ specification=ID_AND_CHILDREN,
+ )
+
+ def test_from_contents_with_fallback(self):
+ resource = Resource.from_contents(
+ {"foo": "bar"},
+ default_specification=Specification.OPAQUE,
+ )
+ assert resource == Resource.opaque(contents={"foo": "bar"})
+
+ def test_id_delegates_to_specification(self):
+ specification = Specification(
+ name="",
+ id_of=lambda contents: "urn:fixedID",
+ subresources_of=lambda contents: [],
+ anchors_in=lambda specification, contents: [],
+ maybe_in_subresource=(
+ lambda segments, resolver, subresource: resolver
+ ),
+ )
+ resource = Resource(
+ contents={"foo": "baz"},
+ specification=specification,
+ )
+ assert resource.id() == "urn:fixedID"
+
+ def test_id_strips_empty_fragment(self):
+ uri = "http://example.com/"
+ root = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
+ assert root.id() == uri
+
+ def test_subresources_delegates_to_specification(self):
+ resource = ID_AND_CHILDREN.create_resource({"children": [{}, 12]})
+ assert list(resource.subresources()) == [
+ ID_AND_CHILDREN.create_resource(each) for each in [{}, 12]
+ ]
+
+ def test_subresource_with_different_specification(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ resource = ID_AND_CHILDREN.create_resource({"children": [schema]})
+ assert list(resource.subresources()) == [
+ DRAFT202012.create_resource(schema),
+ ]
+
+ def test_anchors_delegates_to_specification(self):
+ resource = ID_AND_CHILDREN.create_resource(
+ {"anchors": {"foo": {}, "bar": 1, "baz": ""}},
+ )
+ assert list(resource.anchors()) == [
+ Anchor(name="foo", resource=ID_AND_CHILDREN.create_resource({})),
+ Anchor(name="bar", resource=ID_AND_CHILDREN.create_resource(1)),
+ Anchor(name="baz", resource=ID_AND_CHILDREN.create_resource("")),
+ ]
+
+ def test_pointer_to_mapping(self):
+ resource = Resource.opaque(contents={"foo": "baz"})
+ resolver = Registry().resolver()
+ assert resource.pointer("/foo", resolver=resolver).contents == "baz"
+
+ def test_pointer_to_array(self):
+ resource = Resource.opaque(contents={"foo": {"bar": [3]}})
+ resolver = Registry().resolver()
+ assert resource.pointer("/foo/bar/0", resolver=resolver).contents == 3
+
+ def test_root_pointer(self):
+ contents = {"foo": "baz"}
+ resource = Resource.opaque(contents=contents)
+ resolver = Registry().resolver()
+ assert resource.pointer("", resolver=resolver).contents == contents
+
+ def test_opaque(self):
+ contents = {"foo": "bar"}
+ assert Resource.opaque(contents) == Resource(
+ contents=contents,
+ specification=Specification.OPAQUE,
+ )
+
+
+class TestResolver:
+ def test_lookup_exact_uri(self):
+ resource = Resource.opaque(contents={"foo": "baz"})
+ resolver = Registry({"http://example.com/1": resource}).resolver()
+ resolved = resolver.lookup("http://example.com/1")
+ assert resolved.contents == resource.contents
+
+ def test_lookup_subresource(self):
+ root = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "children": [
+ {"ID": "http://example.com/a", "foo": 12},
+ ],
+ },
+ )
+ registry = root @ Registry()
+ resolved = registry.resolver().lookup("http://example.com/a")
+ assert resolved.contents == {"ID": "http://example.com/a", "foo": 12}
+
+ def test_lookup_anchor_with_id(self):
+ root = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "anchors": {"foo": 12},
+ },
+ )
+ registry = root @ Registry()
+ resolved = registry.resolver().lookup("http://example.com/#foo")
+ assert resolved.contents == 12
+
+ def test_lookup_anchor_without_id(self):
+ root = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
+ resolver = Registry().with_resource("urn:example", root).resolver()
+ resolved = resolver.lookup("urn:example#foo")
+ assert resolved.contents == 12
+
+ def test_lookup_unknown_reference(self):
+ resolver = Registry().resolver()
+ ref = "http://example.com/does/not/exist"
+ with pytest.raises(exceptions.Unresolvable) as e:
+ resolver.lookup(ref)
+ assert e.value == exceptions.Unresolvable(ref=ref)
+
+ def test_lookup_non_existent_pointer(self):
+ resource = Resource.opaque({"foo": {}})
+ resolver = Registry({"http://example.com/1": resource}).resolver()
+ ref = "http://example.com/1#/foo/bar"
+ with pytest.raises(exceptions.Unresolvable) as e:
+ resolver.lookup(ref)
+ assert e.value == exceptions.PointerToNowhere(
+ ref="/foo/bar",
+ resource=resource,
+ )
+ assert str(e.value) == "'/foo/bar' does not exist within {'foo': {}}"
+
+ def test_lookup_non_existent_pointer_to_array_index(self):
+ resource = Resource.opaque([1, 2, 4, 8])
+ resolver = Registry({"http://example.com/1": resource}).resolver()
+ ref = "http://example.com/1#/10"
+ with pytest.raises(exceptions.Unresolvable) as e:
+ resolver.lookup(ref)
+ assert e.value == exceptions.PointerToNowhere(
+ ref="/10",
+ resource=resource,
+ )
+
+ def test_lookup_pointer_to_empty_string(self):
+ resolver = Registry().resolver_with_root(Resource.opaque({"": {}}))
+ assert resolver.lookup("#/").contents == {}
+
+ def test_lookup_non_existent_pointer_to_empty_string(self):
+ resource = Resource.opaque({"foo": {}})
+ resolver = Registry().resolver_with_root(resource)
+ with pytest.raises(
+ exceptions.Unresolvable,
+ match="^'/' does not exist within {'foo': {}}.*'#'",
+ ) as e:
+ resolver.lookup("#/")
+ assert e.value == exceptions.PointerToNowhere(
+ ref="/",
+ resource=resource,
+ )
+
+ def test_lookup_non_existent_anchor(self):
+ root = ID_AND_CHILDREN.create_resource({"anchors": {}})
+ resolver = Registry().with_resource("urn:example", root).resolver()
+ resolved = resolver.lookup("urn:example")
+ assert resolved.contents == root.contents
+
+ ref = "urn:example#noSuchAnchor"
+ with pytest.raises(exceptions.Unresolvable) as e:
+ resolver.lookup(ref)
+ assert "'noSuchAnchor' does not exist" in str(e.value)
+ assert e.value == exceptions.NoSuchAnchor(
+ ref="urn:example",
+ resource=root,
+ anchor="noSuchAnchor",
+ )
+
+ def test_lookup_invalid_JSON_pointerish_anchor(self):
+ resolver = Registry().resolver_with_root(
+ ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "foo": {"bar": 12},
+ },
+ ),
+ )
+
+ valid = resolver.lookup("#/foo/bar")
+ assert valid.contents == 12
+
+ with pytest.raises(exceptions.InvalidAnchor) as e:
+ resolver.lookup("#foo/bar")
+ assert " '#/foo/bar'" in str(e.value)
+
+ def test_lookup_retrieved_resource(self):
+ resource = Resource.opaque(contents={"foo": "baz"})
+ resolver = Registry(retrieve=lambda uri: resource).resolver()
+ resolved = resolver.lookup("http://example.com/")
+ assert resolved.contents == resource.contents
+
+ def test_lookup_failed_retrieved_resource(self):
+ """
+ Unretrievable exceptions are also wrapped in Unresolvable.
+ """
+
+ uri = "http://example.com/"
+
+ registry = Registry(retrieve=blow_up)
+ with pytest.raises(exceptions.Unretrievable):
+ registry.get_or_retrieve(uri)
+
+ resolver = registry.resolver()
+ with pytest.raises(exceptions.Unresolvable):
+ resolver.lookup(uri)
+
+ def test_repeated_lookup_from_retrieved_resource(self):
+ """
+ A (custom-)retrieved resource is added to the registry returned by
+ looking it up.
+ """
+ resource = Resource.opaque(contents={"foo": "baz"})
+ once = [resource]
+
+ def retrieve(uri):
+ return once.pop()
+
+ resolver = Registry(retrieve=retrieve).resolver()
+ resolved = resolver.lookup("http://example.com/")
+ assert resolved.contents == resource.contents
+
+ resolved = resolved.resolver.lookup("http://example.com/")
+ assert resolved.contents == resource.contents
+
+ def test_repeated_anchor_lookup_from_retrieved_resource(self):
+ resource = Resource.opaque(contents={"foo": "baz"})
+ once = [resource]
+
+ def retrieve(uri):
+ return once.pop()
+
+ resolver = Registry(retrieve=retrieve).resolver()
+ resolved = resolver.lookup("http://example.com/")
+ assert resolved.contents == resource.contents
+
+ resolved = resolved.resolver.lookup("#")
+ assert resolved.contents == resource.contents
+
+ # FIXME: The tests below aren't really representable in the current
+ # suite, though we should probably think of ways to do so.
+
+ def test_in_subresource(self):
+ root = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "children": [
+ {
+ "ID": "child/",
+ "children": [{"ID": "grandchild"}],
+ },
+ ],
+ },
+ )
+ registry = root @ Registry()
+
+ resolver = registry.resolver()
+ first = resolver.lookup("http://example.com/")
+ assert first.contents == root.contents
+
+ with pytest.raises(exceptions.Unresolvable):
+ first.resolver.lookup("grandchild")
+
+ sub = first.resolver.in_subresource(
+ ID_AND_CHILDREN.create_resource(first.contents["children"][0]),
+ )
+ second = sub.lookup("grandchild")
+ assert second.contents == {"ID": "grandchild"}
+
+ def test_in_pointer_subresource(self):
+ root = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "children": [
+ {
+ "ID": "child/",
+ "children": [{"ID": "grandchild"}],
+ },
+ ],
+ },
+ )
+ registry = root @ Registry()
+
+ resolver = registry.resolver()
+ first = resolver.lookup("http://example.com/")
+ assert first.contents == root.contents
+
+ with pytest.raises(exceptions.Unresolvable):
+ first.resolver.lookup("grandchild")
+
+ second = first.resolver.lookup("#/children/0")
+ third = second.resolver.lookup("grandchild")
+ assert third.contents == {"ID": "grandchild"}
+
+ def test_dynamic_scope(self):
+ one = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/",
+ "children": [
+ {
+ "ID": "child/",
+ "children": [{"ID": "grandchild"}],
+ },
+ ],
+ },
+ )
+ two = ID_AND_CHILDREN.create_resource(
+ {
+ "ID": "http://example.com/two",
+ "children": [{"ID": "two-child/"}],
+ },
+ )
+ registry = [one, two] @ Registry()
+
+ resolver = registry.resolver()
+ first = resolver.lookup("http://example.com/")
+ second = first.resolver.lookup("#/children/0")
+ third = second.resolver.lookup("grandchild")
+ fourth = third.resolver.lookup("http://example.com/two")
+ assert list(fourth.resolver.dynamic_scope()) == [
+ ("http://example.com/child/grandchild", fourth.resolver._registry),
+ ("http://example.com/child/", fourth.resolver._registry),
+ ("http://example.com/", fourth.resolver._registry),
+ ]
+ assert list(third.resolver.dynamic_scope()) == [
+ ("http://example.com/child/", third.resolver._registry),
+ ("http://example.com/", third.resolver._registry),
+ ]
+ assert list(second.resolver.dynamic_scope()) == [
+ ("http://example.com/", second.resolver._registry),
+ ]
+ assert list(first.resolver.dynamic_scope()) == []
+
+
+class TestSpecification:
+ def test_create_resource(self):
+ specification = Specification(
+ name="",
+ id_of=lambda contents: "urn:fixedID",
+ subresources_of=lambda contents: [],
+ anchors_in=lambda specification, contents: [],
+ maybe_in_subresource=(
+ lambda segments, resolver, subresource: resolver
+ ),
+ )
+ resource = specification.create_resource(contents={"foo": "baz"})
+ assert resource == Resource(
+ contents={"foo": "baz"},
+ specification=specification,
+ )
+ assert resource.id() == "urn:fixedID"
+
+ def test_detect_from_json_schema(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ specification = Specification.detect(schema)
+ assert specification == DRAFT202012
+
+ def test_detect_with_no_discernible_information(self):
+ with pytest.raises(exceptions.CannotDetermineSpecification):
+ Specification.detect({"foo": "bar"})
+
+ def test_detect_with_non_URI_schema(self):
+ with pytest.raises(exceptions.CannotDetermineSpecification):
+ Specification.detect({"$schema": 37})
+
+ def test_detect_with_no_discernible_information_and_default(self):
+ specification = Specification.OPAQUE.detect({"foo": "bar"})
+ assert specification is Specification.OPAQUE
+
+ def test_detect_unneeded_default(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ specification = Specification.OPAQUE.detect(schema)
+ assert specification == DRAFT202012
+
+ def test_non_mapping_detect(self):
+ with pytest.raises(exceptions.CannotDetermineSpecification):
+ Specification.detect(True)
+
+ def test_non_mapping_detect_with_default(self):
+ specification = ID_AND_CHILDREN.detect(True)
+ assert specification is ID_AND_CHILDREN
+
+ def test_detect_with_fallback(self):
+ specification = Specification.OPAQUE.detect({"foo": "bar"})
+ assert specification is Specification.OPAQUE
+
+ def test_repr(self):
+ assert (
+ repr(ID_AND_CHILDREN) == ""
+ )
+
+
+class TestOpaqueSpecification:
+ THINGS = [{"foo": "bar"}, True, 37, "foo", object()]
+
+ @pytest.mark.parametrize("thing", THINGS)
+ def test_no_id(self, thing):
+ """
+ An arbitrary thing has no ID.
+ """
+
+ assert Specification.OPAQUE.id_of(thing) is None
+
+ @pytest.mark.parametrize("thing", THINGS)
+ def test_no_subresources(self, thing):
+ """
+ An arbitrary thing has no subresources.
+ """
+
+ assert list(Specification.OPAQUE.subresources_of(thing)) == []
+
+ @pytest.mark.parametrize("thing", THINGS)
+ def test_no_anchors(self, thing):
+ """
+ An arbitrary thing has no anchors.
+ """
+
+ assert list(Specification.OPAQUE.anchors_in(thing)) == []
+
+
+@pytest.mark.parametrize(
+ "cls",
+ [Anchor, Registry, Resource, Specification, exceptions.PointerToNowhere],
+)
+def test_nonsubclassable(cls):
+ with pytest.raises(Exception, match="(?i)subclassing"):
+
+ class Boom(cls): # pragma: no cover
+ pass
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/test_exceptions.py b/valley/lib/python3.10/site-packages/referencing/tests/test_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..85cf99ecdd86c86e84df0b64f24aec6c447f4c08
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/tests/test_exceptions.py
@@ -0,0 +1,34 @@
+import itertools
+
+import pytest
+
+from referencing import Resource, exceptions
+
+
+def pairs(choices):
+ return itertools.combinations(choices, 2)
+
+
+TRUE = Resource.opaque(True)
+
+
+thunks = (
+ lambda: exceptions.CannotDetermineSpecification(TRUE),
+ lambda: exceptions.NoSuchResource("urn:example:foo"),
+ lambda: exceptions.NoInternalID(TRUE),
+ lambda: exceptions.InvalidAnchor(resource=TRUE, anchor="foo", ref="a#b"),
+ lambda: exceptions.NoSuchAnchor(resource=TRUE, anchor="foo", ref="a#b"),
+ lambda: exceptions.PointerToNowhere(resource=TRUE, ref="urn:example:foo"),
+ lambda: exceptions.Unresolvable("urn:example:foo"),
+ lambda: exceptions.Unretrievable("urn:example:foo"),
+)
+
+
+@pytest.mark.parametrize("one, two", pairs(each() for each in thunks))
+def test_eq_incompatible_types(one, two):
+ assert one != two
+
+
+@pytest.mark.parametrize("thunk", thunks)
+def test_hash(thunk):
+ assert thunk() in {thunk()}
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py b/valley/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py
new file mode 100644
index 0000000000000000000000000000000000000000..c80714d0132bebbec33401f42a2e06aee3fed9c6
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/tests/test_jsonschema.py
@@ -0,0 +1,382 @@
+import pytest
+
+from referencing import Registry, Resource, Specification
+import referencing.jsonschema
+
+
+@pytest.mark.parametrize(
+ "uri, expected",
+ [
+ (
+ "https://json-schema.org/draft/2020-12/schema",
+ referencing.jsonschema.DRAFT202012,
+ ),
+ (
+ "https://json-schema.org/draft/2019-09/schema",
+ referencing.jsonschema.DRAFT201909,
+ ),
+ (
+ "http://json-schema.org/draft-07/schema#",
+ referencing.jsonschema.DRAFT7,
+ ),
+ (
+ "http://json-schema.org/draft-06/schema#",
+ referencing.jsonschema.DRAFT6,
+ ),
+ (
+ "http://json-schema.org/draft-04/schema#",
+ referencing.jsonschema.DRAFT4,
+ ),
+ (
+ "http://json-schema.org/draft-03/schema#",
+ referencing.jsonschema.DRAFT3,
+ ),
+ ],
+)
+def test_schemas_with_explicit_schema_keywords_are_detected(uri, expected):
+ """
+ The $schema keyword in JSON Schema is a dialect identifier.
+ """
+ contents = {"$schema": uri}
+ resource = Resource.from_contents(contents)
+ assert resource == Resource(contents=contents, specification=expected)
+
+
+def test_unknown_dialect():
+ dialect_id = "http://example.com/unknown-json-schema-dialect-id"
+ with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo:
+ Resource.from_contents({"$schema": dialect_id})
+ assert excinfo.value.uri == dialect_id
+
+
+@pytest.mark.parametrize(
+ "id, specification",
+ [
+ ("$id", referencing.jsonschema.DRAFT202012),
+ ("$id", referencing.jsonschema.DRAFT201909),
+ ("$id", referencing.jsonschema.DRAFT7),
+ ("$id", referencing.jsonschema.DRAFT6),
+ ("id", referencing.jsonschema.DRAFT4),
+ ("id", referencing.jsonschema.DRAFT3),
+ ],
+)
+def test_id_of_mapping(id, specification):
+ uri = "http://example.com/some-schema"
+ assert specification.id_of({id: uri}) == uri
+
+
+@pytest.mark.parametrize(
+ "specification",
+ [
+ referencing.jsonschema.DRAFT202012,
+ referencing.jsonschema.DRAFT201909,
+ referencing.jsonschema.DRAFT7,
+ referencing.jsonschema.DRAFT6,
+ ],
+)
+@pytest.mark.parametrize("value", [True, False])
+def test_id_of_bool(specification, value):
+ assert specification.id_of(value) is None
+
+
+@pytest.mark.parametrize(
+ "specification",
+ [
+ referencing.jsonschema.DRAFT202012,
+ referencing.jsonschema.DRAFT201909,
+ referencing.jsonschema.DRAFT7,
+ referencing.jsonschema.DRAFT6,
+ ],
+)
+@pytest.mark.parametrize("value", [True, False])
+def test_anchors_in_bool(specification, value):
+ assert list(specification.anchors_in(value)) == []
+
+
+@pytest.mark.parametrize(
+ "specification",
+ [
+ referencing.jsonschema.DRAFT202012,
+ referencing.jsonschema.DRAFT201909,
+ referencing.jsonschema.DRAFT7,
+ referencing.jsonschema.DRAFT6,
+ ],
+)
+@pytest.mark.parametrize("value", [True, False])
+def test_subresources_of_bool(specification, value):
+ assert list(specification.subresources_of(value)) == []
+
+
+@pytest.mark.parametrize(
+ "uri, expected",
+ [
+ (
+ "https://json-schema.org/draft/2020-12/schema",
+ referencing.jsonschema.DRAFT202012,
+ ),
+ (
+ "https://json-schema.org/draft/2019-09/schema",
+ referencing.jsonschema.DRAFT201909,
+ ),
+ (
+ "http://json-schema.org/draft-07/schema#",
+ referencing.jsonschema.DRAFT7,
+ ),
+ (
+ "http://json-schema.org/draft-06/schema#",
+ referencing.jsonschema.DRAFT6,
+ ),
+ (
+ "http://json-schema.org/draft-04/schema#",
+ referencing.jsonschema.DRAFT4,
+ ),
+ (
+ "http://json-schema.org/draft-03/schema#",
+ referencing.jsonschema.DRAFT3,
+ ),
+ ],
+)
+def test_specification_with(uri, expected):
+ assert referencing.jsonschema.specification_with(uri) == expected
+
+
+@pytest.mark.parametrize(
+ "uri, expected",
+ [
+ (
+ "http://json-schema.org/draft-07/schema",
+ referencing.jsonschema.DRAFT7,
+ ),
+ (
+ "http://json-schema.org/draft-06/schema",
+ referencing.jsonschema.DRAFT6,
+ ),
+ (
+ "http://json-schema.org/draft-04/schema",
+ referencing.jsonschema.DRAFT4,
+ ),
+ (
+ "http://json-schema.org/draft-03/schema",
+ referencing.jsonschema.DRAFT3,
+ ),
+ ],
+)
+def test_specification_with_no_empty_fragment(uri, expected):
+ assert referencing.jsonschema.specification_with(uri) == expected
+
+
+def test_specification_with_unknown_dialect():
+ dialect_id = "http://example.com/unknown-json-schema-dialect-id"
+ with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo:
+ referencing.jsonschema.specification_with(dialect_id)
+ assert excinfo.value.uri == dialect_id
+
+
+def test_specification_with_default():
+ dialect_id = "http://example.com/unknown-json-schema-dialect-id"
+ specification = referencing.jsonschema.specification_with(
+ dialect_id,
+ default=Specification.OPAQUE,
+ )
+ assert specification is Specification.OPAQUE
+
+
+# FIXME: The tests below should move to the referencing suite but I haven't yet
+# figured out how to represent dynamic (& recursive) ref lookups in it.
+def test_lookup_trivial_dynamic_ref():
+ one = referencing.jsonschema.DRAFT202012.create_resource(
+ {"$dynamicAnchor": "foo"},
+ )
+ resolver = Registry().with_resource("http://example.com", one).resolver()
+ resolved = resolver.lookup("http://example.com#foo")
+ assert resolved.contents == one.contents
+
+
+def test_multiple_lookup_trivial_dynamic_ref():
+ TRUE = referencing.jsonschema.DRAFT202012.create_resource(True)
+ root = referencing.jsonschema.DRAFT202012.create_resource(
+ {
+ "$id": "http://example.com",
+ "$dynamicAnchor": "fooAnchor",
+ "$defs": {
+ "foo": {
+ "$id": "foo",
+ "$dynamicAnchor": "fooAnchor",
+ "$defs": {
+ "bar": True,
+ "baz": {
+ "$dynamicAnchor": "fooAnchor",
+ },
+ },
+ },
+ },
+ },
+ )
+ resolver = (
+ Registry()
+ .with_resources(
+ [
+ ("http://example.com", root),
+ ("http://example.com/foo/", TRUE),
+ ("http://example.com/foo/bar", root),
+ ],
+ )
+ .resolver()
+ )
+
+ first = resolver.lookup("http://example.com")
+ second = first.resolver.lookup("foo/")
+ resolver = second.resolver.lookup("bar").resolver
+ fourth = resolver.lookup("#fooAnchor")
+ assert fourth.contents == root.contents
+
+
+def test_multiple_lookup_dynamic_ref_to_nondynamic_ref():
+ one = referencing.jsonschema.DRAFT202012.create_resource(
+ {"$anchor": "fooAnchor"},
+ )
+ two = referencing.jsonschema.DRAFT202012.create_resource(
+ {
+ "$id": "http://example.com",
+ "$dynamicAnchor": "fooAnchor",
+ "$defs": {
+ "foo": {
+ "$id": "foo",
+ "$dynamicAnchor": "fooAnchor",
+ "$defs": {
+ "bar": True,
+ "baz": {
+ "$dynamicAnchor": "fooAnchor",
+ },
+ },
+ },
+ },
+ },
+ )
+ resolver = (
+ Registry()
+ .with_resources(
+ [
+ ("http://example.com", two),
+ ("http://example.com/foo/", one),
+ ("http://example.com/foo/bar", two),
+ ],
+ )
+ .resolver()
+ )
+
+ first = resolver.lookup("http://example.com")
+ second = first.resolver.lookup("foo/")
+ resolver = second.resolver.lookup("bar").resolver
+ fourth = resolver.lookup("#fooAnchor")
+ assert fourth.contents == two.contents
+
+
+def test_lookup_trivial_recursive_ref():
+ one = referencing.jsonschema.DRAFT201909.create_resource(
+ {"$recursiveAnchor": True},
+ )
+ resolver = Registry().with_resource("http://example.com", one).resolver()
+ first = resolver.lookup("http://example.com")
+ resolved = referencing.jsonschema.lookup_recursive_ref(
+ resolver=first.resolver,
+ )
+ assert resolved.contents == one.contents
+
+
+def test_lookup_recursive_ref_to_bool():
+ TRUE = referencing.jsonschema.DRAFT201909.create_resource(True)
+ registry = Registry({"http://example.com": TRUE})
+ resolved = referencing.jsonschema.lookup_recursive_ref(
+ resolver=registry.resolver(base_uri="http://example.com"),
+ )
+ assert resolved.contents == TRUE.contents
+
+
+def test_multiple_lookup_recursive_ref_to_bool():
+ TRUE = referencing.jsonschema.DRAFT201909.create_resource(True)
+ root = referencing.jsonschema.DRAFT201909.create_resource(
+ {
+ "$id": "http://example.com",
+ "$recursiveAnchor": True,
+ "$defs": {
+ "foo": {
+ "$id": "foo",
+ "$recursiveAnchor": True,
+ "$defs": {
+ "bar": True,
+ "baz": {
+ "$recursiveAnchor": True,
+ "$anchor": "fooAnchor",
+ },
+ },
+ },
+ },
+ },
+ )
+ resolver = (
+ Registry()
+ .with_resources(
+ [
+ ("http://example.com", root),
+ ("http://example.com/foo/", TRUE),
+ ("http://example.com/foo/bar", root),
+ ],
+ )
+ .resolver()
+ )
+
+ first = resolver.lookup("http://example.com")
+ second = first.resolver.lookup("foo/")
+ resolver = second.resolver.lookup("bar").resolver
+ fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver)
+ assert fourth.contents == root.contents
+
+
+def test_multiple_lookup_recursive_ref_with_nonrecursive_ref():
+ one = referencing.jsonschema.DRAFT201909.create_resource(
+ {"$recursiveAnchor": True},
+ )
+ two = referencing.jsonschema.DRAFT201909.create_resource(
+ {
+ "$id": "http://example.com",
+ "$recursiveAnchor": True,
+ "$defs": {
+ "foo": {
+ "$id": "foo",
+ "$recursiveAnchor": True,
+ "$defs": {
+ "bar": True,
+ "baz": {
+ "$recursiveAnchor": True,
+ "$anchor": "fooAnchor",
+ },
+ },
+ },
+ },
+ },
+ )
+ three = referencing.jsonschema.DRAFT201909.create_resource(
+ {"$recursiveAnchor": False},
+ )
+ resolver = (
+ Registry()
+ .with_resources(
+ [
+ ("http://example.com", three),
+ ("http://example.com/foo/", two),
+ ("http://example.com/foo/bar", one),
+ ],
+ )
+ .resolver()
+ )
+
+ first = resolver.lookup("http://example.com")
+ second = first.resolver.lookup("foo/")
+ resolver = second.resolver.lookup("bar").resolver
+ fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver)
+ assert fourth.contents == two.contents
+
+
+def test_empty_registry():
+ assert referencing.jsonschema.EMPTY_REGISTRY == Registry()
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py b/valley/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b8ae9177c197456bb3bbd62d4c1875bc95ff28b
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/tests/test_referencing_suite.py
@@ -0,0 +1,66 @@
+from pathlib import Path
+import json
+import os
+
+import pytest
+
+from referencing import Registry
+from referencing.exceptions import Unresolvable
+import referencing.jsonschema
+
+
+class SuiteNotFound(Exception):
+ def __str__(self): # pragma: no cover
+ return (
+ "Cannot find the referencing suite. "
+ "Set the REFERENCING_SUITE environment variable to the path to "
+ "the suite, or run the test suite from alongside a full checkout "
+ "of the git repository."
+ )
+
+
+if "REFERENCING_SUITE" in os.environ: # pragma: no cover
+ SUITE = Path(os.environ["REFERENCING_SUITE"]) / "tests"
+else:
+ SUITE = Path(__file__).parent.parent.parent / "suite/tests"
+if not SUITE.is_dir(): # pragma: no cover
+ raise SuiteNotFound()
+DIALECT_IDS = json.loads(SUITE.joinpath("specifications.json").read_text())
+
+
+@pytest.mark.parametrize(
+ "test_path",
+ [
+ pytest.param(each, id=f"{each.parent.name}-{each.stem}")
+ for each in SUITE.glob("*/**/*.json")
+ ],
+)
+def test_referencing_suite(test_path, subtests):
+ dialect_id = DIALECT_IDS[test_path.relative_to(SUITE).parts[0]]
+ specification = referencing.jsonschema.specification_with(dialect_id)
+ loaded = json.loads(test_path.read_text())
+ registry = loaded["registry"]
+ registry = Registry().with_resources(
+ (uri, specification.create_resource(contents))
+ for uri, contents in loaded["registry"].items()
+ )
+ for test in loaded["tests"]:
+ with subtests.test(test=test):
+ if "normalization" in test_path.stem:
+ pytest.xfail("APIs need to change for proper URL support.")
+
+ resolver = registry.resolver(base_uri=test.get("base_uri", ""))
+
+ if test.get("error"):
+ with pytest.raises(Unresolvable):
+ resolver.lookup(test["ref"])
+ else:
+ resolved = resolver.lookup(test["ref"])
+ assert resolved.contents == test["target"]
+
+ then = test.get("then")
+ while then: # pragma: no cover
+ with subtests.test(test=test, then=then):
+ resolved = resolved.resolver.lookup(then["ref"])
+ assert resolved.contents == then["target"]
+ then = then.get("then")
diff --git a/valley/lib/python3.10/site-packages/referencing/tests/test_retrieval.py b/valley/lib/python3.10/site-packages/referencing/tests/test_retrieval.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0a8f8ad9975d1a760bca14dea7b60d41fb8ea75
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/tests/test_retrieval.py
@@ -0,0 +1,106 @@
+from functools import lru_cache
+import json
+
+import pytest
+
+from referencing import Registry, Resource, exceptions
+from referencing.jsonschema import DRAFT202012
+from referencing.retrieval import to_cached_resource
+
+
+class TestToCachedResource:
+ def test_it_caches_retrieved_resources(self):
+ contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ stack = [json.dumps(contents)]
+
+ @to_cached_resource()
+ def retrieve(uri):
+ return stack.pop()
+
+ registry = Registry(retrieve=retrieve)
+
+ expected = Resource.from_contents(contents)
+
+ got = registry.get_or_retrieve("urn:example:schema")
+ assert got.value == expected
+
+ # And a second time we get the same value.
+ again = registry.get_or_retrieve("urn:example:schema")
+ assert again.value is got.value
+
+ def test_custom_loader(self):
+ contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ stack = [json.dumps(contents)[::-1]]
+
+ @to_cached_resource(loads=lambda s: json.loads(s[::-1]))
+ def retrieve(uri):
+ return stack.pop()
+
+ registry = Registry(retrieve=retrieve)
+
+ expected = Resource.from_contents(contents)
+
+ got = registry.get_or_retrieve("urn:example:schema")
+ assert got.value == expected
+
+ # And a second time we get the same value.
+ again = registry.get_or_retrieve("urn:example:schema")
+ assert again.value is got.value
+
+ def test_custom_from_contents(self):
+ contents = {}
+ stack = [json.dumps(contents)]
+
+ @to_cached_resource(from_contents=DRAFT202012.create_resource)
+ def retrieve(uri):
+ return stack.pop()
+
+ registry = Registry(retrieve=retrieve)
+
+ expected = DRAFT202012.create_resource(contents)
+
+ got = registry.get_or_retrieve("urn:example:schema")
+ assert got.value == expected
+
+ # And a second time we get the same value.
+ again = registry.get_or_retrieve("urn:example:schema")
+ assert again.value is got.value
+
+ def test_custom_cache(self):
+ schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
+ mapping = {
+ "urn:example:1": dict(schema, foo=1),
+ "urn:example:2": dict(schema, foo=2),
+ "urn:example:3": dict(schema, foo=3),
+ }
+
+ resources = {
+ uri: Resource.from_contents(contents)
+ for uri, contents in mapping.items()
+ }
+
+ @to_cached_resource(cache=lru_cache(maxsize=2))
+ def retrieve(uri):
+ return json.dumps(mapping.pop(uri))
+
+ registry = Registry(retrieve=retrieve)
+
+ got = registry.get_or_retrieve("urn:example:1")
+ assert got.value == resources["urn:example:1"]
+ assert registry.get_or_retrieve("urn:example:1").value is got.value
+ assert registry.get_or_retrieve("urn:example:1").value is got.value
+
+ got = registry.get_or_retrieve("urn:example:2")
+ assert got.value == resources["urn:example:2"]
+ assert registry.get_or_retrieve("urn:example:2").value is got.value
+ assert registry.get_or_retrieve("urn:example:2").value is got.value
+
+ # This still succeeds, but evicts the first URI
+ got = registry.get_or_retrieve("urn:example:3")
+ assert got.value == resources["urn:example:3"]
+ assert registry.get_or_retrieve("urn:example:3").value is got.value
+ assert registry.get_or_retrieve("urn:example:3").value is got.value
+
+ # And now this fails (as we popped the value out of `mapping`)
+ with pytest.raises(exceptions.Unretrievable):
+ registry.get_or_retrieve("urn:example:1")
diff --git a/valley/lib/python3.10/site-packages/referencing/typing.py b/valley/lib/python3.10/site-packages/referencing/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..d00b0185c5c329ed7cfacbd857d8f513fe8cedf9
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/referencing/typing.py
@@ -0,0 +1,63 @@
+"""
+Type-annotation related support for the referencing library.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Protocol, TypeVar
+
+try:
+ from collections.abc import Mapping as Mapping
+
+ Mapping[str, str]
+except TypeError: # pragma: no cover
+ from typing import Mapping as Mapping
+
+
+if TYPE_CHECKING:
+ from referencing._core import Resolved, Resolver, Resource
+
+#: A URI which identifies a `Resource`.
+URI = str
+
+#: The type of documents within a registry.
+D = TypeVar("D")
+
+
+class Retrieve(Protocol[D]):
+ """
+ A retrieval callable, usable within a `Registry` for resource retrieval.
+
+ Does not make assumptions about where the resource might be coming from.
+ """
+
+ def __call__(self, uri: URI) -> Resource[D]:
+ """
+ Retrieve the resource with the given URI.
+
+ Raise `referencing.exceptions.NoSuchResource` if you wish to indicate
+ the retriever cannot lookup the given URI.
+ """
+ ...
+
+
+class Anchor(Protocol[D]):
+ """
+ An anchor within a `Resource`.
+
+ Beyond "simple" anchors, some specifications like JSON Schema's 2020
+ version have dynamic anchors.
+ """
+
+ @property
+ def name(self) -> str:
+ """
+ Return the name of this anchor.
+ """
+ ...
+
+ def resolve(self, resolver: Resolver[D]) -> Resolved[D]:
+ """
+ Return the resource for this anchor.
+ """
+ ...
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/INSTALLER b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..51f3442917839f8e0f0cccb52b3c10968ad0779e
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE
@@ -0,0 +1,3 @@
+This software is made available under the terms of *either* of the
+licenses found in LICENSE.APACHE2 or LICENSE.MIT. Contributions to are
+made under the terms of *both* these licenses.
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2 b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.APACHE2
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT
new file mode 100644
index 0000000000000000000000000000000000000000..b8bb97185926d7daed314609753173945ed4ff1a
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/LICENSE.MIT
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/METADATA b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..75e0057bbad50ca4c117b9130b92f1bed2720669
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/METADATA
@@ -0,0 +1,104 @@
+Metadata-Version: 2.1
+Name: sniffio
+Version: 1.3.1
+Summary: Sniff out which async library your code is running under
+Author-email: "Nathaniel J. Smith"
+License: MIT OR Apache-2.0
+Project-URL: Homepage, https://github.com/python-trio/sniffio
+Project-URL: Documentation, https://sniffio.readthedocs.io/
+Project-URL: Changelog, https://sniffio.readthedocs.io/en/latest/history.html
+Keywords: async,trio,asyncio
+Classifier: License :: OSI Approved :: MIT License
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Framework :: Trio
+Classifier: Framework :: AsyncIO
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Intended Audience :: Developers
+Classifier: Development Status :: 5 - Production/Stable
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: LICENSE.APACHE2
+License-File: LICENSE.MIT
+
+.. image:: https://img.shields.io/badge/chat-join%20now-blue.svg
+ :target: https://gitter.im/python-trio/general
+ :alt: Join chatroom
+
+.. image:: https://img.shields.io/badge/docs-read%20now-blue.svg
+ :target: https://sniffio.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation Status
+
+.. image:: https://img.shields.io/pypi/v/sniffio.svg
+ :target: https://pypi.org/project/sniffio
+ :alt: Latest PyPi version
+
+.. image:: https://img.shields.io/conda/vn/conda-forge/sniffio.svg
+ :target: https://anaconda.org/conda-forge/sniffio
+ :alt: Latest conda-forge version
+
+.. image:: https://travis-ci.org/python-trio/sniffio.svg?branch=master
+ :target: https://travis-ci.org/python-trio/sniffio
+ :alt: Automated test status
+
+.. image:: https://codecov.io/gh/python-trio/sniffio/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/python-trio/sniffio
+ :alt: Test coverage
+
+=================================================================
+sniffio: Sniff out which async library your code is running under
+=================================================================
+
+You're writing a library. You've decided to be ambitious, and support
+multiple async I/O packages, like `Trio
+<https://trio.readthedocs.io>`__, and `asyncio
+<https://docs.python.org/3/library/asyncio.html>`__, and ... You've
+written a bunch of clever code to handle all the differences. But...
+how do you know *which* piece of clever code to run?
+
+This is a tiny package whose only purpose is to let you detect which
+async library your code is running under.
+
+* Documentation: https://sniffio.readthedocs.io
+
+* Bug tracker and source code: https://github.com/python-trio/sniffio
+
+* License: MIT or Apache License 2.0, your choice
+
+* Contributor guide: https://trio.readthedocs.io/en/latest/contributing.html
+
+* Code of conduct: Contributors are requested to follow our `code of
+  conduct
+  <https://trio.readthedocs.io/en/latest/code-of-conduct.html>`_
+ in all project spaces.
+
+This library is maintained by the Trio project, as a service to the
+async Python community as a whole.
+
+
+Quickstart
+----------
+
+.. code-block:: python3
+
+ from sniffio import current_async_library
+ import trio
+ import asyncio
+
+ async def print_library():
+ library = current_async_library()
+ print("This is:", library)
+
+ # Prints "This is trio"
+ trio.run(print_library)
+
+ # Prints "This is asyncio"
+ asyncio.run(print_library())
+
+For more details, including how to add support to new async libraries,
+`please peruse our fine manual <https://sniffio.readthedocs.io>`__.
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/RECORD b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..9ceec6520d2e9b81c8d3f3107ff3a1cf754a8dbd
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/RECORD
@@ -0,0 +1,20 @@
+sniffio-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+sniffio-1.3.1.dist-info/LICENSE,sha256=ZSyHhIjRRWNh4Iw_hgf9e6WYkqFBA9Fczk_5PIW1zIs,185
+sniffio-1.3.1.dist-info/LICENSE.APACHE2,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+sniffio-1.3.1.dist-info/LICENSE.MIT,sha256=Pm2uVV65J4f8gtHUg1Vnf0VMf2Wus40_nnK_mj2vA0s,1046
+sniffio-1.3.1.dist-info/METADATA,sha256=CzGLVwmO3sz1heYKiJprantcQIbzqapi7_dqHTzuEtk,3875
+sniffio-1.3.1.dist-info/RECORD,,
+sniffio-1.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sniffio-1.3.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+sniffio-1.3.1.dist-info/top_level.txt,sha256=v9UJXGs5CyddCVeAqXkQiWOrpp6Wtx6GeRrPt9-jjHg,8
+sniffio/__init__.py,sha256=9WJEJlXu7yluP0YtI5SQ9M9OTQfbNHkadarK1vXGDPM,335
+sniffio/__pycache__/__init__.cpython-310.pyc,,
+sniffio/__pycache__/_impl.cpython-310.pyc,,
+sniffio/__pycache__/_version.cpython-310.pyc,,
+sniffio/_impl.py,sha256=UmUFMZpiuOrcjnuHhuYiYMxeCNWfqu9kBlaPf0xk6X8,2843
+sniffio/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sniffio/_tests/__pycache__/__init__.cpython-310.pyc,,
+sniffio/_tests/__pycache__/test_sniffio.cpython-310.pyc,,
+sniffio/_tests/test_sniffio.py,sha256=MMJZZJjQrUi95RANNM-a_55BZquA_gv4rHU1pevcTCM,2058
+sniffio/_version.py,sha256=iVes5xwsHeRzQDexBaAhyx_taNt2ucfA7CWAo4QDt6Q,89
+sniffio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/REQUESTED b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/WHEEL b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/top_level.txt b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..01c650244d0ccb6043c603b736fcf8d9e622bc71
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/sniffio-1.3.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+sniffio
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da47ad3ca6c0fc40707cd8585011fe02fa510d22
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/functionalization.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1be31fee89fbb5d9610e4a23ae5133500b33835
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/__pycache__/python.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/types/__init__.py b/valley/lib/python3.10/site-packages/torchgen/api/types/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3e2f9a431b45c7ff1b0357dcb0e24a508a38a87
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/torchgen/api/types/__init__.py
@@ -0,0 +1,3 @@
+from .types import *
+from .types_base import *
+from .signatures import * # isort:skip
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b206932b0fce08f0853d2a092aa028a649ed96d
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a673164bf110ffc6fd106ba5616ac3e278fb777
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17bf609f9b3accd89505a96a76710bca94b54232
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1aff41d38bb58d184100bbe99d7b1896739d794
Binary files /dev/null and b/valley/lib/python3.10/site-packages/torchgen/api/types/__pycache__/types_base.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/__init__.py b/valley/lib/python3.10/site-packages/triton/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..031c58fb16aca49b3ce0b6e77b8f5070164bf801
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/__init__.py
@@ -0,0 +1,73 @@
+"""isort:skip_file"""
+__version__ = '3.0.0'
+
+# ---------------------------------------
+# Note: import order is significant here.
+
+# submodules
+from .runtime import (
+ autotune,
+ Config,
+ heuristics,
+ JITFunction,
+ KernelInterface,
+ reinterpret,
+ TensorWrapper,
+ OutOfResources,
+ InterpreterError,
+ MockTensor,
+)
+from .runtime.jit import jit
+from .compiler import compile, CompilationError
+from .errors import TritonError
+
+from . import language
+from . import testing
+from . import tools
+
+__all__ = [
+ "autotune",
+ "cdiv",
+ "CompilationError",
+ "compile",
+ "Config",
+ "heuristics",
+ "impl",
+ "InterpreterError",
+ "jit",
+ "JITFunction",
+ "KernelInterface",
+ "language",
+ "MockTensor",
+ "next_power_of_2",
+ "ops",
+ "OutOfResources",
+ "reinterpret",
+ "runtime",
+ "TensorWrapper",
+ "TritonError",
+ "testing",
+ "tools",
+]
+
+# -------------------------------------
+# misc. utilities that don't fit well
+# into any specific module
+# -------------------------------------
+
+
def cdiv(x: int, y: int):
    """Ceiling division: smallest integer >= x / y (assumes y > 0)."""
    numerator = x + y - 1
    return numerator // y
+
+
def next_power_of_2(n: int):
    """Return the smallest power of 2 greater than or equal to n.

    For n <= 0 this returns 0, matching the behavior of the previous
    bit-twiddling implementation. Uses int.bit_length() so the result is
    correct for arbitrarily large Python ints: the old shift cascade
    (>>1 ... >>32) only propagated the top bit across 64 positions and
    silently returned a non-power-of-2 for n above ~2**65.
    """
    if n <= 0:
        # Preserve the historical contract of the shift-based version.
        return 0
    return 1 << (n - 1).bit_length()
diff --git a/valley/lib/python3.10/site-packages/triton/backends/compiler.py b/valley/lib/python3.10/site-packages/triton/backends/compiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..990690045204b34f8f335073904436448a3e7918
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/backends/compiler.py
@@ -0,0 +1,76 @@
+import os
+import re
+import subprocess
+
+from abc import ABCMeta, abstractmethod, abstractclassmethod
+from dataclasses import dataclass
+from typing import Union
+
+
@dataclass(frozen=True)
class GPUTarget(object):
    """Immutable description of the GPU a kernel is compiled for."""
    # Target backend, e.g., cuda, hip
    backend: str
    # Target architecture, e.g., 90 (for cuda compute capability), gfx940 (for hip)
    arch: Union[int, str]
    # Threads per warp on the target hardware — NOTE(review): exact values
    # are backend-dependent; not visible from this file.
    warp_size: int
+
+
class BaseBackend(metaclass=ABCMeta):
    """Abstract interface that every Triton compilation backend implements.

    Concrete backends subclass this and provide the target-specific
    compilation pipeline (stage population, option parsing, dialects).
    """

    def __init__(self, target: GPUTarget) -> None:
        """Store the compilation target; asserts the backend supports it."""
        self.target = target
        assert self.supports_target(target)

    @staticmethod
    def _path_to_binary(binary: str):
        """Locate `binary` and return (path, version).

        Looks first at the TRITON_<BINARY>_PATH environment variable, then in
        the bundled third_party/cuda/bin directory. The version is parsed
        from the tool's `--version` output.

        Raises:
            RuntimeError: if no working binary is found.
        """
        base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
        paths = [
            os.environ.get(f"TRITON_{binary.upper()}_PATH", ""),
            os.path.join(base_dir, "third_party", "cuda", "bin", binary),
        ]
        for p in paths:
            # The configured value may carry extra arguments; only the first
            # whitespace-separated token is the executable path.
            # (Renamed from `bin`, which shadowed the builtin.)
            bin_path = p.split(" ")[0]
            if os.path.exists(bin_path) and os.path.isfile(bin_path):
                result = subprocess.check_output([bin_path, "--version"], stderr=subprocess.STDOUT)
                if result is not None:
                    version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE)
                    if version is not None:
                        return p, version.group(1)
        raise RuntimeError(f"Cannot find {binary}")

    # `abstractclassmethod` has been deprecated since Python 3.3; stacking
    # @classmethod over @abstractmethod is the supported spelling.
    @classmethod
    @abstractmethod
    def supports_target(cls, target: GPUTarget):
        """Return True if this backend can compile for `target`."""
        raise NotImplementedError

    @abstractmethod
    def hash(self) -> str:
        """Returns a unique identifier for this backend"""
        raise NotImplementedError

    @abstractmethod
    def parse_options(self, options: dict) -> object:
        """
        Converts an `options` dictionary into an arbitrary object and returns it.
        This function may contain target-specific heuristics and check the legality of the provided options
        """
        raise NotImplementedError

    @abstractmethod
    def add_stages(self, stages: dict, options: object) -> None:
        """
        Populates `stages` dictionary with entries of the form:
        ir_name [str] => Function[(src: str, metadata: dict) -> str|bytes]
        The value of each entry may populate a `metadata` dictionary.
        Stages will be run sequentially (in insertion order) and can communicate using `metadata`.
        All stages are expected to return a `str` object, except for the last stage which returns
        a `bytes` object for execution by the launcher.
        """
        raise NotImplementedError

    @abstractmethod
    def load_dialects(self, context):
        """
        Load additional MLIR dialects into the provided `context`
        """
        raise NotImplementedError
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__init__.py b/valley/lib/python3.10/site-packages/triton/profiler/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0add689155c3dc28595e6ce1aeead48e133276dc
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/__init__.py
@@ -0,0 +1,10 @@
+# flake8: noqa
+from .scope import scope, enter_scope, exit_scope
+from .profile import (
+ start,
+ activate,
+ deactivate,
+ finalize,
+ profile,
+ DEFAULT_PROFILE_NAME,
+)
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/__init__.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5233867f65529a0185a54bf737710e8a591fdb46
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/__init__.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/flags.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/flags.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2bd7adf992b372973f96a8c0cd4c2c3284ac8aa4
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/flags.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/hook.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/hook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bbe7feb9045613e5fc3140e99a85d009198673fa
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/hook.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/profile.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/profile.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17cf1065b3089ca820a6aeedfea31ae7f8705cd9
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/profile.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/proton.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/proton.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27ccbe0ff148e59adab26452541033d94f523ab5
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/proton.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/scope.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/scope.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e65c2aa1c654d4cb414831e388586ae783df6502
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/scope.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/viewer.cpython-310.pyc b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/viewer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07a4385be05b6e4b8c570e6ed23338c8367252e4
Binary files /dev/null and b/valley/lib/python3.10/site-packages/triton/profiler/__pycache__/viewer.cpython-310.pyc differ
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/flags.py b/valley/lib/python3.10/site-packages/triton/profiler/flags.py
new file mode 100644
index 0000000000000000000000000000000000000000..37c75b2433efb08f1c3d8a60a661da0e65fee763
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/flags.py
@@ -0,0 +1,33 @@
+"""
+This file contains the global flags used in the proton package.
+"""
+
+# Whether to enable profiling. Default is False.
+profiling_on = False
+# Whether the script is run from the command line. Default is False.
+command_line = False
+
+
+def set_profiling_on():
+ global profiling_on
+ profiling_on = True
+
+
+def set_profiling_off():
+ global profiling_on
+ profiling_on = False
+
+
+def get_profiling_on():
+ global profiling_on
+ return profiling_on
+
+
+def set_command_line():
+ global command_line
+ command_line = True
+
+
+def is_command_line():
+ global command_line
+ return command_line
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/hook.py b/valley/lib/python3.10/site-packages/triton/profiler/hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9ec5f36b0f7ea50bdf04d5ef52993e69f995da3
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/hook.py
@@ -0,0 +1,33 @@
+from .scope import enter_scope, exit_scope
+from triton.compiler import CompiledKernel, LazyDict
+
+COMPUTE_METADATA_SCOPE_NAME = "__proton_launch_metadata"
+
+
class TritonHook:
    """Kernel-launch hook that records a per-kernel scope with flop/byte metrics."""

    flops_width = [8, 16, 32, 64]
    metrics = [f"flops{width}" for width in flops_width] + ["bytes"]

    @staticmethod
    def enter(lazy_dict: LazyDict) -> None:
        # Computing the launch metadata is itself profiled under its own scope.
        enter_scope(COMPUTE_METADATA_SCOPE_NAME)
        metadata = lazy_dict.get()
        exit_scope()
        fn_metrics = {}
        for key in TritonHook.metrics:
            if key in metadata:
                fn_metrics[key] = metadata[key]
        enter_scope(metadata["name"], triton_op=True, metrics=fn_metrics)

    @staticmethod
    def exit(lazy_dict: LazyDict) -> None:
        exit_scope(triton_op=True)
+
+
def register_triton_hook() -> None:
    """Install TritonHook's launch hooks, unless an enter hook is already set."""
    if CompiledKernel.launch_enter_hook is not None:
        return
    CompiledKernel.launch_enter_hook = TritonHook.enter
    CompiledKernel.launch_exit_hook = TritonHook.exit
+
+
def unregister_triton_hook() -> None:
    """Remove the launch hooks, but only if TritonHook installed them."""
    if CompiledKernel.launch_enter_hook != TritonHook.enter:
        return
    CompiledKernel.launch_enter_hook = None
    CompiledKernel.launch_exit_hook = None
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/profile.py b/valley/lib/python3.10/site-packages/triton/profiler/profile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bf7938a5967c740eedaf987271c23e4f5fb7c06
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/profile.py
@@ -0,0 +1,192 @@
+import functools
+import triton
+
+from triton._C.libproton import proton as libproton
+from .hook import register_triton_hook, unregister_triton_hook
+from .flags import set_profiling_off, set_profiling_on, is_command_line
+from typing import Optional
+
+DEFAULT_PROFILE_NAME = "proton"
+
+
def _select_backend() -> str:
    """Map the active runtime's backend name to the matching profiler backend."""
    backend = triton.runtime.driver.active.get_current_target().backend
    profiler_backends = {"cuda": "cupti", "hip": "roctracer"}
    if backend in profiler_backends:
        return profiler_backends[backend]
    raise ValueError("No backend is available for the current target.")
+
+
def start(
    name: Optional[str] = None,
    *,
    context: Optional[str] = "shadow",
    data: Optional[str] = "tree",
    backend: Optional[str] = None,
    hook: Optional[str] = None,
):
    """
    Start profiling with the given name and backend.

    Usage:

    ```python
    proton.start("my_profile")
    # do something
    proton.finalize()
    ```

    Args:
        name (str, optional): The name (with path) of the profiling session.
            If not provided, the default name is "~/proton.hatchet".
        backend (str, optional): The backend to use for profiling.
            Defaults to None, which automatically selects the backend
            matching the current active runtime (e.g. "cupti" for cuda).
        context (str, optional): The context to use for profiling.
            Available options are ["shadow", "python"]. Defaults to "shadow".
        data (str, optional): The data structure to use for profiling.
            Available options are ["tree"]. Defaults to "tree".
        hook (str, optional): The hook to use for profiling.
            Available options are [None, "triton"]. Defaults to None.
    Returns:
        session (int): The session ID of the profiling session, or None when
        running from the command line (the wrapper owns the session).
    """
    if is_command_line():
        # The command-line wrapper already started a session; ignore this call.
        return

    session_name = DEFAULT_PROFILE_NAME if name is None else name
    session_backend = _select_backend() if backend is None else backend

    set_profiling_on()
    if hook == "triton":
        register_triton_hook()
    return libproton.start(session_name, context, data, session_backend)
+
+
def activate(session: Optional[int] = 0) -> None:
    """Resume recording for `session` (0 = the first session started).

    Raises:
        ValueError: if a non-default session is given under the command line,
            where only a single session exists.
    """
    if session != 0 and is_command_line():
        raise ValueError("Only one session can be activated when running from the command line.")
    libproton.activate(session)
+
+
def deactivate(session: Optional[int] = 0) -> None:
    """Pause recording for `session`; already-collected data stays in memory.

    Raises:
        ValueError: if a non-default session is given under the command line,
            where only a single session exists.
    """
    if session != 0 and is_command_line():
        raise ValueError("Only one session can be deactivated when running from the command line.")
    libproton.deactivate(session)
+
+
def finalize(session: Optional[int] = None, output_format: str = "hatchet") -> None:
    """Flush and write a session's profiling data to its output file.

    Args:
        session (int, optional): The session ID to finalize. If None, all
            sessions are finalized and profiling is turned off.
        output_format (str, optional): The output format for the profiling
            results. Available options are ["hatchet"].
    """
    if session is not None:
        if is_command_line() and session != 0:
            raise ValueError("Only one session can be finalized when running from the command line.")
        libproton.finalize(session, output_format)
        return
    set_profiling_off()
    libproton.finalize_all(output_format)
    unregister_triton_hook()
+
+
def _profiling(
    func,
    name: Optional[str] = None,
    context: Optional[str] = "shadow",
    data: Optional[str] = "tree",
    backend: Optional[str] = None,
    hook: Optional[str] = None,
):
    """Decorator helper: bracket each call to `func` with a profiling session.

    Internal use only.

    Args:
        See start() for the arguments.

    Returns:
        wrapper (function): The wrapped function.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        session = start(name, context=context, data=data, backend=backend, hook=hook)
        result = func(*args, **kwargs)
        deactivate(session)
        return result

    return wrapper
+
+
def profile(
    func=None,
    *,
    name: Optional[str] = None,
    context: Optional[str] = "shadow",
    data: Optional[str] = "tree",
    backend: Optional[str] = None,
    hook: Optional[str] = None,
):
    """
    Decorator for profiling.

    Works both bare (`@proton.profile`) and with keyword arguments
    (`@proton.profile(name=...)`).

    Usage:

    ```python
    @proton.profile
    def foo():
        pass
    ```

    Args:
        See start() for the arguments.

    Returns:
        decorator (function): The decorator function.
    """

    def decorator(f):
        return _profiling(f, name=name, context=context, data=data, backend=backend, hook=hook)

    # Bare usage passes the function directly; apply the decorator at once.
    return decorator if func is None else decorator(func)
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/proton.py b/valley/lib/python3.10/site-packages/triton/profiler/proton.py
new file mode 100644
index 0000000000000000000000000000000000000000..21267f97da39d0ae971848aa1c96f25958235979
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/proton.py
@@ -0,0 +1,78 @@
+import argparse
+import sys
+import os
+from .profile import start, finalize, _select_backend
+from .flags import set_command_line
+
+
def parse_arguments():
    """Parse proton's own options; unknown arguments belong to the target.

    Returns:
        (args, target_args): parsed proton options and the remaining argv,
        which is forwarded to the profiled script (or pytest).
    """
    usage_text = """
proton [options] script.py [script_args] [script_options]
proton [options] pytest [pytest_args] [script_options]
python -m triton.profiler.proton [options] script.py [script_args] [script_options]
"""
    parser = argparse.ArgumentParser(
        description="The proton command utility for profiling scripts and pytest tests.",
        usage=usage_text, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-n", "--name", type=str, help="Name of the profiling session")
    parser.add_argument("-b", "--backend", type=str, help="Profiling backend", default=None, choices=["cupti"])
    parser.add_argument("-c", "--context", type=str, help="Profiling context", default="shadow",
                        choices=["shadow", "python"])
    parser.add_argument("-d", "--data", type=str, help="Profiling data", default="tree", choices=["tree"])
    parser.add_argument("-k", "--hook", type=str, help="Profiling hook", default=None, choices=[None, "triton"])
    return parser.parse_known_args()
+
+
def is_pytest(script):
    """Return True when the target executable is pytest (judged by basename)."""
    basename = os.path.basename(script)
    return basename == 'pytest'
+
+
def execute_as_main(script, args):
    """Run `script` as if it were invoked as __main__ with argv `args`.

    The script runs in a fresh global namespace (isolated from proton's
    globals) with sys.argv temporarily rewritten; argv is always restored.
    Exceptions raised by the script are caught and reported, not propagated.
    """
    script_path = os.path.abspath(script)
    # Fresh globals emulating a top-level script run; `sys` is pre-injected.
    script_globals = {
        "__name__": "__main__",
        "__file__": script_path,
        "__builtins__": __builtins__,
        "sys": sys,
    }

    saved_argv = sys.argv
    sys.argv = [script] + args

    try:
        with open(script_path, 'rb') as source:
            code = compile(source.read(), script_path, 'exec')
        exec(code, script_globals)
    except Exception as e:
        print(f"An error occurred while executing the script: {e}")
    finally:
        sys.argv = saved_argv
+
+
def run_profiling(args, target_args):
    """Start a session, run the target script (or pytest), then finalize."""
    backend = args.backend or _select_backend()

    start(args.name, context=args.context, data=args.data, backend=backend, hook=args.hook)

    # Any start() calls inside the profiled script become no-ops.
    set_command_line()

    script = target_args[0]
    script_args = target_args[1:]
    if is_pytest(script):
        import pytest
        pytest.main(script_args)
    else:
        execute_as_main(script, script_args)

    finalize()
+
+
def main():
    """Command-line entry point: parse options, then profile the target."""
    parsed_args, leftover = parse_arguments()
    run_profiling(parsed_args, leftover)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/scope.py b/valley/lib/python3.10/site-packages/triton/profiler/scope.py
new file mode 100644
index 0000000000000000000000000000000000000000..5695b8807500a887b6351e52e4dc9ae60b2bbd0d
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/scope.py
@@ -0,0 +1,105 @@
+import threading
+from functools import wraps
+from typing import Optional, Union
+
+from .flags import get_profiling_on
+from triton._C.libproton import proton as libproton
+
+_local = threading.local()
+
+MetricValueType = Union[float, int]
+PropertyValueType = Union[float, int, str]
+
+
class scope:
    """
    A context manager and decorator for entering and exiting a scope.

    Usage:
        context manager:
        ```python
        with proton.scope("test0", {metric_name: metric_value}):
            foo[1,](x, y)
        ```

        decorator:
        ```python
        @proton.scope("test0", {metric_name: metric_value})
        def foo(x, y):
            ...
        ```

    Args:
        name (str): The name of the scope.
        metrics (dict[str, float], optional): The metrics of the scope. Default is None.
        properties (dict[str, float|int|str], optional): The properties of the scope. Default is None.
    """

    def __init__(self, name: str, metrics: Optional[dict[str, MetricValueType]] = None,
                 properties: Optional[dict[str, PropertyValueType]] = None) -> None:
        self._name = name
        self._metrics = metrics
        self._properties = properties
        # Scope id assigned on entry; None while not inside the scope.
        self._id = None

    def __enter__(self):
        if not get_profiling_on():
            return self
        self._id = libproton.record_scope()
        libproton.enter_scope(self._id, self._name)
        if self._metrics:
            libproton.add_metrics(self._id, self._metrics)
        if self._properties:
            libproton.set_properties(self._id, self._properties)
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Only exit a scope we actually entered: profiling may be toggled
        # while the body runs, and the old code could crash on a missing id
        # (profiling turned on mid-body) or exit a stale id on reuse.
        if self._id is None or not get_profiling_on():
            self._id = None
            return
        libproton.exit_scope(self._id, self._name)
        self._id = None

    def __call__(self, func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            scope_id = None
            if get_profiling_on():
                scope_id = libproton.record_scope()
                libproton.enter_scope(scope_id, self._name)
                if self._metrics:
                    libproton.add_metrics(scope_id, self._metrics)
                if self._properties:
                    libproton.set_properties(scope_id, self._properties)
            ret = func(*args, **kwargs)
            # Guard on the recorded id, not just the flag: previously, if
            # profiling was enabled during func(), `id` was unbound here
            # and exiting raised NameError.
            if scope_id is not None and get_profiling_on():
                libproton.exit_scope(scope_id, self._name)
            return ret

        return wrapper
+
+
def enter_scope(name: str, *, triton_op: bool = False, metrics: Optional[dict[str, MetricValueType]] = None,
                properties: Optional[dict[str, PropertyValueType]] = None) -> int:
    """Open a profiling scope named `name` and return its id (-1 if profiling is off)."""
    if not get_profiling_on():
        return -1
    scope_id = libproton.record_scope()
    # Per-thread stack so nested scopes unwind in LIFO order in exit_scope().
    scopes = getattr(_local, "scopes", None)
    if scopes is None:
        scopes = _local.scopes = []
    scopes.append((scope_id, name))
    opener = libproton.enter_op if triton_op else libproton.enter_scope
    opener(scope_id, name)
    if metrics:
        libproton.add_metrics(scope_id, metrics)
    if properties:
        libproton.set_properties(scope_id, properties)
    return scope_id
+
+
def exit_scope(triton_op: bool = False) -> int:
    """Close the most recently entered scope on this thread; returns its id (-1 if off)."""
    if not get_profiling_on():
        return -1
    scope_id, scope_name = _local.scopes.pop()
    closer = libproton.exit_op if triton_op else libproton.exit_scope
    closer(scope_id, scope_name)
    return scope_id
diff --git a/valley/lib/python3.10/site-packages/triton/profiler/viewer.py b/valley/lib/python3.10/site-packages/triton/profiler/viewer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ef3a4c93ab4b0035346eb11249f0cba872bfe20
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/profiler/viewer.py
@@ -0,0 +1,236 @@
+import argparse
+from collections import namedtuple
+import json
+import pandas as pd
+
+import hatchet as ht
+from triton.profiler.hook import COMPUTE_METADATA_SCOPE_NAME, TritonHook
+
+
+def match_available_metrics(metrics, raw_metrics):
+ ret = []
+ if metrics:
+ for metric in metrics:
+ metric = metric.lower()
+ for raw_metric in raw_metrics:
+ raw_metric_no_unit = raw_metric.split("(")[0].strip().lower()
+ if metric in (raw_metric, raw_metric_no_unit):
+ ret.append(raw_metric + " (inc)")
+ break
+ else:
+ ret = [raw_metrics[0]] + " (inc)"
+ return ret
+
+
+def get_raw_metrics(file):
+ database = json.load(file)
+ device_info = database.pop(1)
+ gf = ht.GraphFrame.from_literal(database)
+ return gf, gf.show_metric_columns(), device_info
+
+
+def get_min_time_flops(df, device_info):
+    """Lower-bound kernel time (seconds) from FLOP counts at each device's peak FLOPS.
+
+    NOTE(review): an arch that matches none of the branches leaves
+    max_flops == 0 and the final division raises/propagates inf -- confirm
+    unknown archs cannot reach this point.
+    """
+    min_time_flops = pd.DataFrame(0.0, index=df.index, columns=["min_time"])
+    for device_type in device_info:
+        for device_index in device_info[device_type]:
+            arch = device_info[device_type][device_index]["arch"]
+            num_sms = device_info[device_type][device_index]["num_sms"]
+            clock_rate = device_info[device_type][device_index]["clock_rate"]
+            for width in TritonHook.flops_width:
+                idx = df["DeviceId"] == device_index
+                device_frames = df[idx]
+                # Skip FLOP widths that were never recorded for this device.
+                if f"flops{width}" not in device_frames.columns:
+                    continue
+                max_flops = 0
+                if device_type == "CUDA":
+                    if arch == "80":
+                        max_flops = 624e12 / (width / 8)
+                    elif arch == "89":
+                        # TODO(Keren): Implement fp16 acc-> 660.6 fp8
+                        max_flops = (330.3 * 1e12) / (width / 8)
+                    elif arch == "90":
+                        # 114 sms and 1755mhz is the base number of sms and clock rate of H100 pcie
+                        max_flops = ((num_sms / 114 * clock_rate / (1755 * 1e3) * 1513) * 1e12) / (width / 8)
+                elif device_type == "HIP":
+                    if arch == "gfx90a":
+                        max_flops = 383e12 / (width / 8)
+                    elif arch == "gfx941" or arch == "gfx942":
+                        max_flops = 2614.9e12 / (width / 8)
+                else:
+                    raise ValueError(f"Unsupported device type: {device_type}")
+                min_time_flops.loc[idx, "min_time"] += device_frames[f"flops{width}"].fillna(0) / max_flops
+    return min_time_flops
+
+
+def get_min_time_bytes(df, device_info):
+    """Lower-bound kernel time (seconds) from byte counts at each device's peak DRAM bandwidth."""
+    min_time_bytes = pd.DataFrame(0.0, index=df.index, columns=["min_time"])
+    for device_type in device_info:
+        for device_index in device_info[device_type]:
+            idx = df["DeviceId"] == device_index
+            device_frames = df[idx]
+            memory_clock_rate = device_info[device_type][device_index]["memory_clock_rate"]  # in khz
+            bus_width = device_info[device_type][device_index]["bus_width"]  # in bits
+            # Factor 2 presumably for double-data-rate memory (TODO confirm);
+            # *1e3 converts kHz to Hz, /8 converts bits to bytes.
+            peak_bandwidth = 2 * bus_width * memory_clock_rate * 1e3 / 8
+            min_time_bytes.loc[idx, "min_time"] += device_frames["bytes"] / peak_bandwidth
+    return min_time_bytes
+
+
+# (metric base name, {unit string -> scale factor relative to the base unit}).
+FactorDict = namedtuple("FactorDict", ["name", "factor"])
+time_factor_dict = FactorDict("time", {"time/s": 1, "time/ms": 1e-3, "time/us": 1e-6, "time/ns": 1e-9})
+flops_factor_dict = FactorDict("flops", {"flop/s": 1, "gflop/s": 1e9, "tflop/s": 1e12})
+bytes_factor_dict = FactorDict("bytes", {"byte/s": 1, "gbyte/s": 1e9, "tbyte/s": 1e12})
+
+# Maps each throughput unit (e.g. "gflop/s") to the FactorDict it derives from.
+derivable_metrics = {
+    **{key: flops_factor_dict
+       for key in flops_factor_dict.factor.keys()},
+    **{key: bytes_factor_dict
+       for key in bytes_factor_dict.factor.keys()},
+}
+
+
+def derive_metrics(gf, metrics, raw_metrics, device_info):
+    """Resolve requested *metrics*, adding derived columns to ``gf.dataframe`` as needed.
+
+    Returns the list of dataframe column names to display.
+    """
+    derived_metrics = []
+    original_metrics = []
+    time_metric_name = match_available_metrics([time_factor_dict.name], raw_metrics)[0]
+    # e.g. "time (ms) (inc)" -> "time/ms", the unit key used in time_factor_dict.
+    time_unit = (time_factor_dict.name + "/" + time_metric_name.split("(")[1].split(")")[0])
+    for metric in metrics:
+        if metric == "util":  # Tensor core only
+            min_time_bytes = get_min_time_bytes(gf.dataframe, device_info)
+            min_time_flops = get_min_time_flops(gf.dataframe, device_info)
+            time_sec = gf.dataframe[time_metric_name] * (time_factor_dict.factor[time_unit] /
+                                                         time_factor_dict.factor["time/s"])
+            # Utilization = (best achievable time bound) / (measured time).
+            gf.dataframe["util (inc)"] = min_time_flops["min_time"].combine(min_time_bytes["min_time"], max) / time_sec
+            derived_metrics.append("util (inc)")
+        elif metric in derivable_metrics:
+            deriveable_metric = derivable_metrics[metric]
+            metric_name = deriveable_metric.name
+            metric_factor_dict = deriveable_metric.factor
+            matched_metric_name = match_available_metrics([metric_name], raw_metrics)[0]
+            # Throughput = quantity / time-in-seconds, rescaled to the requested unit.
+            gf.dataframe[f"{metric} (inc)"] = (gf.dataframe[matched_metric_name] /
+                                               (gf.dataframe[time_metric_name] * time_factor_dict.factor[time_unit]) /
+                                               metric_factor_dict[metric])
+            derived_metrics.append(f"{metric} (inc)")
+        elif metric in time_factor_dict.factor:
+            # Pure unit conversion of the existing time column.
+            metric_time_unit = time_factor_dict.name + "/" + metric.split("/")[1]
+            gf.dataframe[f"{metric} (inc)"] = gf.dataframe[time_metric_name] * (
+                time_factor_dict.factor[time_unit] / time_factor_dict.factor[metric_time_unit])
+            derived_metrics.append(f"{metric} (inc)")
+        else:
+            original_metrics.append(metric)
+
+    if original_metrics:
+        original_metrics = match_available_metrics(original_metrics, raw_metrics)
+    return derived_metrics + original_metrics
+
+
+def parse(metrics, filename, include, exclude, threshold, depth):
+    """Load the profile in *filename*, derive and filter metrics, and print the call tree."""
+    with open(filename, "r") as f:
+        gf, raw_metrics, device_info = get_raw_metrics(f)
+        assert len(raw_metrics) > 0, "No metrics found in the input file"
+        gf.update_inclusive_columns()
+        metrics = derive_metrics(gf, metrics, raw_metrics, device_info)
+        if include or exclude:
+            # make regex do negative match
+            name_filter = f"^(?!{exclude}).*" if exclude else include
+            query = ["*", {"name": name_filter}]
+            gf = gf.filter(query, squash=True)
+        # filter out metadata computation
+        query = [{"name": f"^(?!{COMPUTE_METADATA_SCOPE_NAME}).*"}]
+        gf = gf.filter(query, squash=True)
+        if threshold:
+            # TODO: generalize to support multiple metrics
+            query = ["*", {metrics[0]: f">= {threshold}"}]
+            gf = gf.filter(query, squash=True)
+        print(gf.tree(metric_column=metrics, expand_name=True, depth=depth, render_header=False))
+
+
+def show_metrics(file_name):
+ with open(file_name, "r") as f:
+ _, raw_metrics, _ = get_raw_metrics(f)
+ print("Available metrics:")
+ if raw_metrics:
+ for raw_metric in raw_metrics:
+ raw_metric_no_unit = raw_metric.split("(")[0].strip().lower()
+ print(f"- {raw_metric_no_unit}")
+ return
+
+
+def main():
+    """CLI entry point: parse arguments, then either list metrics or print the profile tree."""
+    argparser = argparse.ArgumentParser(
+        description="Performance data viewer for proton profiles.",
+        formatter_class=argparse.RawTextHelpFormatter,
+    )
+    argparser.add_argument(
+        "-l",
+        "--list",
+        action="store_true",
+        help="""List available metrics. Metric names are case insensitive and ignore units.
+Derived metrics can be created when source metrics are available.
+- time/s, time/ms, time/us, time/ns: time
+- flop/s, gflop/s, tflop/s: flops / time
+- byte/s, gbyte/s, tbyte/s: bytes / time
+- util: max(sum(flops) / peak_flops_time, bytes / peak_bandwidth_time))
+""",
+    )
+    argparser.add_argument(
+        "-m",
+        "--metrics",
+        type=str,
+        default=None,
+        help="""At maximum two metrics can be specified, separated by comma.
+There are two modes:
+1) Choose the output metric to display. It's case insensitive and ignore units.
+2) Derive a new metric from existing metrics.
+""",
+    )
+    argparser.add_argument(
+        "-i",
+        "--include",
+        type=str,
+        default=None,
+        help="Include frames(kernels) that match the given regular expression",
+    )
+    argparser.add_argument(
+        "-e",
+        "--exclude",
+        type=str,
+        default=None,
+        help="Exclude frames(kernels) that match the given regular expression",
+    )
+
+    argparser.add_argument(
+        "-t",
+        "--threshold",
+        type=float,
+        default=None,
+        help=
+        "Exclude frames(kernels) whose metrics are below the given threshold. This filter only applies on the first metric.",
+    )
+
+    argparser.add_argument(
+        "-d",
+        "--depth",
+        type=int,
+        default=100,
+        help="The depth of the tree to display",
+    )
+
+    # The profile path is positional and arrives via parse_known_args leftovers.
+    args, target_args = argparser.parse_known_args()
+    assert len(target_args) == 1, "Must specify a file to read"
+
+    file_name = target_args[0]
+    metrics = args.metrics.split(",") if args.metrics else None
+    include = args.include
+    exclude = args.exclude
+    threshold = args.threshold
+    depth = args.depth
+    if include and exclude:
+        raise ValueError("Cannot specify both include and exclude")
+    if args.list:
+        show_metrics(file_name)
+    elif metrics:
+        parse(metrics, file_name, include, exclude, threshold, depth)
diff --git a/valley/lib/python3.10/site-packages/triton/testing.py b/valley/lib/python3.10/site-packages/triton/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..86092f098f9dbdcacf374c01bb2b3a9fd6d6ae38
--- /dev/null
+++ b/valley/lib/python3.10/site-packages/triton/testing.py
@@ -0,0 +1,493 @@
+import functools
+import os
+import subprocess
+import sys
+from contextlib import contextmanager
+from typing import Any, Dict, List
+from . import language as tl
+
+
+def nvsmi(attrs):
+ attrs = ','.join(attrs)
+ cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits']
+ out = subprocess.check_output(cmd)
+ ret = out.decode(sys.stdout.encoding).split(',')
+ ret = [int(x) for x in ret]
+ return ret
+
+
+def do_bench_cudagraph(fn, rep=20, grad_to_none=None, return_mode="mean"):
+    """
+    Benchmark the runtime of the provided function.
+
+    :param fn: Function to benchmark
+    :type fn: Callable
+    :param rep: Repetition time (in ms)
+    :type rep: int
+    :param grad_to_none: Reset the gradient of the provided tensor to None
+    :type grad_to_none: torch.tensor, optional
+    :param return_mode: Statistic to aggregate timing retries: "min", "max", "mean" or "median"
+    :type return_mode: str
+    :return: Aggregated per-call runtime in ms.
+    """
+    import torch
+    assert return_mode in ["min", "max", "mean", "median"]
+
+    if torch.cuda.current_stream() == torch.cuda.default_stream():
+        raise RuntimeError("Cannot capture graph in default stream. Please use side stream in benchmark code.")
+    # warmup
+    fn()
+    # step 1 - we estimate the amount of time the kernel call takes
+    # NOTE: this estimate isn't super accurate because the GPU isn't warmed up at this point
+    # but it is probably good enough
+    if grad_to_none is not None:
+        for x in grad_to_none:
+            x.detach_()
+            x.requires_grad_(True)
+            x.grad = None
+    g = torch.cuda.CUDAGraph()
+    with torch.cuda.graph(g):
+        fn()
+    torch.cuda.synchronize()
+    start_event = torch.cuda.Event(enable_timing=True)
+    end_event = torch.cuda.Event(enable_timing=True)
+    start_event.record()
+    g.replay()
+    end_event.record()
+    torch.cuda.synchronize()
+    estimate_ms = start_event.elapsed_time(end_event)
+    n_repeat = max(1, int(rep / estimate_ms))
+    # step 2 - construct a cuda graph with `n_repeat` unrolled function calls to minimize
+    # host overhead
+    g = torch.cuda.CUDAGraph()
+    with torch.cuda.graph(g):
+        for i in range(n_repeat):
+            if grad_to_none is not None:
+                for x in grad_to_none:
+                    x.grad = None
+            fn()
+    torch.cuda.synchronize()
+    # measure time and return
+    ret = []
+    n_retries = 10
+    for i in range(n_retries):
+        start_event = torch.cuda.Event(enable_timing=True)
+        end_event = torch.cuda.Event(enable_timing=True)
+        start_event.record()
+        g.replay()
+        end_event.record()
+        torch.cuda.synchronize()
+        # Average over the unrolled calls to get a per-call figure.
+        ret += [start_event.elapsed_time(end_event) / n_repeat]
+    times = torch.tensor(ret)
+    return getattr(torch, return_mode)(times).item()
+
+
+def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flush=True, return_mode="mean"):
+    """
+    Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
+    the 20-th and 80-th performance percentile.
+
+    :param fn: Function to benchmark
+    :type fn: Callable
+    :param warmup: Warmup time (in ms)
+    :type warmup: int
+    :param rep: Repetition time (in ms)
+    :type rep: int
+    :param grad_to_none: Reset the gradient of the provided tensor to None
+    :type grad_to_none: torch.tensor, optional
+    :param quantiles: Performance percentile to return in addition to the median.
+    :type quantiles: list[float]
+    :param fast_flush: Use faster kernel to flush L2 between measurements
+    :type fast_flush: bool
+    :param return_mode: Statistic returned when quantiles is None: "min", "max", "mean" or "median"
+    :type return_mode: str
+    """
+    assert return_mode in ["min", "max", "mean", "median"]
+    import torch
+
+    # One untimed call to trigger any compilation/initialization side effects.
+    fn()
+    torch.cuda.synchronize()
+
+    # We maintain a buffer of 256 MB that we clear
+    # before each kernel call to make sure that the L2
+    # doesn't contain any input data before the run
+    if fast_flush:
+        cache = torch.empty(int(256e6 // 4), dtype=torch.int, device='cuda')
+    else:
+        cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda')
+
+    # Estimate the runtime of the function
+    start_event = torch.cuda.Event(enable_timing=True)
+    end_event = torch.cuda.Event(enable_timing=True)
+    start_event.record()
+    for _ in range(5):
+        cache.zero_()
+        fn()
+    end_event.record()
+    torch.cuda.synchronize()
+    estimate_ms = start_event.elapsed_time(end_event) / 5
+
+    # compute number of warmup and repeat
+    n_warmup = max(1, int(warmup / estimate_ms))
+    n_repeat = max(1, int(rep / estimate_ms))
+    start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
+    end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
+    # Warm-up
+    for _ in range(n_warmup):
+        fn()
+    # Benchmark
+    for i in range(n_repeat):
+        # we don't want `fn` to accumulate gradient values
+        # if it contains a backward pass. So we clear the
+        # provided gradients
+        if grad_to_none is not None:
+            for x in grad_to_none:
+                x.grad = None
+        # we clear the L2 cache before each run
+        cache.zero_()
+        # record time of `fn`
+        start_event[i].record()
+        fn()
+        end_event[i].record()
+    # Record clocks
+    torch.cuda.synchronize()
+    times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)], dtype=torch.float)
+    if quantiles is not None:
+        ret = torch.quantile(times, torch.tensor(quantiles, dtype=torch.float)).tolist()
+        # A single requested quantile is returned as a scalar, not a list.
+        if len(ret) == 1:
+            ret = ret[0]
+        return ret
+    return getattr(torch, return_mode)(times).item()
+
+
+def assert_close(x, y, atol=None, rtol=None, err_msg=''):
+    """Assert that *x* and *y* are numerically close.
+
+    *atol*/*rtol* may be plain numbers or callables taking the torch dtype
+    and returning a tolerance.  NaNs compare equal.  Raises AssertionError
+    on mismatch.
+    """
+    import numpy as np
+    import torch
+
+    # canonicalize arguments to be tensors
+    if not isinstance(x, torch.Tensor):
+        x = torch.tensor(x)
+    if not isinstance(y, torch.Tensor):
+        y = torch.tensor(y)
+    # absolute tolerance
+    if atol is None:
+        atol = 1e-2
+    atol = atol(x.dtype) if callable(atol) else atol
+    # relative tolerance hook
+    if rtol is None:
+        rtol = 0.
+    rtol = rtol(x.dtype) if callable(rtol) else rtol
+    # we use numpy instead of pytorch
+    # as it seems more memory efficient
+    # pytorch tends to oom on large tensors
+    if isinstance(x, torch.Tensor):
+        # numpy has no bfloat16; widen to float32 first.
+        if x.dtype == torch.bfloat16:
+            x = x.float()
+        x = x.cpu().detach().numpy()
+    if isinstance(y, torch.Tensor):
+        if y.dtype == torch.bfloat16:
+            y = y.float()
+        y = y.cpu().detach().numpy()
+    # we handle size==1 case separately as we can
+    # provide better error message there
+    if x.size > 1 or y.size > 1:
+        np.testing.assert_allclose(x, y, atol=atol, rtol=rtol, equal_nan=True)
+        return
+    if not np.allclose(x, y, atol=atol, rtol=rtol):
+        raise AssertionError(f'{err_msg} {x} is not close to {y} (atol={atol}, rtol={rtol})')
+
+
+class Benchmark:
+    """
+    This class is used by the :code:`perf_report` function to generate line plots with a concise API.
+    """
+
+    def __init__(
+        self,
+        x_names: List[str],
+        x_vals: List[Any],
+        line_arg: str,
+        line_vals: List[Any],
+        line_names: List[str],
+        plot_name: str,
+        args: Dict[str, Any],
+        xlabel: str = '',
+        ylabel: str = '',
+        x_log: bool = False,
+        y_log: bool = False,
+        color=None,
+        styles=None,
+    ):
+        """
+        Constructor.
+        x_vals can be a list of scalars or a list of tuples/lists. If x_vals is a list
+        of scalars and there are multiple x_names, all arguments will have the same value.
+        If x_vals is a list of tuples/lists, each element should have the same length as
+        x_names.
+
+        :param x_names: Name of the arguments that should appear on the x axis of the plot.
+        :type x_names: List[str]
+        :param x_vals: List of values to use for the arguments in :code:`x_names`.
+        :type x_vals: List[Any]
+        :param line_arg: Argument name for which different values correspond to different lines in the plot.
+        :type line_arg: str
+        :param line_vals: List of values to use for the arguments in :code:`line_arg`.
+        :type line_vals: List[Any]
+        :param line_names: Label names for the different lines.
+        :type line_names: List[str]
+        :param plot_name: Name of the plot.
+        :type plot_name: str
+        :param args: Dictionary of keyword arguments to remain fixed throughout the benchmark.
+        :type args: Dict[str, Any]
+        :param xlabel: Label for the x axis of the plot.
+        :type xlabel: str, optional
+        :param ylabel: Label for the y axis of the plot.
+        :type ylabel: str, optional
+        :param x_log: Whether the x axis should be log scale.
+        :type x_log: bool, optional
+        :param y_log: Whether the y axis should be log scale.
+        :type y_log: bool, optional
+        :param styles: Per-line (color, line_style) pairs used when plotting.
+        :type styles: list, optional
+        :param color: NOTE(review): accepted but never stored or read -- confirm whether it is dead.
+        """
+        self.x_names = x_names
+        self.x_vals = x_vals
+        self.x_log = x_log
+        self.line_arg = line_arg
+        self.line_vals = line_vals
+        self.line_names = line_names
+        self.y_log = y_log
+        self.styles = styles
+        # plot info
+        self.xlabel = xlabel
+        self.ylabel = ylabel
+        self.plot_name = plot_name
+        self.args = args
+
+
+class Mark:
+    """Runner produced by :code:`perf_report`: executes benchmark configs, plots and saves results."""
+
+    def __init__(self, fn, benchmarks):
+        # fn: the benchmarked function; benchmarks: one Benchmark or a list of them.
+        self.fn = fn
+        self.benchmarks = benchmarks
+
+    def _run(self, bench: Benchmark, save_path: str, show_plots: bool, print_data: bool, diff_col=False,
+             save_precision=6, **kwrags):
+        """Execute *bench* for every x value and line, returning the results as a DataFrame.
+
+        NOTE(review): the parameter name `kwrags` is presumably a typo of
+        `kwargs`; it is collected via ** so it is internal-only.
+        """
+        import os
+
+        import matplotlib.pyplot as plt
+        import pandas as pd
+        y_mean = bench.line_names
+        y_min = [f'{x}-min' for x in bench.line_names]
+        y_max = [f'{x}-max' for x in bench.line_names]
+        x_names = list(bench.x_names)
+        df = pd.DataFrame(columns=x_names + y_mean + y_min + y_max)
+        for x in bench.x_vals:
+            # x can be a single value or a sequence of values.
+            if not isinstance(x, (list, tuple)):
+                x = [x for _ in x_names]
+
+            if len(x) != len(x_names):
+                raise ValueError(f"Expected {len(x_names)} values, got {x}")
+            x_args = dict(zip(x_names, x))
+
+            row_mean, row_min, row_max = [], [], []
+            for y in bench.line_vals:
+                ret = self.fn(**x_args, **{bench.line_arg: y}, **bench.args, **kwrags)
+                # fn may return a scalar or a (mean, min, max) triple.
+                try:
+                    y_mean, y_min, y_max = ret
+                except TypeError:
+                    y_mean, y_min, y_max = ret, None, None
+                row_mean += [y_mean]
+                row_min += [y_min]
+                row_max += [y_max]
+            df.loc[len(df)] = list(x) + row_mean + row_min + row_max
+
+        if bench.plot_name:
+            plt.figure()
+            ax = plt.subplot()
+            # Plot first x value on x axis if there are multiple.
+            first_x = x_names[0]
+            for i, y in enumerate(bench.line_names):
+                y_min, y_max = df[y + '-min'], df[y + '-max']
+                col = bench.styles[i][0] if bench.styles else None
+                sty = bench.styles[i][1] if bench.styles else None
+                ax.plot(df[first_x], df[y], label=y, color=col, ls=sty)
+                # Shade the min/max band only when both bounds were reported.
+                if not y_min.isnull().all() and not y_max.isnull().all():
+                    y_min = y_min.astype(float)
+                    y_max = y_max.astype(float)
+                    ax.fill_between(df[first_x], y_min, y_max, alpha=0.15, color=col)
+            ax.legend()
+            ax.set_xlabel(bench.xlabel or first_x)
+            ax.set_ylabel(bench.ylabel)
+            # ax.set_title(bench.plot_name)
+            ax.set_xscale("log" if bench.x_log else "linear")
+            ax.set_yscale("log" if bench.y_log else "linear")
+            if show_plots:
+                plt.show()
+            if save_path:
+                plt.savefig(os.path.join(save_path, f"{bench.plot_name}.png"))
+        df = df[x_names + bench.line_names]
+        if diff_col and df.shape[1] == 2:
+            col0, col1 = df.columns.tolist()
+            df['Diff'] = df[col1] - df[col0]
+
+        if print_data:
+            print(bench.plot_name + ':')
+            print(df.to_string())
+        if save_path:
+            df.to_csv(os.path.join(save_path, f"{bench.plot_name}.csv"), float_format=f"%.{save_precision}f",
+                      index=False)
+        return df
+
+    def run(self, show_plots=False, print_data=False, save_path='', return_df=False, **kwargs):
+        """Run all benchmark configs; optionally show/print/save, and return the DataFrame(s).
+
+        NOTE(review): the html.write payloads are bare newlines -- this looks
+        like stripped markup (an image-per-benchmark report); confirm intent.
+        """
+        has_single_bench = isinstance(self.benchmarks, Benchmark)
+        benchmarks = [self.benchmarks] if has_single_bench else self.benchmarks
+        result_dfs = []
+        if save_path:
+            # Create directory if it doesn't exist
+            os.makedirs(save_path, exist_ok=True)
+            html = open(os.path.join(save_path, "results.html"), "w")
+            html.write("\n")
+        for bench in benchmarks:
+            result_dfs.append(self._run(bench, save_path, show_plots, print_data, **kwargs))
+            if save_path:
+                html.write(f"\n")
+        if save_path:
+            html.write("\n")
+            html.close()
+        if return_df:
+            if has_single_bench:
+                return result_dfs[0]
+            else:
+                return result_dfs
+        return None
+
+
+def perf_report(benchmarks):
+ """
+ Mark a function for benchmarking. The benchmark can then be executed by using the :code:`.run` method on the return value.
+
+ :param benchmarks: Benchmarking configurations.
+ :type benchmarks: List of :class:`Benchmark`
+ """
+ wrapper = lambda fn: Mark(fn, benchmarks)
+ return wrapper
+
+
+def get_dram_gbps(device=None):
+    ''' return DRAM bandwidth in GB/s '''
+    import torch
+
+    from .runtime import driver
+    if not device:
+        device = torch.cuda.current_device()
+    mem_clock_khz = driver.active.utils.get_device_properties(device)["mem_clock_rate"]  # in kHz
+    bus_width = driver.active.utils.get_device_properties(device)["mem_bus_width"]
+    # x2 presumably for double-data-rate memory (TODO confirm); /8 converts
+    # bits to bytes and /1e6 scales kHz*bits to GB/s.
+    bw_gbps = mem_clock_khz * bus_width * 2 / 1e6 / 8  # In GB/s
+    return bw_gbps
+
+
+def get_max_tensorcore_tflops(dtype, clock_rate, device=None):
+    """Return peak tensor-core TFLOPS for *dtype* at *clock_rate*.
+
+    NOTE(review): clock_rate is presumably in kHz given the 1e-9 scaling --
+    confirm against callers.
+    """
+    import torch
+
+    from .runtime import driver
+    if not device:
+        device = torch.cuda.current_device()
+
+    num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4
+    capability = torch.cuda.get_device_capability(device)
+    if capability[0] < 8:
+        # Pre-Ampere path only supports fp16 here.
+        assert dtype == torch.float16
+        ops_per_sub_core = 256  # 2 4x4x4 Tensor Cores
+    else:
+        if dtype in [torch.float32, torch.int32]:
+            ops_per_sub_core = 256
+        elif dtype in [torch.float16, torch.bfloat16, torch.int16]:
+            ops_per_sub_core = 512
+        elif dtype in [torch.int8, tl.float8e4nv, tl.float8e4b15, tl.float8e5]:
+            ops_per_sub_core = 1024
+        else:
+            raise RuntimeError("dtype not supported")
+    tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
+    return tflops
+
+
+# create decorator that wraps test function into
+# a cuda-memcheck system call
+
+
+def cuda_memcheck(**target_kwargs):
+    """Decorator factory: re-run the decorated pytest test under cuda-memcheck
+    when the test's kwargs are a superset of *target_kwargs*."""
+
+    def decorator(test_fn):
+
+        @functools.wraps(test_fn)
+        def wrapper(*args, **kwargs):
+            import psutil
+            ppid_name = psutil.Process(os.getppid()).name()
+            run_cuda_memcheck = target_kwargs.items() <= kwargs.items()
+            # Only wrap once: skip re-exec when already running under cuda-memcheck.
+            if run_cuda_memcheck and ppid_name != "cuda-memcheck":
+                path = os.path.realpath(test_fn.__globals__["__file__"])
+                # get path of current file
+                env = {"PATH": os.environ["PATH"], "PYTORCH_NO_CUDA_MEMORY_CACHING": "1"}
+                assert 'request' in kwargs, "memcheck'ed test must have a (possibly unused) `request` fixture"
+                test_id = kwargs['request'].node.callspec.id
+                cmd = f"{path}::{test_fn.__name__}[{test_id}]"
+                out = subprocess.run(["cuda-memcheck", "pytest", "-vs", cmd], capture_output=True, env=env)
+                assert out.returncode == 0, "cuda-memcheck returned an error: bounds checking failed"
+                assert "ERROR SUMMARY: 0 errors" in str(out.stdout)
+            else:
+                test_fn(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
+
+
+@contextmanager
+def set_gpu_clock(ref_sm_clock=1350, ref_mem_clock=1215):
+ try:
+ subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "1"])
+ subprocess.check_output([
+ "nvidia-smi",
+ "-i",
+ "0",
+ f"--lock-gpu-clocks={ref_sm_clock},{ref_sm_clock}",
+ ])
+ subprocess.check_output([
+ "nvidia-smi",
+ "-i",
+ "0",
+ f"--lock-memory-clocks={ref_mem_clock},{ref_mem_clock}",
+ ])
+ cur_sm_clock = nvsmi(["clocks.current.sm"])[0]
+ cur_mem_clock = nvsmi(["clocks.current.memory"])[0]
+ assert abs(cur_sm_clock - ref_sm_clock) < 10, f"GPU SMs must run at {ref_sm_clock} MHz"
+ assert abs(cur_mem_clock - ref_mem_clock) < 10, f"GPU SMs must run at {ref_mem_clock} MHz"
+ tflops = 1e-6 * 2 * 108 * 4 * 256 * ref_sm_clock
+ gbps = 640 * 2 * ref_mem_clock * 1e-3
+ yield tflops, gbps
+ finally:
+ subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "0"])
+ subprocess.check_output(["nvidia-smi", "-i", "0", "-rgc"])
+ subprocess.check_output(["nvidia-smi", "-i", "0", "-rmc"])
+
+
+def get_max_simd_tflops(dtype, clock_rate, device=None):
+ import torch
+
+ from .runtime import driver
+ if not device:
+ device = torch.cuda.current_device()
+
+ num_subcores = driver.active.utils.get_device_properties(device)["multiprocessor_count"] * 4
+ capability = torch.cuda.get_device_capability()
+ if capability[0] < 8:
+ if dtype == torch.float32:
+ ops_per_sub_core = 32 # 2*16
+ elif dtype == torch.float16:
+ ops_per_sub_core = 64
+ else:
+ raise RuntimeError("dtype not supported")
+ else:
+ if dtype == torch.float32:
+ ops_per_sub_core = 32
+ elif dtype in [torch.float16, torch.bfloat16]:
+ ops_per_sub_core = 64
+ else:
+ raise RuntimeError("dtype not supported")
+ tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
+ return tflops