content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
import functools\nimport inspect\nimport re\nimport textwrap\n\nimport pytest\n\nimport pkg_resources\n\nfrom .test_resources import Metadata\n\n\ndef strip_comments(s):\n return '\n'.join(\n line\n for line in s.split('\n')\n if line.strip() and not line.strip().startswith('#')\n )\n\n\ndef parse_distributions(s):\n """\n Parse a series of distribution specs of the form:\n {project_name}-{version}\n [optional, indented requirements specification]\n\n Example:\n\n foo-0.2\n bar-1.0\n foo>=3.0\n [feature]\n baz\n\n yield 2 distributions:\n - project_name=foo, version=0.2\n - project_name=bar, version=1.0,\n requires=['foo>=3.0', 'baz; extra=="feature"']\n """\n s = s.strip()\n for spec in re.split(r'\n(?=[^\s])', s):\n if not spec:\n continue\n fields = spec.split('\n', 1)\n assert 1 <= len(fields) <= 2\n name, version = fields.pop(0).rsplit('-', 1)\n if fields:\n requires = textwrap.dedent(fields.pop(0))\n metadata = Metadata(('requires.txt', requires))\n else:\n metadata = None\n dist = pkg_resources.Distribution(\n project_name=name, version=version, metadata=metadata\n )\n yield dist\n\n\nclass FakeInstaller:\n def __init__(self, installable_dists) -> None:\n self._installable_dists = installable_dists\n\n def __call__(self, req):\n return next(\n iter(filter(lambda dist: dist in req, self._installable_dists)), None\n )\n\n\ndef parametrize_test_working_set_resolve(*test_list):\n idlist = []\n argvalues = []\n for test in test_list:\n (\n name,\n installed_dists,\n installable_dists,\n requirements,\n expected1,\n expected2,\n ) = (\n strip_comments(s.lstrip())\n for s in textwrap.dedent(test).lstrip().split('\n\n', 5)\n )\n installed_dists = list(parse_distributions(installed_dists))\n installable_dists = list(parse_distributions(installable_dists))\n requirements = list(pkg_resources.parse_requirements(requirements))\n for id_, replace_conflicting, expected in (\n (name, False, expected1),\n (name + '_replace_conflicting', True, expected2),\n ):\n 
idlist.append(id_)\n expected = strip_comments(expected.strip())\n if re.match(r'\w+$', expected):\n expected = getattr(pkg_resources, expected)\n assert issubclass(expected, Exception)\n else:\n expected = list(parse_distributions(expected))\n argvalues.append(\n pytest.param(\n installed_dists,\n installable_dists,\n requirements,\n replace_conflicting,\n expected,\n )\n )\n return pytest.mark.parametrize(\n (\n "installed_dists",\n "installable_dists",\n "requirements",\n "replace_conflicting",\n "resolved_dists_or_exception",\n ),\n argvalues,\n ids=idlist,\n )\n\n\n@parametrize_test_working_set_resolve(\n """\n # id\n noop\n\n # installed\n\n # installable\n\n # wanted\n\n # resolved\n\n # resolved [replace conflicting]\n """,\n """\n # id\n already_installed\n\n # installed\n foo-3.0\n\n # installable\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n foo-3.0\n\n # resolved [replace conflicting]\n foo-3.0\n """,\n """\n # id\n installable_not_installed\n\n # installed\n\n # installable\n foo-3.0\n foo-4.0\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n foo-3.0\n\n # resolved [replace conflicting]\n foo-3.0\n """,\n """\n # id\n not_installable\n\n # installed\n\n # installable\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n DistributionNotFound\n\n # resolved [replace conflicting]\n DistributionNotFound\n """,\n """\n # id\n no_matching_version\n\n # installed\n\n # installable\n foo-3.1\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n DistributionNotFound\n\n # resolved [replace conflicting]\n DistributionNotFound\n """,\n """\n # id\n installable_with_installed_conflict\n\n # installed\n foo-3.1\n\n # installable\n foo-3.5\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n foo-3.5\n """,\n """\n # id\n not_installable_with_installed_conflict\n\n # installed\n foo-3.1\n\n # installable\n\n # wanted\n foo>=2.1,!=3.1,<4\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n 
DistributionNotFound\n """,\n """\n # id\n installed_with_installed_require\n\n # installed\n foo-3.9\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # installable\n\n # wanted\n baz\n\n # resolved\n foo-3.9\n baz-0.1\n\n # resolved [replace conflicting]\n foo-3.9\n baz-0.1\n """,\n """\n # id\n installed_with_conflicting_installed_require\n\n # installed\n foo-5\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # installable\n\n # wanted\n baz\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n DistributionNotFound\n """,\n """\n # id\n installed_with_installable_conflicting_require\n\n # installed\n foo-5\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # installable\n foo-2.9\n\n # wanted\n baz\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n baz-0.1\n foo-2.9\n """,\n """\n # id\n installed_with_installable_require\n\n # installed\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # installable\n foo-3.9\n\n # wanted\n baz\n\n # resolved\n foo-3.9\n baz-0.1\n\n # resolved [replace conflicting]\n foo-3.9\n baz-0.1\n """,\n """\n # id\n installable_with_installed_require\n\n # installed\n foo-3.9\n\n # installable\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # wanted\n baz\n\n # resolved\n foo-3.9\n baz-0.1\n\n # resolved [replace conflicting]\n foo-3.9\n baz-0.1\n """,\n """\n # id\n installable_with_installable_require\n\n # installed\n\n # installable\n foo-3.9\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # wanted\n baz\n\n # resolved\n foo-3.9\n baz-0.1\n\n # resolved [replace conflicting]\n foo-3.9\n baz-0.1\n """,\n """\n # id\n installable_with_conflicting_installable_require\n\n # installed\n foo-5\n\n # installable\n foo-2.9\n baz-0.1\n foo>=2.1,!=3.1,<4\n\n # wanted\n baz\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n baz-0.1\n foo-2.9\n """,\n """\n # id\n conflicting_installables\n\n # installed\n\n # installable\n foo-2.9\n foo-5.0\n\n # wanted\n foo>=2.1,!=3.1,<4\n foo>=4\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n VersionConflict\n 
""",\n """\n # id\n installables_with_conflicting_requires\n\n # installed\n\n # installable\n foo-2.9\n dep==1.0\n baz-5.0\n dep==2.0\n dep-1.0\n dep-2.0\n\n # wanted\n foo\n baz\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n VersionConflict\n """,\n """\n # id\n installables_with_conflicting_nested_requires\n\n # installed\n\n # installable\n foo-2.9\n dep1\n dep1-1.0\n subdep<1.0\n baz-5.0\n dep2\n dep2-1.0\n subdep>1.0\n subdep-0.9\n subdep-1.1\n\n # wanted\n foo\n baz\n\n # resolved\n VersionConflict\n\n # resolved [replace conflicting]\n VersionConflict\n """,\n """\n # id\n wanted_normalized_name_installed_canonical\n\n # installed\n foo.bar-3.6\n\n # installable\n\n # wanted\n foo-bar==3.6\n\n # resolved\n foo.bar-3.6\n\n # resolved [replace conflicting]\n foo.bar-3.6\n """,\n)\ndef test_working_set_resolve(\n installed_dists,\n installable_dists,\n requirements,\n replace_conflicting,\n resolved_dists_or_exception,\n):\n ws = pkg_resources.WorkingSet([])\n list(map(ws.add, installed_dists))\n resolve_call = functools.partial(\n ws.resolve,\n requirements,\n installer=FakeInstaller(installable_dists),\n replace_conflicting=replace_conflicting,\n )\n if inspect.isclass(resolved_dists_or_exception):\n with pytest.raises(resolved_dists_or_exception):\n resolve_call()\n else:\n assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)\n
.venv\Lib\site-packages\pkg_resources\tests\test_working_set.py
test_working_set.py
Python
8,602
0.95
0.033663
0.271357
vue-tools
32
2023-11-26T03:24:08.266048
MIT
true
862ce203a2afd91e3166ec9cfb108927
import setuptools\n\nsetuptools.setup(\n name="my-test-package",\n version="1.0",\n zip_safe=True,\n)\n
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package-source\setup.py
setup.py
Python
105
0.85
0
0
node-utils
927
2025-06-24T06:49:47.795888
BSD-3-Clause
true
28cfd701ce059b98480f845daca8b26d
\n\n
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package-source\__pycache__\setup.cpython-313.pyc
setup.cpython-313.pyc
Other
360
0.7
0
0
python-kit
76
2024-04-23T02:21:59.351631
BSD-3-Clause
true
a6c2ac89488da00a40e7f0228e9362e3
PK\n
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package-zip\my-test-package.zip
my-test-package.zip
Other
1,809
0.8
0
0.052632
vue-tools
116
2024-08-29T19:12:47.353137
Apache-2.0
true
939fbbb4250ce30c61a3f678e3d3d364
Metadata-Version: 1.0\nName: my-test-package\nVersion: 1.0\nSummary: UNKNOWN\nHome-page: UNKNOWN\nAuthor: UNKNOWN\nAuthor-email: UNKNOWN\nLicense: UNKNOWN\nDescription: UNKNOWN\nPlatform: UNKNOWN\n
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package_unpacked-egg\my_test_package-1.0-py3.7.egg\EGG-INFO\PKG-INFO
PKG-INFO
Other
187
0.7
0
0
vue-tools
543
2024-05-30T13:37:27.132941
BSD-3-Clause
true
525adedaf9a94edbb3e245cdd7b0f448
setup.cfg\nsetup.py\nmy_test_package.egg-info/PKG-INFO\nmy_test_package.egg-info/SOURCES.txt\nmy_test_package.egg-info/dependency_links.txt\nmy_test_package.egg-info/top_level.txt\nmy_test_package.egg-info/zip-safe
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package_unpacked-egg\my_test_package-1.0-py3.7.egg\EGG-INFO\SOURCES.txt
SOURCES.txt
Other
208
0.7
0
0
awesome-app
542
2024-11-14T00:06:23.663886
GPL-3.0
true
7dac05d5bb12b83222d3008bcaa5ceac
PK
.venv\Lib\site-packages\pkg_resources\tests\data\my-test-package_zipped-egg\my_test_package-1.0-py3.7.egg
my_test_package-1.0-py3.7.egg
Other
843
0.7
0
0.2
awesome-app
725
2024-12-06T05:45:38.299341
Apache-2.0
true
3b4235b7834100addda631bf46c5fd28
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_find_distributions.cpython-313.pyc
test_find_distributions.cpython-313.pyc
Other
3,409
0.8
0
0
awesome-app
338
2025-05-23T09:45:23.408688
GPL-3.0
true
979d6f32555ba5059af940ab219d1cae
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_integration_zope_interface.cpython-313.pyc
test_integration_zope_interface.cpython-313.pyc
Other
1,964
0.95
0.088235
0
vue-tools
225
2025-05-23T10:16:02.030977
GPL-3.0
true
4fbc86fb6e5e3c5bf411deacf771c71e
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_markers.cpython-313.pyc
test_markers.cpython-313.pyc
Other
625
0.7
0
0
python-kit
53
2025-04-07T18:21:04.229778
BSD-3-Clause
true
2d37034fd050911de888bbf5d6ae43f0
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_pkg_resources.cpython-313.pyc
test_pkg_resources.cpython-313.pyc
Other
25,355
0.95
0.008547
0.026906
vue-tools
826
2023-11-24T11:23:47.871099
GPL-3.0
true
90767d4477c642334265beef2aa53714
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_resources.cpython-313.pyc
test_resources.cpython-313.pyc
Other
47,906
0.95
0.011647
0.021016
python-kit
650
2024-11-24T11:12:03.501578
Apache-2.0
true
7dc269bb1447b6d946ef8ee4984e1c6c
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\test_working_set.cpython-313.pyc
test_working_set.cpython-313.pyc
Other
10,646
0.95
0
0.318713
python-kit
653
2024-06-05T01:58:28.777023
GPL-3.0
true
e9100d87fe1bac14355b2f5f7946bd85
\n\n
.venv\Lib\site-packages\pkg_resources\tests\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
194
0.7
0
0
node-utils
233
2025-04-29T02:51:09.516013
GPL-3.0
true
a3e91a26f6e7fac80715fd3a64910e8b
"""Android."""\n\nfrom __future__ import annotations\n\nimport os\nimport re\nimport sys\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, cast\n\nfrom .api import PlatformDirsABC\n\n\nclass Android(PlatformDirsABC):\n """\n Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_.\n\n Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `version\n <platformdirs.api.PlatformDirsABC.version>`, `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n\n """\n\n @property\n def user_data_dir(self) -> str:\n """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""\n return self._append_app_name_and_version(cast("str", _android_folder()), "files")\n\n @property\n def site_data_dir(self) -> str:\n """:return: data directory shared by users, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def user_config_dir(self) -> str:\n """\n :return: config directory tied to the user, e.g. 
\\n ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``\n """\n return self._append_app_name_and_version(cast("str", _android_folder()), "shared_prefs")\n\n @property\n def site_config_dir(self) -> str:\n """:return: config directory shared by the users, same as `user_config_dir`"""\n return self.user_config_dir\n\n @property\n def user_cache_dir(self) -> str:\n """:return: cache directory tied to the user, e.g.,``/data/user/<userid>/<packagename>/cache/<AppName>``"""\n return self._append_app_name_and_version(cast("str", _android_folder()), "cache")\n\n @property\n def site_cache_dir(self) -> str:\n """:return: cache directory shared by users, same as `user_cache_dir`"""\n return self.user_cache_dir\n\n @property\n def user_state_dir(self) -> str:\n """:return: state directory tied to the user, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def user_log_dir(self) -> str:\n """\n :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,\n e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``\n """\n path = self.user_cache_dir\n if self.opinion:\n path = os.path.join(path, "log") # noqa: PTH118\n return path\n\n @property\n def user_documents_dir(self) -> str:\n """:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``"""\n return _android_documents_folder()\n\n @property\n def user_downloads_dir(self) -> str:\n """:return: downloads directory tied to the user e.g. ``/storage/emulated/0/Downloads``"""\n return _android_downloads_folder()\n\n @property\n def user_pictures_dir(self) -> str:\n """:return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``"""\n return _android_pictures_folder()\n\n @property\n def user_videos_dir(self) -> str:\n """:return: videos directory tied to the user e.g. 
``/storage/emulated/0/DCIM/Camera``"""\n return _android_videos_folder()\n\n @property\n def user_music_dir(self) -> str:\n """:return: music directory tied to the user e.g. ``/storage/emulated/0/Music``"""\n return _android_music_folder()\n\n @property\n def user_desktop_dir(self) -> str:\n """:return: desktop directory tied to the user e.g. ``/storage/emulated/0/Desktop``"""\n return "/storage/emulated/0/Desktop"\n\n @property\n def user_runtime_dir(self) -> str:\n """\n :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,\n e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``\n """\n path = self.user_cache_dir\n if self.opinion:\n path = os.path.join(path, "tmp") # noqa: PTH118\n return path\n\n @property\n def site_runtime_dir(self) -> str:\n """:return: runtime directory shared by users, same as `user_runtime_dir`"""\n return self.user_runtime_dir\n\n\n@lru_cache(maxsize=1)\ndef _android_folder() -> str | None: # noqa: C901\n """:return: base folder for the Android OS or None if it cannot be found"""\n result: str | None = None\n # type checker isn't happy with our "import android", just don't do this when type checking see\n # https://stackoverflow.com/a/61394121\n if not TYPE_CHECKING:\n try:\n # First try to get a path to android app using python4android (if available)...\n from android import mActivity # noqa: PLC0415\n\n context = cast("android.content.Context", mActivity.getApplicationContext()) # noqa: F821\n result = context.getFilesDir().getParentFile().getAbsolutePath()\n except Exception: # noqa: BLE001\n result = None\n if result is None:\n try:\n # ...and fall back to using plain pyjnius, if python4android isn't available or doesn't deliver any useful\n # result...\n from jnius import autoclass # noqa: PLC0415\n\n context = autoclass("android.content.Context")\n result = context.getFilesDir().getParentFile().getAbsolutePath()\n except Exception: # noqa: BLE001\n result = None\n if 
result is None:\n # and if that fails, too, find an android folder looking at path on the sys.path\n # warning: only works for apps installed under /data, not adopted storage etc.\n pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")\n for path in sys.path:\n if pattern.match(path):\n result = path.split("/files")[0]\n break\n else:\n result = None\n if result is None:\n # one last try: find an android folder looking at path on the sys.path taking adopted storage paths into\n # account\n pattern = re.compile(r"/mnt/expand/[a-fA-F0-9-]{36}/(data|user/\d+)/(.+)/files")\n for path in sys.path:\n if pattern.match(path):\n result = path.split("/files")[0]\n break\n else:\n result = None\n return result\n\n\n@lru_cache(maxsize=1)\ndef _android_documents_folder() -> str:\n """:return: documents folder for the Android OS"""\n # Get directories with pyjnius\n try:\n from jnius import autoclass # noqa: PLC0415\n\n context = autoclass("android.content.Context")\n environment = autoclass("android.os.Environment")\n documents_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOCUMENTS).getAbsolutePath()\n except Exception: # noqa: BLE001\n documents_dir = "/storage/emulated/0/Documents"\n\n return documents_dir\n\n\n@lru_cache(maxsize=1)\ndef _android_downloads_folder() -> str:\n """:return: downloads folder for the Android OS"""\n # Get directories with pyjnius\n try:\n from jnius import autoclass # noqa: PLC0415\n\n context = autoclass("android.content.Context")\n environment = autoclass("android.os.Environment")\n downloads_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOWNLOADS).getAbsolutePath()\n except Exception: # noqa: BLE001\n downloads_dir = "/storage/emulated/0/Downloads"\n\n return downloads_dir\n\n\n@lru_cache(maxsize=1)\ndef _android_pictures_folder() -> str:\n """:return: pictures folder for the Android OS"""\n # Get directories with pyjnius\n try:\n from jnius import autoclass # noqa: PLC0415\n\n context = 
autoclass("android.content.Context")\n environment = autoclass("android.os.Environment")\n pictures_dir: str = context.getExternalFilesDir(environment.DIRECTORY_PICTURES).getAbsolutePath()\n except Exception: # noqa: BLE001\n pictures_dir = "/storage/emulated/0/Pictures"\n\n return pictures_dir\n\n\n@lru_cache(maxsize=1)\ndef _android_videos_folder() -> str:\n """:return: videos folder for the Android OS"""\n # Get directories with pyjnius\n try:\n from jnius import autoclass # noqa: PLC0415\n\n context = autoclass("android.content.Context")\n environment = autoclass("android.os.Environment")\n videos_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DCIM).getAbsolutePath()\n except Exception: # noqa: BLE001\n videos_dir = "/storage/emulated/0/DCIM/Camera"\n\n return videos_dir\n\n\n@lru_cache(maxsize=1)\ndef _android_music_folder() -> str:\n """:return: music folder for the Android OS"""\n # Get directories with pyjnius\n try:\n from jnius import autoclass # noqa: PLC0415\n\n context = autoclass("android.content.Context")\n environment = autoclass("android.os.Environment")\n music_dir: str = context.getExternalFilesDir(environment.DIRECTORY_MUSIC).getAbsolutePath()\n except Exception: # noqa: BLE001\n music_dir = "/storage/emulated/0/Music"\n\n return music_dir\n\n\n__all__ = [\n "Android",\n]\n
.venv\Lib\site-packages\platformdirs\android.py
android.py
Python
9,013
0.95
0.220884
0.07
awesome-app
520
2025-03-28T05:49:26.402300
Apache-2.0
false
4d7d8fbd0cfc9de62d8e6317079fe2f9
"""Base API."""\n\nfrom __future__ import annotations\n\nimport os\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n from typing import Literal\n\n\nclass PlatformDirsABC(ABC): # noqa: PLR0904\n """Abstract base class for platform directories."""\n\n def __init__( # noqa: PLR0913, PLR0917\n self,\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n multipath: bool = False, # noqa: FBT001, FBT002\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n ) -> None:\n """\n Create a new platform directory.\n\n :param appname: See `appname`.\n :param appauthor: See `appauthor`.\n :param version: See `version`.\n :param roaming: See `roaming`.\n :param multipath: See `multipath`.\n :param opinion: See `opinion`.\n :param ensure_exists: See `ensure_exists`.\n\n """\n self.appname = appname #: The name of application.\n self.appauthor = appauthor\n """\n The name of the app author or distributing body for this application.\n\n Typically, it is the owning company name. Defaults to `appname`. You may pass ``False`` to disable it.\n\n """\n self.version = version\n """\n An optional version path element to append to the path.\n\n You might want to use this if you want multiple versions of your app to be able to run independently. 
If used,\n this would typically be ``<major>.<minor>``.\n\n """\n self.roaming = roaming\n """\n Whether to use the roaming appdata directory on Windows.\n\n That means that for users on a Windows network setup for roaming profiles, this user data will be synced on\n login (see\n `here <https://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).\n\n """\n self.multipath = multipath\n """\n An optional parameter which indicates that the entire list of data dirs should be returned.\n\n By default, the first item would only be returned.\n\n """\n self.opinion = opinion #: A flag to indicating to use opinionated values.\n self.ensure_exists = ensure_exists\n """\n Optionally create the directory (and any missing parents) upon access if it does not exist.\n\n By default, no directories are created.\n\n """\n\n def _append_app_name_and_version(self, *base: str) -> str:\n params = list(base[1:])\n if self.appname:\n params.append(self.appname)\n if self.version:\n params.append(self.version)\n path = os.path.join(base[0], *params) # noqa: PTH118\n self._optionally_create_directory(path)\n return path\n\n def _optionally_create_directory(self, path: str) -> None:\n if self.ensure_exists:\n Path(path).mkdir(parents=True, exist_ok=True)\n\n def _first_item_as_path_if_multipath(self, directory: str) -> Path:\n if self.multipath:\n # If multipath is True, the first path is returned.\n directory = directory.split(os.pathsep)[0]\n return Path(directory)\n\n @property\n @abstractmethod\n def user_data_dir(self) -> str:\n """:return: data directory tied to the user"""\n\n @property\n @abstractmethod\n def site_data_dir(self) -> str:\n """:return: data directory shared by users"""\n\n @property\n @abstractmethod\n def user_config_dir(self) -> str:\n """:return: config directory tied to the user"""\n\n @property\n @abstractmethod\n def site_config_dir(self) -> str:\n """:return: config directory shared by the users"""\n\n @property\n @abstractmethod\n def 
user_cache_dir(self) -> str:\n """:return: cache directory tied to the user"""\n\n @property\n @abstractmethod\n def site_cache_dir(self) -> str:\n """:return: cache directory shared by users"""\n\n @property\n @abstractmethod\n def user_state_dir(self) -> str:\n """:return: state directory tied to the user"""\n\n @property\n @abstractmethod\n def user_log_dir(self) -> str:\n """:return: log directory tied to the user"""\n\n @property\n @abstractmethod\n def user_documents_dir(self) -> str:\n """:return: documents directory tied to the user"""\n\n @property\n @abstractmethod\n def user_downloads_dir(self) -> str:\n """:return: downloads directory tied to the user"""\n\n @property\n @abstractmethod\n def user_pictures_dir(self) -> str:\n """:return: pictures directory tied to the user"""\n\n @property\n @abstractmethod\n def user_videos_dir(self) -> str:\n """:return: videos directory tied to the user"""\n\n @property\n @abstractmethod\n def user_music_dir(self) -> str:\n """:return: music directory tied to the user"""\n\n @property\n @abstractmethod\n def user_desktop_dir(self) -> str:\n """:return: desktop directory tied to the user"""\n\n @property\n @abstractmethod\n def user_runtime_dir(self) -> str:\n """:return: runtime directory tied to the user"""\n\n @property\n @abstractmethod\n def site_runtime_dir(self) -> str:\n """:return: runtime directory shared by users"""\n\n @property\n def user_data_path(self) -> Path:\n """:return: data path tied to the user"""\n return Path(self.user_data_dir)\n\n @property\n def site_data_path(self) -> Path:\n """:return: data path shared by users"""\n return Path(self.site_data_dir)\n\n @property\n def user_config_path(self) -> Path:\n """:return: config path tied to the user"""\n return Path(self.user_config_dir)\n\n @property\n def site_config_path(self) -> Path:\n """:return: config path shared by the users"""\n return Path(self.site_config_dir)\n\n @property\n def user_cache_path(self) -> Path:\n """:return: cache path 
tied to the user"""\n return Path(self.user_cache_dir)\n\n @property\n def site_cache_path(self) -> Path:\n """:return: cache path shared by users"""\n return Path(self.site_cache_dir)\n\n @property\n def user_state_path(self) -> Path:\n """:return: state path tied to the user"""\n return Path(self.user_state_dir)\n\n @property\n def user_log_path(self) -> Path:\n """:return: log path tied to the user"""\n return Path(self.user_log_dir)\n\n @property\n def user_documents_path(self) -> Path:\n """:return: documents a path tied to the user"""\n return Path(self.user_documents_dir)\n\n @property\n def user_downloads_path(self) -> Path:\n """:return: downloads path tied to the user"""\n return Path(self.user_downloads_dir)\n\n @property\n def user_pictures_path(self) -> Path:\n """:return: pictures path tied to the user"""\n return Path(self.user_pictures_dir)\n\n @property\n def user_videos_path(self) -> Path:\n """:return: videos path tied to the user"""\n return Path(self.user_videos_dir)\n\n @property\n def user_music_path(self) -> Path:\n """:return: music path tied to the user"""\n return Path(self.user_music_dir)\n\n @property\n def user_desktop_path(self) -> Path:\n """:return: desktop path tied to the user"""\n return Path(self.user_desktop_dir)\n\n @property\n def user_runtime_path(self) -> Path:\n """:return: runtime path tied to the user"""\n return Path(self.user_runtime_dir)\n\n @property\n def site_runtime_path(self) -> Path:\n """:return: runtime path shared by users"""\n return Path(self.site_runtime_dir)\n\n def iter_config_dirs(self) -> Iterator[str]:\n """:yield: all user and site configuration directories."""\n yield self.user_config_dir\n yield self.site_config_dir\n\n def iter_data_dirs(self) -> Iterator[str]:\n """:yield: all user and site data directories."""\n yield self.user_data_dir\n yield self.site_data_dir\n\n def iter_cache_dirs(self) -> Iterator[str]:\n """:yield: all user and site cache directories."""\n yield self.user_cache_dir\n 
yield self.site_cache_dir\n\n def iter_runtime_dirs(self) -> Iterator[str]:\n """:yield: all user and site runtime directories."""\n yield self.user_runtime_dir\n yield self.site_runtime_dir\n\n def iter_config_paths(self) -> Iterator[Path]:\n """:yield: all user and site configuration paths."""\n for path in self.iter_config_dirs():\n yield Path(path)\n\n def iter_data_paths(self) -> Iterator[Path]:\n """:yield: all user and site data paths."""\n for path in self.iter_data_dirs():\n yield Path(path)\n\n def iter_cache_paths(self) -> Iterator[Path]:\n """:yield: all user and site cache paths."""\n for path in self.iter_cache_dirs():\n yield Path(path)\n\n def iter_runtime_paths(self) -> Iterator[Path]:\n """:yield: all user and site runtime paths."""\n for path in self.iter_runtime_dirs():\n yield Path(path)\n
.venv\Lib\site-packages\platformdirs\api.py
api.py
Python
9,277
0.95
0.204013
0.004202
react-lib
76
2025-04-04T08:18:49.850362
GPL-3.0
false
8337f99effafca4ee834e11b6e26aa49
"""macOS."""\n\nfrom __future__ import annotations\n\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom .api import PlatformDirsABC\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass MacOS(PlatformDirsABC):\n """\n Platform directories for the macOS operating system.\n\n Follows the guidance from\n `Apple documentation <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.\n Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`,\n `version <platformdirs.api.PlatformDirsABC.version>`,\n `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n\n """\n\n @property\n def user_data_dir(self) -> str:\n """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""\n return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support")) # noqa: PTH111\n\n @property\n def site_data_dir(self) -> str:\n """\n :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``.\n If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory\n will be under the Homebrew prefix, e.g. ``/opt/homebrew/share/$appname/$version``.\n If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,\n the response is a multi-path string separated by ":", e.g.\n ``/opt/homebrew/share/$appname/$version:/Library/Application Support/$appname/$version``\n """\n is_homebrew = sys.prefix.startswith("/opt/homebrew")\n path_list = [self._append_app_name_and_version("/opt/homebrew/share")] if is_homebrew else []\n path_list.append(self._append_app_name_and_version("/Library/Application Support"))\n if self.multipath:\n return os.pathsep.join(path_list)\n return path_list[0]\n\n @property\n def site_data_path(self) -> Path:\n """:return: data path shared by users. 
Only return the first item, even if ``multipath`` is set to ``True``"""\n return self._first_item_as_path_if_multipath(self.site_data_dir)\n\n @property\n def user_config_dir(self) -> str:\n """:return: config directory tied to the user, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def site_config_dir(self) -> str:\n """:return: config directory shared by the users, same as `site_data_dir`"""\n return self.site_data_dir\n\n @property\n def user_cache_dir(self) -> str:\n """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""\n return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) # noqa: PTH111\n\n @property\n def site_cache_dir(self) -> str:\n """\n :return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``.\n If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory\n will be under the Homebrew prefix, e.g. ``/opt/homebrew/var/cache/$appname/$version``.\n If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,\n the response is a multi-path string separated by ":", e.g.\n ``/opt/homebrew/var/cache/$appname/$version:/Library/Caches/$appname/$version``\n """\n is_homebrew = sys.prefix.startswith("/opt/homebrew")\n path_list = [self._append_app_name_and_version("/opt/homebrew/var/cache")] if is_homebrew else []\n path_list.append(self._append_app_name_and_version("/Library/Caches"))\n if self.multipath:\n return os.pathsep.join(path_list)\n return path_list[0]\n\n @property\n def site_cache_path(self) -> Path:\n """:return: cache path shared by users. 
Only return the first item, even if ``multipath`` is set to ``True``"""\n return self._first_item_as_path_if_multipath(self.site_cache_dir)\n\n @property\n def user_state_dir(self) -> str:\n """:return: state directory tied to the user, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def user_log_dir(self) -> str:\n """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""\n return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) # noqa: PTH111\n\n @property\n def user_documents_dir(self) -> str:\n """:return: documents directory tied to the user, e.g. ``~/Documents``"""\n return os.path.expanduser("~/Documents") # noqa: PTH111\n\n @property\n def user_downloads_dir(self) -> str:\n """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""\n return os.path.expanduser("~/Downloads") # noqa: PTH111\n\n @property\n def user_pictures_dir(self) -> str:\n """:return: pictures directory tied to the user, e.g. ``~/Pictures``"""\n return os.path.expanduser("~/Pictures") # noqa: PTH111\n\n @property\n def user_videos_dir(self) -> str:\n """:return: videos directory tied to the user, e.g. ``~/Movies``"""\n return os.path.expanduser("~/Movies") # noqa: PTH111\n\n @property\n def user_music_dir(self) -> str:\n """:return: music directory tied to the user, e.g. ``~/Music``"""\n return os.path.expanduser("~/Music") # noqa: PTH111\n\n @property\n def user_desktop_dir(self) -> str:\n """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""\n return os.path.expanduser("~/Desktop") # noqa: PTH111\n\n @property\n def user_runtime_dir(self) -> str:\n """:return: runtime directory tied to the user, e.g. 
``~/Library/Caches/TemporaryItems/$appname/$version``"""\n return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) # noqa: PTH111\n\n @property\n def site_runtime_dir(self) -> str:\n """:return: runtime directory shared by users, same as `user_runtime_dir`"""\n return self.user_runtime_dir\n\n\n__all__ = [\n "MacOS",\n]\n
.venv\Lib\site-packages\platformdirs\macos.py
macos.py
Python
6,154
0.95
0.1875
0
vue-tools
951
2023-08-28T15:29:54.228790
GPL-3.0
false
d57523d85c284a26e794098ac0e6f249
"""Unix."""\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, NoReturn\n\nfrom .api import PlatformDirsABC\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n\nif sys.platform == "win32":\n\n def getuid() -> NoReturn:\n msg = "should only be used on Unix"\n raise RuntimeError(msg)\n\nelse:\n from os import getuid\n\n\nclass Unix(PlatformDirsABC): # noqa: PLR0904\n """\n On Unix/Linux, we follow the `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-\n latest.html>`_.\n\n The spec allows overriding directories with environment variables. The examples shown are the default values,\n alongside the name of the environment variable that overrides them. Makes use of the `appname\n <platformdirs.api.PlatformDirsABC.appname>`, `version <platformdirs.api.PlatformDirsABC.version>`, `multipath\n <platformdirs.api.PlatformDirsABC.multipath>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`, `ensure_exists\n <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n\n """\n\n @property\n def user_data_dir(self) -> str:\n """\n :return: data directory tied to the user, e.g. 
``~/.local/share/$appname/$version`` or\n ``$XDG_DATA_HOME/$appname/$version``\n """\n path = os.environ.get("XDG_DATA_HOME", "")\n if not path.strip():\n path = os.path.expanduser("~/.local/share") # noqa: PTH111\n return self._append_app_name_and_version(path)\n\n @property\n def _site_data_dirs(self) -> list[str]:\n path = os.environ.get("XDG_DATA_DIRS", "")\n if not path.strip():\n path = f"/usr/local/share{os.pathsep}/usr/share"\n return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]\n\n @property\n def site_data_dir(self) -> str:\n """\n :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is\n enabled and ``XDG_DATA_DIRS`` is set and a multi path the response is also a multi path separated by the\n OS path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``\n """\n # XDG default for $XDG_DATA_DIRS; only first, if multipath is False\n dirs = self._site_data_dirs\n if not self.multipath:\n return dirs[0]\n return os.pathsep.join(dirs)\n\n @property\n def user_config_dir(self) -> str:\n """\n :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or\n ``$XDG_CONFIG_HOME/$appname/$version``\n """\n path = os.environ.get("XDG_CONFIG_HOME", "")\n if not path.strip():\n path = os.path.expanduser("~/.config") # noqa: PTH111\n return self._append_app_name_and_version(path)\n\n @property\n def _site_config_dirs(self) -> list[str]:\n path = os.environ.get("XDG_CONFIG_DIRS", "")\n if not path.strip():\n path = "/etc/xdg"\n return [self._append_app_name_and_version(p) for p in path.split(os.pathsep)]\n\n @property\n def site_config_dir(self) -> str:\n """\n :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`\n is enabled and ``XDG_CONFIG_DIRS`` is set and a multi path the response is also a multi path separated by\n the OS path separator), e.g. 
``/etc/xdg/$appname/$version``\n """\n # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False\n dirs = self._site_config_dirs\n if not self.multipath:\n return dirs[0]\n return os.pathsep.join(dirs)\n\n @property\n def user_cache_dir(self) -> str:\n """\n :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or\n ``~/$XDG_CACHE_HOME/$appname/$version``\n """\n path = os.environ.get("XDG_CACHE_HOME", "")\n if not path.strip():\n path = os.path.expanduser("~/.cache") # noqa: PTH111\n return self._append_app_name_and_version(path)\n\n @property\n def site_cache_dir(self) -> str:\n """:return: cache directory shared by users, e.g. ``/var/cache/$appname/$version``"""\n return self._append_app_name_and_version("/var/cache")\n\n @property\n def user_state_dir(self) -> str:\n """\n :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or\n ``$XDG_STATE_HOME/$appname/$version``\n """\n path = os.environ.get("XDG_STATE_HOME", "")\n if not path.strip():\n path = os.path.expanduser("~/.local/state") # noqa: PTH111\n return self._append_app_name_and_version(path)\n\n @property\n def user_log_dir(self) -> str:\n """:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it"""\n path = self.user_state_dir\n if self.opinion:\n path = os.path.join(path, "log") # noqa: PTH118\n self._optionally_create_directory(path)\n return path\n\n @property\n def user_documents_dir(self) -> str:\n """:return: documents directory tied to the user, e.g. ``~/Documents``"""\n return _get_user_media_dir("XDG_DOCUMENTS_DIR", "~/Documents")\n\n @property\n def user_downloads_dir(self) -> str:\n """:return: downloads directory tied to the user, e.g. ``~/Downloads``"""\n return _get_user_media_dir("XDG_DOWNLOAD_DIR", "~/Downloads")\n\n @property\n def user_pictures_dir(self) -> str:\n """:return: pictures directory tied to the user, e.g. 
``~/Pictures``"""\n return _get_user_media_dir("XDG_PICTURES_DIR", "~/Pictures")\n\n @property\n def user_videos_dir(self) -> str:\n """:return: videos directory tied to the user, e.g. ``~/Videos``"""\n return _get_user_media_dir("XDG_VIDEOS_DIR", "~/Videos")\n\n @property\n def user_music_dir(self) -> str:\n """:return: music directory tied to the user, e.g. ``~/Music``"""\n return _get_user_media_dir("XDG_MUSIC_DIR", "~/Music")\n\n @property\n def user_desktop_dir(self) -> str:\n """:return: desktop directory tied to the user, e.g. ``~/Desktop``"""\n return _get_user_media_dir("XDG_DESKTOP_DIR", "~/Desktop")\n\n @property\n def user_runtime_dir(self) -> str:\n """\n :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or\n ``$XDG_RUNTIME_DIR/$appname/$version``.\n\n For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/user/$(id -u)/$appname/$version`` if\n exists, otherwise ``/tmp/runtime-$(id -u)/$appname/$version``, if``$XDG_RUNTIME_DIR``\n is not set.\n """\n path = os.environ.get("XDG_RUNTIME_DIR", "")\n if not path.strip():\n if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):\n path = f"/var/run/user/{getuid()}"\n if not Path(path).exists():\n path = f"/tmp/runtime-{getuid()}" # noqa: S108\n else:\n path = f"/run/user/{getuid()}"\n return self._append_app_name_and_version(path)\n\n @property\n def site_runtime_dir(self) -> str:\n """\n :return: runtime directory shared by users, e.g. ``/run/$appname/$version`` or \\n ``$XDG_RUNTIME_DIR/$appname/$version``.\n\n Note that this behaves almost exactly like `user_runtime_dir` if ``$XDG_RUNTIME_DIR`` is set, but will\n fall back to paths associated to the root user instead of a regular logged-in user if it's not set.\n\n If you wish to ensure that a logged-in root user path is returned e.g. 
``/run/user/0``, use `user_runtime_dir`\n instead.\n\n For FreeBSD/OpenBSD/NetBSD, it would return ``/var/run/$appname/$version`` if ``$XDG_RUNTIME_DIR`` is not set.\n """\n path = os.environ.get("XDG_RUNTIME_DIR", "")\n if not path.strip():\n if sys.platform.startswith(("freebsd", "openbsd", "netbsd")):\n path = "/var/run"\n else:\n path = "/run"\n return self._append_app_name_and_version(path)\n\n @property\n def site_data_path(self) -> Path:\n """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""\n return self._first_item_as_path_if_multipath(self.site_data_dir)\n\n @property\n def site_config_path(self) -> Path:\n """:return: config path shared by the users, returns the first item, even if ``multipath`` is set to ``True``"""\n return self._first_item_as_path_if_multipath(self.site_config_dir)\n\n @property\n def site_cache_path(self) -> Path:\n """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""\n return self._first_item_as_path_if_multipath(self.site_cache_dir)\n\n def iter_config_dirs(self) -> Iterator[str]:\n """:yield: all user and site configuration directories."""\n yield self.user_config_dir\n yield from self._site_config_dirs\n\n def iter_data_dirs(self) -> Iterator[str]:\n """:yield: all user and site data directories."""\n yield self.user_data_dir\n yield from self._site_data_dirs\n\n\ndef _get_user_media_dir(env_var: str, fallback_tilde_path: str) -> str:\n media_dir = _get_user_dirs_folder(env_var)\n if media_dir is None:\n media_dir = os.environ.get(env_var, "").strip()\n if not media_dir:\n media_dir = os.path.expanduser(fallback_tilde_path) # noqa: PTH111\n\n return media_dir\n\n\ndef _get_user_dirs_folder(key: str) -> str | None:\n """\n Return directory from user-dirs.dirs config file.\n\n See https://freedesktop.org/wiki/Software/xdg-user-dirs/.\n\n """\n user_dirs_config_path = Path(Unix().user_config_dir) / "user-dirs.dirs"\n 
if user_dirs_config_path.exists():\n parser = ConfigParser()\n\n with user_dirs_config_path.open() as stream:\n # Add fake section header, so ConfigParser doesn't complain\n parser.read_string(f"[top]\n{stream.read()}")\n\n if key not in parser["top"]:\n return None\n\n path = parser["top"][key].strip('"')\n # Handle relative home paths\n return path.replace("$HOME", os.path.expanduser("~")) # noqa: PTH111\n\n return None\n\n\n__all__ = [\n "Unix",\n]\n
.venv\Lib\site-packages\platformdirs\unix.py
unix.py
Python
10,458
0.95
0.235294
0.0181
awesome-app
306
2023-12-26T19:32:56.935070
MIT
false
c29478676a7a3761e48cdb01dd30d20c
# file generated by setuptools-scm\n# don't change, don't track in version control\n\n__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]\n\nTYPE_CHECKING = False\nif TYPE_CHECKING:\n from typing import Tuple\n from typing import Union\n\n VERSION_TUPLE = Tuple[Union[int, str], ...]\nelse:\n VERSION_TUPLE = object\n\nversion: str\n__version__: str\n__version_tuple__: VERSION_TUPLE\nversion_tuple: VERSION_TUPLE\n\n__version__ = version = '4.3.8'\n__version_tuple__ = version_tuple = (4, 3, 8)\n
.venv\Lib\site-packages\platformdirs\version.py
version.py
Python
511
0.95
0.047619
0.125
node-utils
507
2023-10-17T22:28:28.915612
GPL-3.0
false
6095c31584737e5adc33cd90a452b849
"""Windows."""\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING\n\nfrom .api import PlatformDirsABC\n\nif TYPE_CHECKING:\n from collections.abc import Callable\n\n\nclass Windows(PlatformDirsABC):\n """\n `MSDN on where to store app data files <https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid>`_.\n\n Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `appauthor\n <platformdirs.api.PlatformDirsABC.appauthor>`, `version <platformdirs.api.PlatformDirsABC.version>`, `roaming\n <platformdirs.api.PlatformDirsABC.roaming>`, `opinion <platformdirs.api.PlatformDirsABC.opinion>`, `ensure_exists\n <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n\n """\n\n @property\n def user_data_dir(self) -> str:\n """\n :return: data directory tied to the user, e.g.\n ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or\n ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)\n """\n const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"\n path = os.path.normpath(get_win_folder(const))\n return self._append_parts(path)\n\n def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:\n params = []\n if self.appname:\n if self.appauthor is not False:\n author = self.appauthor or self.appname\n params.append(author)\n params.append(self.appname)\n if opinion_value is not None and self.opinion:\n params.append(opinion_value)\n if self.version:\n params.append(self.version)\n path = os.path.join(path, *params) # noqa: PTH118\n self._optionally_create_directory(path)\n return path\n\n @property\n def site_data_dir(self) -> str:\n """:return: data directory shared by users, e.g. 
``C:\\ProgramData\\$appauthor\\$appname``"""\n path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))\n return self._append_parts(path)\n\n @property\n def user_config_dir(self) -> str:\n """:return: config directory tied to the user, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def site_config_dir(self) -> str:\n """:return: config directory shared by the users, same as `site_data_dir`"""\n return self.site_data_dir\n\n @property\n def user_cache_dir(self) -> str:\n """\n :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.\n ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``\n """\n path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))\n return self._append_parts(path, opinion_value="Cache")\n\n @property\n def site_cache_dir(self) -> str:\n """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``"""\n path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))\n return self._append_parts(path, opinion_value="Cache")\n\n @property\n def user_state_dir(self) -> str:\n """:return: state directory tied to the user, same as `user_data_dir`"""\n return self.user_data_dir\n\n @property\n def user_log_dir(self) -> str:\n """:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it"""\n path = self.user_data_dir\n if self.opinion:\n path = os.path.join(path, "Logs") # noqa: PTH118\n self._optionally_create_directory(path)\n return path\n\n @property\n def user_documents_dir(self) -> str:\n """:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``"""\n return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))\n\n @property\n def user_downloads_dir(self) -> str:\n """:return: downloads directory tied to the user e.g. 
``%USERPROFILE%\\Downloads``"""\n return os.path.normpath(get_win_folder("CSIDL_DOWNLOADS"))\n\n @property\n def user_pictures_dir(self) -> str:\n """:return: pictures directory tied to the user e.g. ``%USERPROFILE%\\Pictures``"""\n return os.path.normpath(get_win_folder("CSIDL_MYPICTURES"))\n\n @property\n def user_videos_dir(self) -> str:\n """:return: videos directory tied to the user e.g. ``%USERPROFILE%\\Videos``"""\n return os.path.normpath(get_win_folder("CSIDL_MYVIDEO"))\n\n @property\n def user_music_dir(self) -> str:\n """:return: music directory tied to the user e.g. ``%USERPROFILE%\\Music``"""\n return os.path.normpath(get_win_folder("CSIDL_MYMUSIC"))\n\n @property\n def user_desktop_dir(self) -> str:\n """:return: desktop directory tied to the user, e.g. ``%USERPROFILE%\\Desktop``"""\n return os.path.normpath(get_win_folder("CSIDL_DESKTOPDIRECTORY"))\n\n @property\n def user_runtime_dir(self) -> str:\n """\n :return: runtime directory tied to the user, e.g.\n ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``\n """\n path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp")) # noqa: PTH118\n return self._append_parts(path)\n\n @property\n def site_runtime_dir(self) -> str:\n """:return: runtime directory shared by users, same as `user_runtime_dir`"""\n return self.user_runtime_dir\n\n\ndef get_win_folder_from_env_vars(csidl_name: str) -> str:\n """Get folder from environment variables."""\n result = get_win_folder_if_csidl_name_not_env_var(csidl_name)\n if result is not None:\n return result\n\n env_var_name = {\n "CSIDL_APPDATA": "APPDATA",\n "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",\n "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",\n }.get(csidl_name)\n if env_var_name is None:\n msg = f"Unknown CSIDL name: {csidl_name}"\n raise ValueError(msg)\n result = os.environ.get(env_var_name)\n if result is None:\n msg = f"Unset environment variable: {env_var_name}"\n raise ValueError(msg)\n return result\n\n\ndef 
get_win_folder_if_csidl_name_not_env_var(csidl_name: str) -> str | None:\n """Get a folder for a CSIDL name that does not exist as an environment variable."""\n if csidl_name == "CSIDL_PERSONAL":\n return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents") # noqa: PTH118\n\n if csidl_name == "CSIDL_DOWNLOADS":\n return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Downloads") # noqa: PTH118\n\n if csidl_name == "CSIDL_MYPICTURES":\n return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Pictures") # noqa: PTH118\n\n if csidl_name == "CSIDL_MYVIDEO":\n return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Videos") # noqa: PTH118\n\n if csidl_name == "CSIDL_MYMUSIC":\n return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Music") # noqa: PTH118\n return None\n\n\ndef get_win_folder_from_registry(csidl_name: str) -> str:\n """\n Get folder from the registry.\n\n This is a fallback technique at best. I'm not sure if using the registry for these guarantees us the correct answer\n for all CSIDL_* names.\n\n """\n shell_folder_name = {\n "CSIDL_APPDATA": "AppData",\n "CSIDL_COMMON_APPDATA": "Common AppData",\n "CSIDL_LOCAL_APPDATA": "Local AppData",\n "CSIDL_PERSONAL": "Personal",\n "CSIDL_DOWNLOADS": "{374DE290-123F-4565-9164-39C4925E467B}",\n "CSIDL_MYPICTURES": "My Pictures",\n "CSIDL_MYVIDEO": "My Video",\n "CSIDL_MYMUSIC": "My Music",\n }.get(csidl_name)\n if shell_folder_name is None:\n msg = f"Unknown CSIDL name: {csidl_name}"\n raise ValueError(msg)\n if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows\n raise NotImplementedError\n import winreg # noqa: PLC0415\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")\n directory, _ = winreg.QueryValueEx(key, shell_folder_name)\n return str(directory)\n\n\ndef get_win_folder_via_ctypes(csidl_name: str) -> str:\n """Get folder with 
ctypes."""\n # There is no 'CSIDL_DOWNLOADS'.\n # Use 'CSIDL_PROFILE' (40) and append the default folder 'Downloads' instead.\n # https://learn.microsoft.com/en-us/windows/win32/shell/knownfolderid\n\n import ctypes # noqa: PLC0415\n\n csidl_const = {\n "CSIDL_APPDATA": 26,\n "CSIDL_COMMON_APPDATA": 35,\n "CSIDL_LOCAL_APPDATA": 28,\n "CSIDL_PERSONAL": 5,\n "CSIDL_MYPICTURES": 39,\n "CSIDL_MYVIDEO": 14,\n "CSIDL_MYMUSIC": 13,\n "CSIDL_DOWNLOADS": 40,\n "CSIDL_DESKTOPDIRECTORY": 16,\n }.get(csidl_name)\n if csidl_const is None:\n msg = f"Unknown CSIDL name: {csidl_name}"\n raise ValueError(msg)\n\n buf = ctypes.create_unicode_buffer(1024)\n windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker\n windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n\n # Downgrade to short path name if it has high-bit chars.\n if any(ord(c) > 255 for c in buf): # noqa: PLR2004\n buf2 = ctypes.create_unicode_buffer(1024)\n if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n buf = buf2\n\n if csidl_name == "CSIDL_DOWNLOADS":\n return os.path.join(buf.value, "Downloads") # noqa: PTH118\n\n return buf.value\n\n\ndef _pick_get_win_folder() -> Callable[[str], str]:\n try:\n import ctypes # noqa: PLC0415\n except ImportError:\n pass\n else:\n if hasattr(ctypes, "windll"):\n return get_win_folder_via_ctypes\n try:\n import winreg # noqa: PLC0415, F401\n except ImportError:\n return get_win_folder_from_env_vars\n else:\n return get_win_folder_from_registry\n\n\nget_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())\n\n__all__ = [\n "Windows",\n]\n
.venv\Lib\site-packages\platformdirs\windows.py
windows.py
Python
10,125
0.95
0.205882
0.018182
react-lib
158
2023-12-28T19:49:35.900337
BSD-3-Clause
false
128f39361500fcc1dcaefd721a400356
"""\nUtilities for determining application-specific dirs.\n\nSee <https://github.com/platformdirs/platformdirs> for details and usage.\n\n"""\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom .api import PlatformDirsABC\nfrom .version import __version__\nfrom .version import __version_tuple__ as __version_info__\n\nif TYPE_CHECKING:\n from pathlib import Path\n from typing import Literal\n\nif sys.platform == "win32":\n from platformdirs.windows import Windows as _Result\nelif sys.platform == "darwin":\n from platformdirs.macos import MacOS as _Result\nelse:\n from platformdirs.unix import Unix as _Result\n\n\ndef _set_platform_dir_class() -> type[PlatformDirsABC]:\n if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":\n if os.getenv("SHELL") or os.getenv("PREFIX"):\n return _Result\n\n from platformdirs.android import _android_folder # noqa: PLC0415\n\n if _android_folder() is not None:\n from platformdirs.android import Android # noqa: PLC0415\n\n return Android # return to avoid redefinition of a result\n\n return _Result\n\n\nif TYPE_CHECKING:\n # Work around mypy issue: https://github.com/python/mypy/issues/10962\n PlatformDirs = _Result\nelse:\n PlatformDirs = _set_platform_dir_class() #: Currently active platform\nAppDirs = PlatformDirs #: Backwards compatibility with appdirs\n\n\ndef user_data_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists 
<platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: data directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_data_dir\n\n\ndef site_data_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n multipath: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param multipath: See `roaming <platformdirs.api.PlatformDirsABC.multipath>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: data directory shared by users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n multipath=multipath,\n ensure_exists=ensure_exists,\n ).site_data_dir\n\n\ndef user_config_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: config directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_config_dir\n\n\ndef 
site_config_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n multipath: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param multipath: See `roaming <platformdirs.api.PlatformDirsABC.multipath>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: config directory shared by the users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n multipath=multipath,\n ensure_exists=ensure_exists,\n ).site_config_dir\n\n\ndef user_cache_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: cache directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_cache_dir\n\n\ndef site_cache_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: 
See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: cache directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).site_cache_dir\n\n\ndef user_state_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: state directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_state_dir\n\n\ndef user_log_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `roaming 
<platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: log directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_log_dir\n\n\ndef user_documents_dir() -> str:\n """:returns: documents directory tied to the user"""\n return PlatformDirs().user_documents_dir\n\n\ndef user_downloads_dir() -> str:\n """:returns: downloads directory tied to the user"""\n return PlatformDirs().user_downloads_dir\n\n\ndef user_pictures_dir() -> str:\n """:returns: pictures directory tied to the user"""\n return PlatformDirs().user_pictures_dir\n\n\ndef user_videos_dir() -> str:\n """:returns: videos directory tied to the user"""\n return PlatformDirs().user_videos_dir\n\n\ndef user_music_dir() -> str:\n """:returns: music directory tied to the user"""\n return PlatformDirs().user_music_dir\n\n\ndef user_desktop_dir() -> str:\n """:returns: desktop directory tied to the user"""\n return PlatformDirs().user_desktop_dir\n\n\ndef user_runtime_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: runtime directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_runtime_dir\n\n\ndef 
site_runtime_dir(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> str:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: runtime directory shared by users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).site_runtime_dir\n\n\ndef user_data_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: data path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_data_path\n\n\ndef site_data_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n multipath: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname 
<platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: data path shared by users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n multipath=multipath,\n ensure_exists=ensure_exists,\n ).site_data_path\n\n\ndef user_config_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: config path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_config_path\n\n\ndef site_config_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n multipath: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param multipath: See `roaming 
<platformdirs.api.PlatformDirsABC.multipath>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: config path shared by the users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n multipath=multipath,\n ensure_exists=ensure_exists,\n ).site_config_path\n\n\ndef site_cache_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: cache directory tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).site_cache_path\n\n\ndef user_cache_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: cache path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n 
version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_cache_path\n\n\ndef user_state_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n roaming: bool = False, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: state path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n roaming=roaming,\n ensure_exists=ensure_exists,\n ).user_state_path\n\n\ndef user_log_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `roaming <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: log path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_log_path\n\n\ndef user_documents_path() -> Path:\n """:returns: documents a path tied to the user"""\n return PlatformDirs().user_documents_path\n\n\ndef user_downloads_path() -> Path:\n """:returns: downloads 
path tied to the user"""\n return PlatformDirs().user_downloads_path\n\n\ndef user_pictures_path() -> Path:\n """:returns: pictures path tied to the user"""\n return PlatformDirs().user_pictures_path\n\n\ndef user_videos_path() -> Path:\n """:returns: videos path tied to the user"""\n return PlatformDirs().user_videos_path\n\n\ndef user_music_path() -> Path:\n """:returns: music path tied to the user"""\n return PlatformDirs().user_music_path\n\n\ndef user_desktop_path() -> Path:\n """:returns: desktop path tied to the user"""\n return PlatformDirs().user_desktop_path\n\n\ndef user_runtime_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: runtime path tied to the user\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).user_runtime_path\n\n\ndef site_runtime_path(\n appname: str | None = None,\n appauthor: str | Literal[False] | None = None,\n version: str | None = None,\n opinion: bool = True, # noqa: FBT001, FBT002\n ensure_exists: bool = False, # noqa: FBT001, FBT002\n) -> Path:\n """\n :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.\n :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.\n :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.\n :param opinion: See `opinion 
<platformdirs.api.PlatformDirsABC.opinion>`.\n :param ensure_exists: See `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.\n :returns: runtime path shared by users\n """\n return PlatformDirs(\n appname=appname,\n appauthor=appauthor,\n version=version,\n opinion=opinion,\n ensure_exists=ensure_exists,\n ).site_runtime_path\n\n\n__all__ = [\n "AppDirs",\n "PlatformDirs",\n "PlatformDirsABC",\n "__version__",\n "__version_info__",\n "site_cache_dir",\n "site_cache_path",\n "site_config_dir",\n "site_config_path",\n "site_data_dir",\n "site_data_path",\n "site_runtime_dir",\n "site_runtime_path",\n "user_cache_dir",\n "user_cache_path",\n "user_config_dir",\n "user_config_path",\n "user_data_dir",\n "user_data_path",\n "user_desktop_dir",\n "user_desktop_path",\n "user_documents_dir",\n "user_documents_path",\n "user_downloads_dir",\n "user_downloads_path",\n "user_log_dir",\n "user_log_path",\n "user_music_dir",\n "user_music_path",\n "user_pictures_dir",\n "user_pictures_path",\n "user_runtime_dir",\n "user_runtime_path",\n "user_state_dir",\n "user_state_path",\n "user_videos_dir",\n "user_videos_path",\n]\n
.venv\Lib\site-packages\platformdirs\__init__.py
__init__.py
Python
22,284
0.95
0.064976
0.001818
react-lib
56
2024-04-11T04:45:13.019389
GPL-3.0
false
a79aa7e36581a0bf45fde4e949b56d18
"""Main entry point."""\n\nfrom __future__ import annotations\n\nfrom platformdirs import PlatformDirs, __version__\n\nPROPS = (\n "user_data_dir",\n "user_config_dir",\n "user_cache_dir",\n "user_state_dir",\n "user_log_dir",\n "user_documents_dir",\n "user_downloads_dir",\n "user_pictures_dir",\n "user_videos_dir",\n "user_music_dir",\n "user_runtime_dir",\n "site_data_dir",\n "site_config_dir",\n "site_cache_dir",\n "site_runtime_dir",\n)\n\n\ndef main() -> None:\n """Run the main entry point."""\n app_name = "MyApp"\n app_author = "MyCompany"\n\n print(f"-- platformdirs {__version__} --") # noqa: T201\n\n print("-- app dirs (with optional 'version')") # noqa: T201\n dirs = PlatformDirs(app_name, app_author, version="1.0")\n for prop in PROPS:\n print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201\n\n print("\n-- app dirs (without optional 'version')") # noqa: T201\n dirs = PlatformDirs(app_name, app_author)\n for prop in PROPS:\n print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201\n\n print("\n-- app dirs (without optional 'appauthor')") # noqa: T201\n dirs = PlatformDirs(app_name)\n for prop in PROPS:\n print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201\n\n print("\n-- app dirs (with disabled 'appauthor')") # noqa: T201\n dirs = PlatformDirs(app_name, appauthor=False)\n for prop in PROPS:\n print(f"{prop}: {getattr(dirs, prop)}") # noqa: T201\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\platformdirs\__main__.py
__main__.py
Python
1,493
0.95
0.109091
0
awesome-app
916
2024-09-19T02:54:42.369327
MIT
false
9fff79e4182d27eb4edfb33133a1ac43
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\android.cpython-313.pyc
android.cpython-313.pyc
Other
10,754
0.8
0.130435
0
node-utils
494
2024-07-14T14:36:23.788420
BSD-3-Clause
false
6cdc72b34988edeb5d6800dce6fdf195
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
13,459
0.95
0.031746
0
python-kit
314
2024-07-26T14:38:38.030526
Apache-2.0
false
b1bdd6439d29259f715ede0af0fb363f
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\macos.cpython-313.pyc
macos.cpython-313.pyc
Other
8,819
0.8
0.06383
0
vue-tools
465
2024-08-03T11:02:22.537470
Apache-2.0
false
2999ad95300e9fbbbd0aa52b11bc14f4
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\unix.cpython-313.pyc
unix.cpython-313.pyc
Other
14,759
0.8
0.094017
0
node-utils
339
2024-03-22T20:26:57.302800
BSD-3-Clause
false
366a578e394355d078e39dc6a2dd0455
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\version.cpython-313.pyc
version.cpython-313.pyc
Other
657
0.7
0
0
vue-tools
12
2025-03-25T08:23:26.805115
GPL-3.0
false
aeedde30f46185da896a1bbc22e21a29
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\windows.cpython-313.pyc
windows.cpython-313.pyc
Other
13,775
0.8
0.051724
0
awesome-app
764
2025-02-22T09:41:54.685768
Apache-2.0
false
8cae59e22c7514208f50a39c5a62b390
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
19,308
0.8
0.008065
0
awesome-app
988
2023-11-04T18:37:54.300579
MIT
false
99e3d2c62effa0c64cfbd3d660b32369
\n\n
.venv\Lib\site-packages\platformdirs\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
1,911
0.8
0
0
python-kit
186
2023-08-16T21:23:31.690510
BSD-3-Clause
false
045ef33e3739fde14eba13c978f8e754
pip\n
.venv\Lib\site-packages\platformdirs-4.3.8.dist-info\INSTALLER
INSTALLER
Other
4
0.5
0
0
python-kit
13
2024-05-03T08:49:56.312891
BSD-3-Clause
false
365c9bfeb7d89244f2ce01c1de44cb85
Metadata-Version: 2.4\nName: platformdirs\nVersion: 4.3.8\nSummary: A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`.\nProject-URL: Changelog, https://github.com/tox-dev/platformdirs/releases\nProject-URL: Documentation, https://platformdirs.readthedocs.io\nProject-URL: Homepage, https://github.com/tox-dev/platformdirs\nProject-URL: Source, https://github.com/tox-dev/platformdirs\nProject-URL: Tracker, https://github.com/tox-dev/platformdirs/issues\nMaintainer-email: Bernát Gábor <gaborjbernat@gmail.com>, Julian Berman <Julian@GrayVines.com>, Ofek Lev <oss@ofek.dev>, Ronny Pfannschmidt <opensource@ronnypfannschmidt.de>\nLicense-Expression: MIT\nLicense-File: LICENSE\nKeywords: appdirs,application,cache,directory,log,user\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python\nClassifier: Programming Language :: Python :: 3 :: Only\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: 3.13\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Topic :: Software Development :: Libraries :: Python Modules\nRequires-Python: >=3.9\nProvides-Extra: docs\nRequires-Dist: furo>=2024.8.6; extra == 'docs'\nRequires-Dist: proselint>=0.14; extra == 'docs'\nRequires-Dist: sphinx-autodoc-typehints>=3; extra == 'docs'\nRequires-Dist: sphinx>=8.1.3; extra == 'docs'\nProvides-Extra: test\nRequires-Dist: appdirs==1.4.4; extra == 'test'\nRequires-Dist: covdefaults>=2.3; extra == 'test'\nRequires-Dist: pytest-cov>=6; extra == 'test'\nRequires-Dist: 
pytest-mock>=3.14; extra == 'test'\nRequires-Dist: pytest>=8.3.4; extra == 'test'\nProvides-Extra: type\nRequires-Dist: mypy>=1.14.1; extra == 'type'\nDescription-Content-Type: text/x-rst\n\nThe problem\n===========\n\n.. image:: https://badge.fury.io/py/platformdirs.svg\n :target: https://badge.fury.io/py/platformdirs\n.. image:: https://img.shields.io/pypi/pyversions/platformdirs.svg\n :target: https://pypi.python.org/pypi/platformdirs/\n.. image:: https://github.com/tox-dev/platformdirs/actions/workflows/check.yaml/badge.svg\n :target: https://github.com/platformdirs/platformdirs/actions\n.. image:: https://static.pepy.tech/badge/platformdirs/month\n :target: https://pepy.tech/project/platformdirs\n\nWhen writing desktop application, finding the right location to store user data\nand configuration varies per platform. Even for single-platform apps, there\nmay by plenty of nuances in figuring out the right location.\n\nFor example, if running on macOS, you should use::\n\n ~/Library/Application Support/<AppName>\n\nIf on Windows (at least English Win) that should be::\n\n C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>\n\nor possibly::\n\n C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>\n\nfor `roaming profiles <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-vista/cc766489(v=ws.10)>`_ but that is another story.\n\nOn Linux (and other Unices), according to the `XDG Basedir Spec`_, it should be::\n\n ~/.local/share/<AppName>\n\n.. 
_XDG Basedir Spec: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html\n\n``platformdirs`` to the rescue\n==============================\n\nThis kind of thing is what the ``platformdirs`` package is for.\n``platformdirs`` will help you choose an appropriate:\n\n- user data dir (``user_data_dir``)\n- user config dir (``user_config_dir``)\n- user cache dir (``user_cache_dir``)\n- site data dir (``site_data_dir``)\n- site config dir (``site_config_dir``)\n- user log dir (``user_log_dir``)\n- user documents dir (``user_documents_dir``)\n- user downloads dir (``user_downloads_dir``)\n- user pictures dir (``user_pictures_dir``)\n- user videos dir (``user_videos_dir``)\n- user music dir (``user_music_dir``)\n- user desktop dir (``user_desktop_dir``)\n- user runtime dir (``user_runtime_dir``)\n\nAnd also:\n\n- Is slightly opinionated on the directory names used. Look for "OPINION" in\n documentation and code for when an opinion is being applied.\n\nExample output\n==============\n\nOn macOS:\n\n.. 
code-block:: pycon\n\n >>> from platformdirs import *\n >>> appname = "SuperApp"\n >>> appauthor = "Acme"\n >>> user_data_dir(appname, appauthor)\n '/Users/trentm/Library/Application Support/SuperApp'\n >>> user_config_dir(appname, appauthor)\n '/Users/trentm/Library/Application Support/SuperApp'\n >>> user_cache_dir(appname, appauthor)\n '/Users/trentm/Library/Caches/SuperApp'\n >>> site_data_dir(appname, appauthor)\n '/Library/Application Support/SuperApp'\n >>> site_config_dir(appname, appauthor)\n '/Library/Application Support/SuperApp'\n >>> user_log_dir(appname, appauthor)\n '/Users/trentm/Library/Logs/SuperApp'\n >>> user_documents_dir()\n '/Users/trentm/Documents'\n >>> user_downloads_dir()\n '/Users/trentm/Downloads'\n >>> user_pictures_dir()\n '/Users/trentm/Pictures'\n >>> user_videos_dir()\n '/Users/trentm/Movies'\n >>> user_music_dir()\n '/Users/trentm/Music'\n >>> user_desktop_dir()\n '/Users/trentm/Desktop'\n >>> user_runtime_dir(appname, appauthor)\n '/Users/trentm/Library/Caches/TemporaryItems/SuperApp'\n\nOn Windows:\n\n.. 
code-block:: pycon\n\n >>> from platformdirs import *\n >>> appname = "SuperApp"\n >>> appauthor = "Acme"\n >>> user_data_dir(appname, appauthor)\n 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'\n >>> user_data_dir(appname, appauthor, roaming=True)\n 'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'\n >>> user_config_dir(appname, appauthor)\n 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'\n >>> user_cache_dir(appname, appauthor)\n 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'\n >>> site_data_dir(appname, appauthor)\n 'C:\\ProgramData\\Acme\\SuperApp'\n >>> site_config_dir(appname, appauthor)\n 'C:\\ProgramData\\Acme\\SuperApp'\n >>> user_log_dir(appname, appauthor)\n 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'\n >>> user_documents_dir()\n 'C:\\Users\\trentm\\Documents'\n >>> user_downloads_dir()\n 'C:\\Users\\trentm\\Downloads'\n >>> user_pictures_dir()\n 'C:\\Users\\trentm\\Pictures'\n >>> user_videos_dir()\n 'C:\\Users\\trentm\\Videos'\n >>> user_music_dir()\n 'C:\\Users\\trentm\\Music'\n >>> user_desktop_dir()\n 'C:\\Users\\trentm\\Desktop'\n >>> user_runtime_dir(appname, appauthor)\n 'C:\\Users\\trentm\\AppData\\Local\\Temp\\Acme\\SuperApp'\n\nOn Linux:\n\n.. 
code-block:: pycon\n\n >>> from platformdirs import *\n >>> appname = "SuperApp"\n >>> appauthor = "Acme"\n >>> user_data_dir(appname, appauthor)\n '/home/trentm/.local/share/SuperApp'\n >>> user_config_dir(appname)\n '/home/trentm/.config/SuperApp'\n >>> user_cache_dir(appname, appauthor)\n '/home/trentm/.cache/SuperApp'\n >>> site_data_dir(appname, appauthor)\n '/usr/local/share/SuperApp'\n >>> site_data_dir(appname, appauthor, multipath=True)\n '/usr/local/share/SuperApp:/usr/share/SuperApp'\n >>> site_config_dir(appname)\n '/etc/xdg/SuperApp'\n >>> os.environ["XDG_CONFIG_DIRS"] = "/etc:/usr/local/etc"\n >>> site_config_dir(appname, multipath=True)\n '/etc/SuperApp:/usr/local/etc/SuperApp'\n >>> user_log_dir(appname, appauthor)\n '/home/trentm/.local/state/SuperApp/log'\n >>> user_documents_dir()\n '/home/trentm/Documents'\n >>> user_downloads_dir()\n '/home/trentm/Downloads'\n >>> user_pictures_dir()\n '/home/trentm/Pictures'\n >>> user_videos_dir()\n '/home/trentm/Videos'\n >>> user_music_dir()\n '/home/trentm/Music'\n >>> user_desktop_dir()\n '/home/trentm/Desktop'\n >>> user_runtime_dir(appname, appauthor)\n '/run/user/{os.getuid()}/SuperApp'\n\nOn Android::\n\n >>> from platformdirs import *\n >>> appname = "SuperApp"\n >>> appauthor = "Acme"\n >>> user_data_dir(appname, appauthor)\n '/data/data/com.myApp/files/SuperApp'\n >>> user_config_dir(appname)\n '/data/data/com.myApp/shared_prefs/SuperApp'\n >>> user_cache_dir(appname, appauthor)\n '/data/data/com.myApp/cache/SuperApp'\n >>> site_data_dir(appname, appauthor)\n '/data/data/com.myApp/files/SuperApp'\n >>> site_config_dir(appname)\n '/data/data/com.myApp/shared_prefs/SuperApp'\n >>> user_log_dir(appname, appauthor)\n '/data/data/com.myApp/cache/SuperApp/log'\n >>> user_documents_dir()\n '/storage/emulated/0/Documents'\n >>> user_downloads_dir()\n '/storage/emulated/0/Downloads'\n >>> user_pictures_dir()\n '/storage/emulated/0/Pictures'\n >>> user_videos_dir()\n '/storage/emulated/0/DCIM/Camera'\n >>> 
user_music_dir()\n '/storage/emulated/0/Music'\n >>> user_desktop_dir()\n '/storage/emulated/0/Desktop'\n >>> user_runtime_dir(appname, appauthor)\n '/data/data/com.myApp/cache/SuperApp/tmp'\n\nNote: Some android apps like Termux and Pydroid are used as shells. These\napps are used by the end user to emulate Linux environment. Presence of\n``SHELL`` environment variable is used by Platformdirs to differentiate\nbetween general android apps and android apps used as shells. Shell android\napps also support ``XDG_*`` environment variables.\n\n\n``PlatformDirs`` for convenience\n================================\n\n.. code-block:: pycon\n\n >>> from platformdirs import PlatformDirs\n >>> dirs = PlatformDirs("SuperApp", "Acme")\n >>> dirs.user_data_dir\n '/Users/trentm/Library/Application Support/SuperApp'\n >>> dirs.user_config_dir\n '/Users/trentm/Library/Application Support/SuperApp'\n >>> dirs.user_cache_dir\n '/Users/trentm/Library/Caches/SuperApp'\n >>> dirs.site_data_dir\n '/Library/Application Support/SuperApp'\n >>> dirs.site_config_dir\n '/Library/Application Support/SuperApp'\n >>> dirs.user_cache_dir\n '/Users/trentm/Library/Caches/SuperApp'\n >>> dirs.user_log_dir\n '/Users/trentm/Library/Logs/SuperApp'\n >>> dirs.user_documents_dir\n '/Users/trentm/Documents'\n >>> dirs.user_downloads_dir\n '/Users/trentm/Downloads'\n >>> dirs.user_pictures_dir\n '/Users/trentm/Pictures'\n >>> dirs.user_videos_dir\n '/Users/trentm/Movies'\n >>> dirs.user_music_dir\n '/Users/trentm/Music'\n >>> dirs.user_desktop_dir\n '/Users/trentm/Desktop'\n >>> dirs.user_runtime_dir\n '/Users/trentm/Library/Caches/TemporaryItems/SuperApp'\n\nPer-version isolation\n=====================\n\nIf you have multiple versions of your app in use that you want to be\nable to run side-by-side, then you may want version-isolation for these\ndirs::\n\n >>> from platformdirs import PlatformDirs\n >>> dirs = PlatformDirs("SuperApp", "Acme", version="1.0")\n >>> dirs.user_data_dir\n 
'/Users/trentm/Library/Application Support/SuperApp/1.0'\n >>> dirs.user_config_dir\n '/Users/trentm/Library/Application Support/SuperApp/1.0'\n >>> dirs.user_cache_dir\n '/Users/trentm/Library/Caches/SuperApp/1.0'\n >>> dirs.site_data_dir\n '/Library/Application Support/SuperApp/1.0'\n >>> dirs.site_config_dir\n '/Library/Application Support/SuperApp/1.0'\n >>> dirs.user_log_dir\n '/Users/trentm/Library/Logs/SuperApp/1.0'\n >>> dirs.user_documents_dir\n '/Users/trentm/Documents'\n >>> dirs.user_downloads_dir\n '/Users/trentm/Downloads'\n >>> dirs.user_pictures_dir\n '/Users/trentm/Pictures'\n >>> dirs.user_videos_dir\n '/Users/trentm/Movies'\n >>> dirs.user_music_dir\n '/Users/trentm/Music'\n >>> dirs.user_desktop_dir\n '/Users/trentm/Desktop'\n >>> dirs.user_runtime_dir\n '/Users/trentm/Library/Caches/TemporaryItems/SuperApp/1.0'\n\nBe wary of using this for configuration files though; you'll need to handle\nmigrating configuration files manually.\n\nWhy this Fork?\n==============\n\nThis repository is a friendly fork of the wonderful work started by\n`ActiveState <https://github.com/ActiveState/appdirs>`_ who created\n``appdirs``, this package's ancestor.\n\nMaintaining an open source project is no easy task, particularly\nfrom within an organization, and the Python community is indebted\nto ``appdirs`` (and to Trent Mick and Jeff Rouse in particular) for\ncreating an incredibly useful simple module, as evidenced by the wide\nnumber of users it has attracted over the years.\n\nNonetheless, given the number of long-standing open issues\nand pull requests, and no clear path towards `ensuring\nthat maintenance of the package would continue or grow\n<https://github.com/ActiveState/appdirs/issues/79>`_, this fork was\ncreated.\n\nContributions are most welcome.\n
.venv\Lib\site-packages\platformdirs-4.3.8.dist-info\METADATA
METADATA
Other
12,831
0.95
0.031429
0
react-lib
49
2023-11-09T03:07:48.423925
MIT
false
a8384a60006235b427b28250eaacbf45
platformdirs-4.3.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nplatformdirs-4.3.8.dist-info/METADATA,sha256=EWjHyrFf1qaur3qrueXWlTHmf-9sP1ROAiZsuaY9jQA,12831\nplatformdirs-4.3.8.dist-info/RECORD,,\nplatformdirs-4.3.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87\nplatformdirs-4.3.8.dist-info/licenses/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089\nplatformdirs/__init__.py,sha256=iORRy6_lZ9tXLvO0W6fJPn8QV7F532ivl-f2WGmabBc,22284\nplatformdirs/__main__.py,sha256=HnsUQHpiBaiTxwcmwVw-nFaPdVNZtQIdi1eWDtI-MzI,1493\nplatformdirs/__pycache__/__init__.cpython-313.pyc,,\nplatformdirs/__pycache__/__main__.cpython-313.pyc,,\nplatformdirs/__pycache__/android.cpython-313.pyc,,\nplatformdirs/__pycache__/api.cpython-313.pyc,,\nplatformdirs/__pycache__/macos.cpython-313.pyc,,\nplatformdirs/__pycache__/unix.cpython-313.pyc,,\nplatformdirs/__pycache__/version.cpython-313.pyc,,\nplatformdirs/__pycache__/windows.cpython-313.pyc,,\nplatformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013\nplatformdirs/api.py,sha256=U9EzI3EYxcXWUCtIGRllqrcN99i2LSY1mq2-GtsUwEQ,9277\nplatformdirs/macos.py,sha256=UlbyFZ8Rzu3xndCqQEHrfsYTeHwYdFap1Ioz-yxveT4,6154\nplatformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nplatformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458\nplatformdirs/version.py,sha256=ddN3EcUPfer7CbqmyFNmg03R3u-qDn32T_fLsx25-Ck,511\nplatformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125\n
.venv\Lib\site-packages\platformdirs-4.3.8.dist-info\RECORD
RECORD
Other
1,549
0.7
0
0
python-kit
564
2024-03-30T02:48:31.447007
MIT
false
598b3b4e983a4cf44bd53b15afdc9604
Wheel-Version: 1.0\nGenerator: hatchling 1.27.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n
.venv\Lib\site-packages\platformdirs-4.3.8.dist-info\WHEEL
WHEEL
Other
87
0.5
0
0
vue-tools
464
2024-11-27T16:10:13.674577
Apache-2.0
false
e2fcb0ad9ea59332c808928b4b439e7a
MIT License\n\nCopyright (c) 2010-202x The platformdirs developers\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n
.venv\Lib\site-packages\platformdirs-4.3.8.dist-info\licenses\LICENSE
LICENSE
Other
1,089
0.7
0
0
react-lib
395
2025-06-10T17:39:16.721700
BSD-3-Clause
false
ea4f5a41454746a9ed111e3d8723d17a
from _plotly_utils.basevalidators import EnumeratedValidator, NumberValidator\n\n\nclass EasingValidator(EnumeratedValidator):\n def __init__(self, plotly_name="easing", parent_name="batch_animate", **_):\n super(EasingValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n values=[\n "linear",\n "quad",\n "cubic",\n "sin",\n "exp",\n "circle",\n "elastic",\n "back",\n "bounce",\n "linear-in",\n "quad-in",\n "cubic-in",\n "sin-in",\n "exp-in",\n "circle-in",\n "elastic-in",\n "back-in",\n "bounce-in",\n "linear-out",\n "quad-out",\n "cubic-out",\n "sin-out",\n "exp-out",\n "circle-out",\n "elastic-out",\n "back-out",\n "bounce-out",\n "linear-in-out",\n "quad-in-out",\n "cubic-in-out",\n "sin-in-out",\n "exp-in-out",\n "circle-in-out",\n "elastic-in-out",\n "back-in-out",\n "bounce-in-out",\n ],\n )\n\n\nclass DurationValidator(NumberValidator):\n def __init__(self, plotly_name="duration"):\n super(DurationValidator, self).__init__(\n plotly_name=plotly_name, parent_name="batch_animate", min=0\n )\n
.venv\Lib\site-packages\plotly\animation.py
animation.py
Python
1,616
0.85
0.074074
0
node-utils
166
2024-09-24T04:16:24.770322
GPL-3.0
false
bc7465e482cbba78468b1c378735144b
from copy import deepcopy\nimport pathlib\nfrom traitlets import List, Dict, observe, Integer\nfrom plotly.io._renderers import display_jupyter_version_warnings\n\nfrom .basedatatypes import BaseFigure, BasePlotlyType\nfrom .callbacks import BoxSelector, LassoSelector, InputDeviceState, Points\nfrom .serializers import custom_serializers\nimport anywidget\n\n\nclass BaseFigureWidget(BaseFigure, anywidget.AnyWidget):\n """\n Base class for FigureWidget. The FigureWidget class is code-generated as a\n subclass\n """\n\n _esm = pathlib.Path(__file__).parent / "package_data" / "widgetbundle.js"\n\n # ### _data and _layout ###\n # These properties store the current state of the traces and\n # layout as JSON-style dicts. These dicts do not store any subclasses of\n # `BasePlotlyType`\n #\n # Note: These are only automatically synced with the frontend on full\n # assignment, not on mutation. We use this fact to only directly sync\n # them to the front-end on FigureWidget construction. All other updates\n # are made using mutation, and they are manually synced to the frontend\n # using the relayout/restyle/update/etc. messages.\n _widget_layout = Dict().tag(sync=True, **custom_serializers)\n _widget_data = List().tag(sync=True, **custom_serializers)\n _config = Dict().tag(sync=True, **custom_serializers)\n\n # ### Python -> JS message properties ###\n # These properties are used to send messages from Python to the\n # frontend. 
Messages are sent by assigning the message contents to the\n # appropriate _py2js_* property and then immediatly assigning None to the\n # property.\n #\n # See JSDoc comments in the FigureModel class in js/src/Figure.js for\n # detailed descriptions of the messages.\n _py2js_addTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _py2js_restyle = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _py2js_relayout = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _py2js_update = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _py2js_animate = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n\n _py2js_deleteTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _py2js_moveTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n\n _py2js_removeLayoutProps = Dict(allow_none=True).tag(\n sync=True, **custom_serializers\n )\n _py2js_removeTraceProps = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n\n # ### JS -> Python message properties ###\n # These properties are used to receive messages from the frontend.\n # Messages are received by defining methods that observe changes to these\n # properties. Receive methods are named `_handler_js2py_*` where '*' is\n # the name of the corresponding message property. 
Receive methods are\n # responsible for setting the message property to None after retreiving\n # the message data.\n #\n # See JSDoc comments in the FigureModel class in js/src/Figure.js for\n # detailed descriptions of the messages.\n _js2py_traceDeltas = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _js2py_layoutDelta = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _js2py_restyle = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _js2py_relayout = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _js2py_update = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n _js2py_pointsCallback = Dict(allow_none=True).tag(sync=True, **custom_serializers)\n\n # ### Message tracking properties ###\n # The _last_layout_edit_id and _last_trace_edit_id properties are used\n # to keep track of the edit id of the message that most recently\n # requested an update to the Figures layout or traces respectively.\n #\n # We track this information because we don't want to update the Figure's\n # default layout/trace properties (_layout_defaults, _data_defaults)\n # while edits are in process. 
This can lead to inconsistent property\n # states.\n _last_layout_edit_id = Integer(0).tag(sync=True)\n _last_trace_edit_id = Integer(0).tag(sync=True)\n\n _set_trace_uid = True\n _allow_disable_validation = False\n\n # Constructor\n # -----------\n def __init__(\n self, data=None, layout=None, frames=None, skip_invalid=False, **kwargs\n ):\n # Call superclass constructors\n # ----------------------------\n # Note: We rename layout to layout_plotly because to deconflict it\n # with the `layout` constructor parameter of the `widgets.DOMWidget`\n # ipywidgets class\n super(BaseFigureWidget, self).__init__(\n data=data,\n layout_plotly=layout,\n frames=frames,\n skip_invalid=skip_invalid,\n **kwargs,\n )\n\n # Validate Frames\n # ---------------\n # Frames are not supported by figure widget\n if self._frame_objs:\n BaseFigureWidget._display_frames_error()\n\n # Message States\n # --------------\n # ### Layout ###\n\n # _last_layout_edit_id is described above\n self._last_layout_edit_id = 0\n\n # _layout_edit_in_process is set to True if there are layout edit\n # operations that have been sent to the frontend that haven't\n # completed yet.\n self._layout_edit_in_process = False\n\n # _waiting_edit_callbacks is a list of callback functions that\n # should be executed as soon as all pending edit operations are\n # completed\n self._waiting_edit_callbacks = []\n\n # ### Trace ###\n # _last_trace_edit_id: described above\n self._last_trace_edit_id = 0\n\n # _trace_edit_in_process is set to True if there are trace edit\n # operations that have been sent to the frontend that haven't\n # completed yet.\n self._trace_edit_in_process = False\n\n # View count\n # ----------\n # ipywidget property that stores the number of active frontend\n # views of this widget\n self._view_count = 0\n\n # Initialize widget layout and data for third-party widget integration\n # --------------------------------------------------------------------\n self._widget_layout = 
deepcopy(self._layout_obj._props)\n self._widget_data = deepcopy(self._data)\n\n def show(self, *args, **kwargs):\n return self\n\n # Python -> JavaScript Messages\n # -----------------------------\n def _send_relayout_msg(self, layout_data, source_view_id=None):\n """\n Send Plotly.relayout message to the frontend\n\n Parameters\n ----------\n layout_data : dict\n Plotly.relayout layout data\n source_view_id : str\n UID of view that triggered this relayout operation\n (e.g. By the user clicking 'zoom' in the toolbar). None if the\n operation was not triggered by a frontend view\n """\n # Increment layout edit messages IDs\n # ----------------------------------\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n # Build message\n # -------------\n msg_data = {\n "relayout_data": layout_data,\n "layout_edit_id": layout_edit_id,\n "source_view_id": source_view_id,\n }\n\n # Send message\n # ------------\n self._py2js_relayout = msg_data\n self._py2js_relayout = None\n\n def _send_restyle_msg(self, restyle_data, trace_indexes=None, source_view_id=None):\n """\n Send Plotly.restyle message to the frontend\n\n Parameters\n ----------\n restyle_data : dict\n Plotly.restyle restyle data\n trace_indexes : list[int]\n List of trace indexes that the restyle operation\n applies to\n source_view_id : str\n UID of view that triggered this restyle operation\n (e.g. 
By the user clicking the legend to hide a trace).\n None if the operation was not triggered by a frontend view\n """\n\n # Validate / normalize inputs\n # ---------------------------\n trace_indexes = self._normalize_trace_indexes(trace_indexes)\n\n # Increment layout/trace edit message IDs\n # ---------------------------------------\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n trace_edit_id = self._last_trace_edit_id + 1\n self._last_trace_edit_id = trace_edit_id\n self._trace_edit_in_process = True\n\n # Build message\n # -------------\n restyle_msg = {\n "restyle_data": restyle_data,\n "restyle_traces": trace_indexes,\n "trace_edit_id": trace_edit_id,\n "layout_edit_id": layout_edit_id,\n "source_view_id": source_view_id,\n }\n\n # Send message\n # ------------\n self._py2js_restyle = restyle_msg\n self._py2js_restyle = None\n\n def _send_addTraces_msg(self, new_traces_data):\n """\n Send Plotly.addTraces message to the frontend\n\n Parameters\n ----------\n new_traces_data : list[dict]\n List of trace data for new traces as accepted by Plotly.addTraces\n """\n\n # Increment layout/trace edit message IDs\n # ---------------------------------------\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n trace_edit_id = self._last_trace_edit_id + 1\n self._last_trace_edit_id = trace_edit_id\n self._trace_edit_in_process = True\n\n # Build message\n # -------------\n add_traces_msg = {\n "trace_data": new_traces_data,\n "trace_edit_id": trace_edit_id,\n "layout_edit_id": layout_edit_id,\n }\n\n # Send message\n # ------------\n self._py2js_addTraces = add_traces_msg\n self._py2js_addTraces = None\n\n def _send_moveTraces_msg(self, current_inds, new_inds):\n """\n Send Plotly.moveTraces message to the frontend\n\n Parameters\n ----------\n current_inds : list[int]\n List of current trace indexes\n 
new_inds : list[int]\n List of new trace indexes\n """\n\n # Build message\n # -------------\n move_msg = {"current_trace_inds": current_inds, "new_trace_inds": new_inds}\n\n # Send message\n # ------------\n self._py2js_moveTraces = move_msg\n self._py2js_moveTraces = None\n\n def _send_update_msg(\n self, restyle_data, relayout_data, trace_indexes=None, source_view_id=None\n ):\n """\n Send Plotly.update message to the frontend\n\n Parameters\n ----------\n restyle_data : dict\n Plotly.update restyle data\n relayout_data : dict\n Plotly.update relayout data\n trace_indexes : list[int]\n List of trace indexes that the update operation applies to\n source_view_id : str\n UID of view that triggered this update operation\n (e.g. By the user clicking a button).\n None if the operation was not triggered by a frontend view\n """\n\n # Validate / normalize inputs\n # ---------------------------\n trace_indexes = self._normalize_trace_indexes(trace_indexes)\n\n # Increment layout/trace edit message IDs\n # ---------------------------------------\n trace_edit_id = self._last_trace_edit_id + 1\n self._last_trace_edit_id = trace_edit_id\n self._trace_edit_in_process = True\n\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n # Build message\n # -------------\n update_msg = {\n "style_data": restyle_data,\n "layout_data": relayout_data,\n "style_traces": trace_indexes,\n "trace_edit_id": trace_edit_id,\n "layout_edit_id": layout_edit_id,\n "source_view_id": source_view_id,\n }\n\n # Send message\n # ------------\n self._py2js_update = update_msg\n self._py2js_update = None\n\n def _send_animate_msg(\n self, styles_data, relayout_data, trace_indexes, animation_opts\n ):\n """\n Send Plotly.update message to the frontend\n\n Note: there is no source_view_id parameter because animations\n triggered by the fontend are not currently supported\n\n Parameters\n ----------\n styles_data : 
list[dict]\n Plotly.animate styles data\n relayout_data : dict\n Plotly.animate relayout data\n trace_indexes : list[int]\n List of trace indexes that the animate operation applies to\n """\n\n # Validate / normalize inputs\n # ---------------------------\n trace_indexes = self._normalize_trace_indexes(trace_indexes)\n\n # Increment layout/trace edit message IDs\n # ---------------------------------------\n trace_edit_id = self._last_trace_edit_id + 1\n self._last_trace_edit_id = trace_edit_id\n self._trace_edit_in_process = True\n\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n # Build message\n # -------------\n animate_msg = {\n "style_data": styles_data,\n "layout_data": relayout_data,\n "style_traces": trace_indexes,\n "animation_opts": animation_opts,\n "trace_edit_id": trace_edit_id,\n "layout_edit_id": layout_edit_id,\n "source_view_id": None,\n }\n\n # Send message\n # ------------\n self._py2js_animate = animate_msg\n self._py2js_animate = None\n\n def _send_deleteTraces_msg(self, delete_inds):\n """\n Send Plotly.deleteTraces message to the frontend\n\n Parameters\n ----------\n delete_inds : list[int]\n List of trace indexes of traces to delete\n """\n\n # Increment layout/trace edit message IDs\n # ---------------------------------------\n trace_edit_id = self._last_trace_edit_id + 1\n self._last_trace_edit_id = trace_edit_id\n self._trace_edit_in_process = True\n\n layout_edit_id = self._last_layout_edit_id + 1\n self._last_layout_edit_id = layout_edit_id\n self._layout_edit_in_process = True\n\n # Build message\n # -------------\n delete_msg = {\n "delete_inds": delete_inds,\n "layout_edit_id": layout_edit_id,\n "trace_edit_id": trace_edit_id,\n }\n\n # Send message\n # ------------\n self._py2js_deleteTraces = delete_msg\n self._py2js_deleteTraces = None\n\n # JavaScript -> Python Messages\n # -----------------------------\n @observe("_js2py_traceDeltas")\n def 
_handler_js2py_traceDeltas(self, change):\n """\n Process trace deltas message from the frontend\n """\n\n # Receive message\n # ---------------\n msg_data = change["new"]\n if not msg_data:\n self._js2py_traceDeltas = None\n return\n\n trace_deltas = msg_data["trace_deltas"]\n trace_edit_id = msg_data["trace_edit_id"]\n\n # Apply deltas\n # ------------\n # We only apply the deltas if this message corresponds to the most\n # recent trace edit operation\n if trace_edit_id == self._last_trace_edit_id:\n # ### Loop over deltas ###\n for delta in trace_deltas:\n # #### Find existing trace for uid ###\n trace_uid = delta["uid"]\n trace_uids = [trace.uid for trace in self.data]\n trace_index = trace_uids.index(trace_uid)\n uid_trace = self.data[trace_index]\n\n # #### Transform defaults to delta ####\n delta_transform = BaseFigureWidget._transform_data(\n uid_trace._prop_defaults, delta\n )\n\n # #### Remove overlapping properties ####\n # If a property is present in both _props and _prop_defaults\n # then we remove the copy from _props\n remove_props = self._remove_overlapping_props(\n uid_trace._props, uid_trace._prop_defaults\n )\n\n # #### Notify frontend model of property removal ####\n if remove_props:\n remove_trace_props_msg = {\n "remove_trace": trace_index,\n "remove_props": remove_props,\n }\n self._py2js_removeTraceProps = remove_trace_props_msg\n self._py2js_removeTraceProps = None\n\n # #### Dispatch change callbacks ####\n self._dispatch_trace_change_callbacks(delta_transform, [trace_index])\n\n # ### Trace edits no longer in process ###\n self._trace_edit_in_process = False\n\n # ### Call any waiting trace edit callbacks ###\n if not self._layout_edit_in_process:\n while self._waiting_edit_callbacks:\n self._waiting_edit_callbacks.pop()()\n\n self._js2py_traceDeltas = None\n\n @observe("_js2py_layoutDelta")\n def _handler_js2py_layoutDelta(self, change):\n """\n Process layout delta message from the frontend\n """\n\n # Receive message\n # 
---------------\n msg_data = change["new"]\n if not msg_data:\n self._js2py_layoutDelta = None\n return\n\n layout_delta = msg_data["layout_delta"]\n layout_edit_id = msg_data["layout_edit_id"]\n\n # Apply delta\n # -----------\n # We only apply the delta if this message corresponds to the most\n # recent layout edit operation\n if layout_edit_id == self._last_layout_edit_id:\n # ### Transform defaults to delta ###\n delta_transform = BaseFigureWidget._transform_data(\n self._layout_defaults, layout_delta\n )\n\n # ### Remove overlapping properties ###\n # If a property is present in both _layout and _layout_defaults\n # then we remove the copy from _layout\n removed_props = self._remove_overlapping_props(\n self._widget_layout, self._layout_defaults\n )\n\n # ### Notify frontend model of property removal ###\n if removed_props:\n remove_props_msg = {"remove_props": removed_props}\n\n self._py2js_removeLayoutProps = remove_props_msg\n self._py2js_removeLayoutProps = None\n\n # ### Create axis objects ###\n # For example, when a SPLOM trace is created the layout defaults\n # may include axes that weren't explicitly defined by the user.\n for proppath in delta_transform:\n prop = proppath[0]\n match = self.layout._subplot_re_match(prop)\n if match and prop not in self.layout:\n # We need to create a subplotid object\n self.layout[prop] = {}\n\n # ### Dispatch change callbacks ###\n self._dispatch_layout_change_callbacks(delta_transform)\n\n # ### Layout edits no longer in process ###\n self._layout_edit_in_process = False\n\n # ### Call any waiting layout edit callbacks ###\n if not self._trace_edit_in_process:\n while self._waiting_edit_callbacks:\n self._waiting_edit_callbacks.pop()()\n\n self._js2py_layoutDelta = None\n\n @observe("_js2py_restyle")\n def _handler_js2py_restyle(self, change):\n """\n Process Plotly.restyle message from the frontend\n """\n\n # Receive message\n # ---------------\n restyle_msg = change["new"]\n\n if not restyle_msg:\n 
self._js2py_restyle = None\n return\n\n style_data = restyle_msg["style_data"]\n style_traces = restyle_msg["style_traces"]\n source_view_id = restyle_msg["source_view_id"]\n\n # Perform restyle\n # ---------------\n self.plotly_restyle(\n restyle_data=style_data,\n trace_indexes=style_traces,\n source_view_id=source_view_id,\n )\n\n self._js2py_restyle = None\n\n @observe("_js2py_update")\n def _handler_js2py_update(self, change):\n """\n Process Plotly.update message from the frontend\n """\n\n # Receive message\n # ---------------\n update_msg = change["new"]\n\n if not update_msg:\n self._js2py_update = None\n return\n\n style = update_msg["style_data"]\n trace_indexes = update_msg["style_traces"]\n layout = update_msg["layout_data"]\n source_view_id = update_msg["source_view_id"]\n\n # Perform update\n # --------------\n self.plotly_update(\n restyle_data=style,\n relayout_data=layout,\n trace_indexes=trace_indexes,\n source_view_id=source_view_id,\n )\n\n self._js2py_update = None\n\n @observe("_js2py_relayout")\n def _handler_js2py_relayout(self, change):\n """\n Process Plotly.relayout message from the frontend\n """\n\n # Receive message\n # ---------------\n relayout_msg = change["new"]\n\n if not relayout_msg:\n self._js2py_relayout = None\n return\n\n relayout_data = relayout_msg["relayout_data"]\n source_view_id = relayout_msg["source_view_id"]\n\n if "lastInputTime" in relayout_data:\n # Remove 'lastInputTime'. 
Seems to be an internal plotly\n # property that is introduced for some plot types, but it is not\n # actually a property in the schema\n relayout_data.pop("lastInputTime")\n\n # Perform relayout\n # ----------------\n self.plotly_relayout(relayout_data=relayout_data, source_view_id=source_view_id)\n\n self._js2py_relayout = None\n\n @observe("_js2py_pointsCallback")\n def _handler_js2py_pointsCallback(self, change):\n """\n Process points callback message from the frontend\n """\n\n # Receive message\n # ---------------\n callback_data = change["new"]\n\n if not callback_data:\n self._js2py_pointsCallback = None\n return\n\n # Get event type\n # --------------\n event_type = callback_data["event_type"]\n\n # Build Selector Object\n # ---------------------\n if callback_data.get("selector", None):\n selector_data = callback_data["selector"]\n selector_type = selector_data["type"]\n selector_state = selector_data["selector_state"]\n if selector_type == "box":\n selector = BoxSelector(**selector_state)\n elif selector_type == "lasso":\n selector = LassoSelector(**selector_state)\n else:\n raise ValueError("Unsupported selector type: %s" % selector_type)\n else:\n selector = None\n\n # Build Input Device State Object\n # -------------------------------\n if callback_data.get("device_state", None):\n device_state_data = callback_data["device_state"]\n state = InputDeviceState(**device_state_data)\n else:\n state = None\n\n # Build Trace Points Dictionary\n # -----------------------------\n points_data = callback_data["points"]\n trace_points = {\n trace_ind: {\n "point_inds": [],\n "xs": [],\n "ys": [],\n "trace_name": self._data_objs[trace_ind].name,\n "trace_index": trace_ind,\n }\n for trace_ind in range(len(self._data_objs))\n }\n\n for x, y, point_ind, trace_ind in zip(\n points_data["xs"],\n points_data["ys"],\n points_data["point_indexes"],\n points_data["trace_indexes"],\n ):\n trace_dict = trace_points[trace_ind]\n trace_dict["xs"].append(x)\n 
trace_dict["ys"].append(y)\n trace_dict["point_inds"].append(point_ind)\n\n # Dispatch callbacks\n # ------------------\n for trace_ind, trace_points_data in trace_points.items():\n points = Points(**trace_points_data)\n trace = self.data[trace_ind]\n\n if event_type == "plotly_click":\n trace._dispatch_on_click(points, state)\n elif event_type == "plotly_hover":\n trace._dispatch_on_hover(points, state)\n elif event_type == "plotly_unhover":\n trace._dispatch_on_unhover(points, state)\n elif event_type == "plotly_selected":\n trace._dispatch_on_selection(points, selector)\n elif event_type == "plotly_deselect":\n trace._dispatch_on_deselect(points)\n\n self._js2py_pointsCallback = None\n\n # Display\n # -------\n def _repr_html_(self):\n """\n Customize html representation\n """\n raise NotImplementedError # Prefer _repr_mimebundle_\n\n def _repr_mimebundle_(self, include=None, exclude=None, validate=True, **kwargs):\n """\n Return mimebundle corresponding to default renderer.\n """\n display_jupyter_version_warnings()\n\n # Widget layout and data need to be set here in case there are\n # changes made to the figure after the widget is created but before\n # the cell is run.\n self._widget_layout = deepcopy(self._layout_obj._props)\n self._widget_data = deepcopy(self._data)\n return {\n "application/vnd.jupyter.widget-view+json": {\n "version_major": 2,\n "version_minor": 0,\n "model_id": self._model_id,\n },\n }\n\n def _ipython_display_(self):\n """\n Handle rich display of figures in ipython contexts\n """\n raise NotImplementedError # Prefer _repr_mimebundle_\n\n # Callbacks\n # ---------\n def on_edits_completed(self, fn):\n """\n Register a function to be called after all pending trace and layout\n edit operations have completed\n\n If there are no pending edit operations then function is called\n immediately\n\n Parameters\n ----------\n fn : callable\n Function of zero arguments to be called when all pending edit\n operations have completed\n """\n if 
self._layout_edit_in_process or self._trace_edit_in_process:\n self._waiting_edit_callbacks.append(fn)\n else:\n fn()\n\n # Validate No Frames\n # ------------------\n @property\n def frames(self):\n # Note: This property getter is identical to that of the superclass,\n # but it must be included here because we're overriding the setter\n # below.\n return self._frame_objs\n\n @frames.setter\n def frames(self, new_frames):\n if new_frames:\n BaseFigureWidget._display_frames_error()\n\n @staticmethod\n def _display_frames_error():\n """\n Display an informative error when user attempts to set frames on a\n FigureWidget\n\n Raises\n ------\n ValueError\n always\n """\n msg = """\nFrames are not supported by the plotly.graph_objs.FigureWidget class.\nNote: Frames are supported by the plotly.graph_objs.Figure class"""\n raise ValueError(msg)\n\n # Static Helpers\n # --------------\n @staticmethod\n def _remove_overlapping_props(input_data, delta_data, prop_path=()):\n """\n Remove properties in input_data that are also in delta_data, and do so\n recursively.\n\n Exception: Never remove 'uid' from input_data, this property is used\n to align traces\n\n Parameters\n ----------\n input_data : dict|list\n delta_data : dict|list\n\n Returns\n -------\n list[tuple[str|int]]\n List of removed property path tuples\n """\n\n # Initialize removed\n # ------------------\n # This is the list of path tuples to the properties that were\n # removed from input_data\n removed = []\n\n # Handle dict\n # -----------\n if isinstance(input_data, dict):\n assert isinstance(delta_data, dict)\n\n for p, delta_val in delta_data.items():\n if isinstance(delta_val, dict) or BaseFigure._is_dict_list(delta_val):\n if p in input_data:\n # ### Recurse ###\n input_val = input_data[p]\n recur_prop_path = prop_path + (p,)\n recur_removed = BaseFigureWidget._remove_overlapping_props(\n input_val, delta_val, recur_prop_path\n )\n removed.extend(recur_removed)\n\n # Check whether the last property in 
input_val\n # has been removed. If so, remove it entirely\n if not input_val:\n input_data.pop(p)\n removed.append(recur_prop_path)\n\n elif p in input_data and p != "uid":\n # ### Remove property ###\n input_data.pop(p)\n removed.append(prop_path + (p,))\n\n # Handle list\n # -----------\n elif isinstance(input_data, list):\n assert isinstance(delta_data, list)\n\n for i, delta_val in enumerate(delta_data):\n if i >= len(input_data):\n break\n\n input_val = input_data[i]\n if (\n input_val is not None\n and isinstance(delta_val, dict)\n or BaseFigure._is_dict_list(delta_val)\n ):\n # ### Recurse ###\n recur_prop_path = prop_path + (i,)\n recur_removed = BaseFigureWidget._remove_overlapping_props(\n input_val, delta_val, recur_prop_path\n )\n\n removed.extend(recur_removed)\n\n return removed\n\n @staticmethod\n def _transform_data(to_data, from_data, should_remove=True, relayout_path=()):\n """\n Transform to_data into from_data and return relayout-style\n description of the transformation\n\n Parameters\n ----------\n to_data : dict|list\n from_data : dict|list\n\n Returns\n -------\n dict\n relayout-style description of the transformation\n """\n\n # Initialize relayout data\n # ------------------------\n relayout_data = {}\n\n # Handle dict\n # -----------\n if isinstance(to_data, dict):\n # ### Validate from_data ###\n if not isinstance(from_data, dict):\n raise ValueError(\n "Mismatched data types: {to_dict} {from_data}".format(\n to_dict=to_data, from_data=from_data\n )\n )\n\n # ### Add/modify properties ###\n # Loop over props/vals\n for from_prop, from_val in from_data.items():\n # #### Handle compound vals recursively ####\n if isinstance(from_val, dict) or BaseFigure._is_dict_list(from_val):\n # ##### Init property value if needed #####\n if from_prop not in to_data:\n to_data[from_prop] = {} if isinstance(from_val, dict) else []\n\n # ##### Transform property val recursively #####\n input_val = to_data[from_prop]\n relayout_data.update(\n 
BaseFigureWidget._transform_data(\n input_val,\n from_val,\n should_remove=should_remove,\n relayout_path=relayout_path + (from_prop,),\n )\n )\n\n # #### Handle simple vals directly ####\n else:\n if from_prop not in to_data or not BasePlotlyType._vals_equal(\n to_data[from_prop], from_val\n ):\n to_data[from_prop] = from_val\n relayout_path_prop = relayout_path + (from_prop,)\n relayout_data[relayout_path_prop] = from_val\n\n # ### Remove properties ###\n if should_remove:\n for remove_prop in set(to_data.keys()).difference(\n set(from_data.keys())\n ):\n to_data.pop(remove_prop)\n\n # Handle list\n # -----------\n elif isinstance(to_data, list):\n # ### Validate from_data ###\n if not isinstance(from_data, list):\n raise ValueError(\n "Mismatched data types: to_data: {to_data} {from_data}".format(\n to_data=to_data, from_data=from_data\n )\n )\n\n # ### Add/modify properties ###\n # Loop over indexes / elements\n for i, from_val in enumerate(from_data):\n # #### Initialize element if needed ####\n if i >= len(to_data):\n to_data.append(None)\n input_val = to_data[i]\n\n # #### Handle compound element recursively ####\n if input_val is not None and (\n isinstance(from_val, dict) or BaseFigure._is_dict_list(from_val)\n ):\n relayout_data.update(\n BaseFigureWidget._transform_data(\n input_val,\n from_val,\n should_remove=should_remove,\n relayout_path=relayout_path + (i,),\n )\n )\n\n # #### Handle simple elements directly ####\n else:\n if not BasePlotlyType._vals_equal(to_data[i], from_val):\n to_data[i] = from_val\n relayout_data[relayout_path + (i,)] = from_val\n\n return relayout_data\n
.venv\Lib\site-packages\plotly\basewidget.py
basewidget.py
Python
34,931
0.95
0.104146
0.272509
react-lib
430
2023-10-02T13:41:05.119752
GPL-3.0
false
8f9cd0d518af4d5a31027fa103a7c08c
from plotly.utils import _list_repr_elided\n\n\nclass InputDeviceState:\n def __init__(\n self, ctrl=None, alt=None, shift=None, meta=None, button=None, buttons=None, **_\n ):\n self._ctrl = ctrl\n self._alt = alt\n self._meta = meta\n self._shift = shift\n self._button = button\n self._buttons = buttons\n\n def __repr__(self):\n return """\\nInputDeviceState(\n ctrl={ctrl},\n alt={alt},\n shift={shift},\n meta={meta},\n button={button},\n buttons={buttons})""".format(\n ctrl=repr(self.ctrl),\n alt=repr(self.alt),\n meta=repr(self.meta),\n shift=repr(self.shift),\n button=repr(self.button),\n buttons=repr(self.buttons),\n )\n\n @property\n def alt(self):\n """\n Whether alt key pressed\n\n Returns\n -------\n bool\n """\n return self._alt\n\n @property\n def ctrl(self):\n """\n Whether ctrl key pressed\n\n Returns\n -------\n bool\n """\n return self._ctrl\n\n @property\n def shift(self):\n """\n Whether shift key pressed\n\n Returns\n -------\n bool\n """\n return self._shift\n\n @property\n def meta(self):\n """\n Whether meta key pressed\n\n Returns\n -------\n bool\n """\n return self._meta\n\n @property\n def button(self):\n """\n Integer code for the button that was pressed on the mouse to trigger\n the event\n\n - 0: Main button pressed, usually the left button or the\n un-initialized state\n - 1: Auxiliary button pressed, usually the wheel button or the middle\n button (if present)\n - 2: Secondary button pressed, usually the right button\n - 3: Fourth button, typically the Browser Back button\n - 4: Fifth button, typically the Browser Forward button\n\n Returns\n -------\n int\n """\n return self._button\n\n @property\n def buttons(self):\n """\n Integer code for which combination of buttons are pressed on the\n mouse when the event is triggered.\n\n - 0: No button or un-initialized\n - 1: Primary button (usually left)\n - 2: Secondary button (usually right)\n - 4: Auxilary button (usually middle or mouse wheel button)\n - 8: 4th button (typically the 
"Browser Back" button)\n - 16: 5th button (typically the "Browser Forward" button)\n\n Combinations of buttons are represented as the decimal form of the\n bitmask of the values above.\n\n For example, pressing both the primary (1) and auxilary (4) buttons\n will result in a code of 5\n\n Returns\n -------\n int\n """\n return self._buttons\n\n\nclass Points:\n def __init__(self, point_inds=[], xs=[], ys=[], trace_name=None, trace_index=None):\n self._point_inds = point_inds\n self._xs = xs\n self._ys = ys\n self._trace_name = trace_name\n self._trace_index = trace_index\n\n def __repr__(self):\n return """\\nPoints(point_inds={point_inds},\n xs={xs},\n ys={ys},\n trace_name={trace_name},\n trace_index={trace_index})""".format(\n point_inds=_list_repr_elided(\n self.point_inds, indent=len("Points(point_inds=")\n ),\n xs=_list_repr_elided(self.xs, indent=len(" xs=")),\n ys=_list_repr_elided(self.ys, indent=len(" ys=")),\n trace_name=repr(self.trace_name),\n trace_index=repr(self.trace_index),\n )\n\n @property\n def point_inds(self):\n """\n List of selected indexes into the trace's points\n\n Returns\n -------\n list[int]\n """\n return self._point_inds\n\n @property\n def xs(self):\n """\n List of x-coordinates of selected points\n\n Returns\n -------\n list[float]\n """\n return self._xs\n\n @property\n def ys(self):\n """\n List of y-coordinates of selected points\n\n Returns\n -------\n list[float]\n """\n return self._ys\n\n @property\n def trace_name(self):\n """\n Name of the trace\n\n Returns\n -------\n str\n """\n return self._trace_name\n\n @property\n def trace_index(self):\n """\n Index of the trace in the figure\n\n Returns\n -------\n int\n """\n return self._trace_index\n\n\nclass BoxSelector:\n def __init__(self, xrange=None, yrange=None, **_):\n self._type = "box"\n self._xrange = xrange\n self._yrange = yrange\n\n def __repr__(self):\n return """\\nBoxSelector(xrange={xrange},\n yrange={yrange})""".format(xrange=self.xrange, 
yrange=self.yrange)\n\n @property\n def type(self):\n """\n The selector's type\n\n Returns\n -------\n str\n """\n return self._type\n\n @property\n def xrange(self):\n """\n x-axis range extents of the box selection\n\n Returns\n -------\n (float, float)\n """\n return self._xrange\n\n @property\n def yrange(self):\n """\n y-axis range extents of the box selection\n\n Returns\n -------\n (float, float)\n """\n return self._yrange\n\n\nclass LassoSelector:\n def __init__(self, xs=None, ys=None, **_):\n self._type = "lasso"\n self._xs = xs\n self._ys = ys\n\n def __repr__(self):\n return """\\nLassoSelector(xs={xs},\n ys={ys})""".format(\n xs=_list_repr_elided(self.xs, indent=len("LassoSelector(xs=")),\n ys=_list_repr_elided(self.ys, indent=len(" ys=")),\n )\n\n @property\n def type(self):\n """\n The selector's type\n\n Returns\n -------\n str\n """\n return self._type\n\n @property\n def xs(self):\n """\n list of x-axis coordinates of each point in the lasso selection\n boundary\n\n Returns\n -------\n list[float]\n """\n return self._xs\n\n @property\n def ys(self):\n """\n list of y-axis coordinates of each point in the lasso selection\n boundary\n\n Returns\n -------\n list[float]\n """\n return self._ys\n
.venv\Lib\site-packages\plotly\callbacks.py
callbacks.py
Python
6,468
0.85
0.108844
0
node-utils
635
2025-02-04T02:11:22.793417
GPL-3.0
false
d5415ee394b5265a389d2e96db20bd80
import os\n\n\ndef pytest_ignore_collect(path):\n # Ignored files, most of them are raising a chart studio error\n ignored_paths = [\n "exploding_module.py",\n "chunked_requests.py",\n "v2.py",\n "v1.py",\n "presentation_objs.py",\n "widgets.py",\n "dashboard_objs.py",\n "grid_objs.py",\n "config.py",\n "presentation_objs.py",\n "session.py",\n ]\n if (\n os.path.basename(str(path)) in ignored_paths\n or "plotly/plotly/plotly/__init__.py" in str(path)\n or "plotly/api/utils.py" in str(path)\n ):\n return True\n
.venv\Lib\site-packages\plotly\conftest.py
conftest.py
Python
612
0.95
0.083333
0.045455
react-lib
683
2024-06-20T22:00:37.584470
BSD-3-Clause
true
e435952f93fa90275cab4ddc219ac1e6
from _plotly_utils.exceptions import PlotlyError # noqa: F401\n
.venv\Lib\site-packages\plotly\exceptions.py
exceptions.py
Python
63
0.75
0
0
vue-tools
609
2025-03-24T15:02:20.191585
GPL-3.0
false
bc9775dd844de2bf720e26b8baff03b7
from _plotly_utils.files import PLOTLY_DIR, ensure_writable_plotly_dir # noqa: F401\n
.venv\Lib\site-packages\plotly\files.py
files.py
Python
85
0.75
0
0
react-lib
862
2023-07-28T13:39:56.145262
MIT
false
783978d270c5cdcb9ca15752ec77b115
from .basedatatypes import BaseFigure\n\n\nclass FigureWidget(BaseFigure):\n """\n FigureWidget stand-in for use when anywidget is not installed. The only purpose\n of this class is to provide something to import as\n `plotly.graph_objs.FigureWidget` when anywidget is not installed. This class\n simply raises an informative error message when the constructor is called\n """\n\n def __init__(self, *args, **kwargs):\n raise ImportError("Please install anywidget to use the FigureWidget class")\n
.venv\Lib\site-packages\plotly\missing_anywidget.py
missing_anywidget.py
Python
512
0.85
0.461538
0
vue-tools
67
2024-02-13T22:35:17.797195
Apache-2.0
false
839e18f39ae984d1ce0ef343b51af965
from _plotly_utils.optional_imports import get_module # noqa: F401\n
.venv\Lib\site-packages\plotly\optional_imports.py
optional_imports.py
Python
68
0.75
0
0
vue-tools
703
2024-07-03T03:46:13.570206
BSD-3-Clause
false
6b5d055af804cd447adaed46955ba802
from .basedatatypes import Undefined\nfrom .optional_imports import get_module\n\nnp = get_module("numpy")\n\n\ndef _py_to_js(v, widget_manager):\n """\n Python -> Javascript ipywidget serializer\n\n This function must repalce all objects that the ipywidget library\n can't serialize natively (e.g. numpy arrays) with serializable\n representations\n\n Parameters\n ----------\n v\n Object to be serialized\n widget_manager\n ipywidget widget_manager (unused)\n\n Returns\n -------\n any\n Value that the ipywidget library can serialize natively\n """\n\n # Handle dict recursively\n # -----------------------\n if isinstance(v, dict):\n return {k: _py_to_js(v, widget_manager) for k, v in v.items()}\n\n # Handle list/tuple recursively\n # -----------------------------\n elif isinstance(v, (list, tuple)):\n return [_py_to_js(v, widget_manager) for v in v]\n\n # Handle numpy array\n # ------------------\n elif np is not None and isinstance(v, np.ndarray):\n # Convert 1D numpy arrays with numeric types to memoryviews with\n # datatype and shape metadata.\n if (\n v.ndim == 1\n and v.dtype.kind in ["u", "i", "f"]\n and v.dtype != "int64"\n and v.dtype != "uint64"\n ):\n # We have a numpy array the we can directly map to a JavaScript\n # Typed array\n return {"buffer": memoryview(v), "dtype": str(v.dtype), "shape": v.shape}\n else:\n # Convert all other numpy arrays to lists\n return v.tolist()\n\n # Handle Undefined\n # ----------------\n if v is Undefined:\n return "_undefined_"\n\n # Handle simple value\n # -------------------\n else:\n return v\n\n\ndef _js_to_py(v, widget_manager):\n """\n Javascript -> Python ipywidget deserializer\n\n Parameters\n ----------\n v\n Object to be deserialized\n widget_manager\n ipywidget widget_manager (unused)\n\n Returns\n -------\n any\n Deserialized object for use by the Python side of the library\n """\n # Handle dict\n # -----------\n if isinstance(v, dict):\n return {k: _js_to_py(v, widget_manager) for k, v in v.items()}\n\n # Handle 
list/tuple\n # -----------------\n elif isinstance(v, (list, tuple)):\n return [_js_to_py(v, widget_manager) for v in v]\n\n # Handle Undefined\n # ----------------\n elif isinstance(v, str) and v == "_undefined_":\n return Undefined\n\n # Handle simple value\n # -------------------\n else:\n return v\n\n\n# Custom serializer dict for use in ipywidget traitlet definitions\ncustom_serializers = {"from_json": _js_to_py, "to_json": _py_to_js}\n
.venv\Lib\site-packages\plotly\serializers.py
serializers.py
Python
2,722
0.95
0.12381
0.282353
react-lib
753
2025-03-25T00:02:43.310672
Apache-2.0
false
d453400e157b84cf58d28572d0b745d1
# some functions defined here to avoid numpy import\n\n\ndef _mean(x):\n if len(x) == 0:\n raise ValueError("x must have positive length")\n return float(sum(x)) / len(x)\n\n\ndef _argmin(x):\n return sorted(enumerate(x), key=lambda t: t[1])[0][0]\n\n\ndef _argmax(x):\n return sorted(enumerate(x), key=lambda t: t[1], reverse=True)[0][0]\n\n\ndef _df_anno(xanchor, yanchor, x, y):\n """Default annotation parameters"""\n return dict(xanchor=xanchor, yanchor=yanchor, x=x, y=y, showarrow=False)\n\n\ndef _add_inside_to_position(pos):\n if not ("inside" in pos or "outside" in pos):\n pos.add("inside")\n return pos\n\n\ndef _prepare_position(position, prepend_inside=False):\n if position is None:\n position = "top right"\n pos_str = position\n position = set(position.split(" "))\n if prepend_inside:\n position = _add_inside_to_position(position)\n return position, pos_str\n\n\ndef annotation_params_for_line(shape_type, shape_args, position):\n # all x0, x1, y0, y1 are used to place the annotation, that way it could\n # work with a slanted line\n # even with a slanted line, there are the horizontal and vertical\n # conventions of placing a shape\n x0 = shape_args["x0"]\n x1 = shape_args["x1"]\n y0 = shape_args["y0"]\n y1 = shape_args["y1"]\n X = [x0, x1]\n Y = [y0, y1]\n R = "right"\n T = "top"\n L = "left"\n C = "center"\n B = "bottom"\n M = "middle"\n aY = max(Y)\n iY = min(Y)\n eY = _mean(Y)\n aaY = _argmax(Y)\n aiY = _argmin(Y)\n aX = max(X)\n iX = min(X)\n eX = _mean(X)\n aaX = _argmax(X)\n aiX = _argmin(X)\n position, pos_str = _prepare_position(position)\n if shape_type == "vline":\n if position == set(["top", "left"]):\n return _df_anno(R, T, X[aaY], aY)\n if position == set(["top", "right"]):\n return _df_anno(L, T, X[aaY], aY)\n if position == set(["top"]):\n return _df_anno(C, B, X[aaY], aY)\n if position == set(["bottom", "left"]):\n return _df_anno(R, B, X[aiY], iY)\n if position == set(["bottom", "right"]):\n return _df_anno(L, B, X[aiY], iY)\n if position == 
set(["bottom"]):\n return _df_anno(C, T, X[aiY], iY)\n if position == set(["left"]):\n return _df_anno(R, M, eX, eY)\n if position == set(["right"]):\n return _df_anno(L, M, eX, eY)\n elif shape_type == "hline":\n if position == set(["top", "left"]):\n return _df_anno(L, B, iX, Y[aiX])\n if position == set(["top", "right"]):\n return _df_anno(R, B, aX, Y[aaX])\n if position == set(["top"]):\n return _df_anno(C, B, eX, eY)\n if position == set(["bottom", "left"]):\n return _df_anno(L, T, iX, Y[aiX])\n if position == set(["bottom", "right"]):\n return _df_anno(R, T, aX, Y[aaX])\n if position == set(["bottom"]):\n return _df_anno(C, T, eX, eY)\n if position == set(["left"]):\n return _df_anno(R, M, iX, Y[aiX])\n if position == set(["right"]):\n return _df_anno(L, M, aX, Y[aaX])\n raise ValueError('Invalid annotation position "%s"' % (pos_str,))\n\n\ndef annotation_params_for_rect(shape_type, shape_args, position):\n x0 = shape_args["x0"]\n x1 = shape_args["x1"]\n y0 = shape_args["y0"]\n y1 = shape_args["y1"]\n\n position, pos_str = _prepare_position(position, prepend_inside=True)\n if position == set(["inside", "top", "left"]):\n return _df_anno("left", "top", min([x0, x1]), max([y0, y1]))\n if position == set(["inside", "top", "right"]):\n return _df_anno("right", "top", max([x0, x1]), max([y0, y1]))\n if position == set(["inside", "top"]):\n return _df_anno("center", "top", _mean([x0, x1]), max([y0, y1]))\n if position == set(["inside", "bottom", "left"]):\n return _df_anno("left", "bottom", min([x0, x1]), min([y0, y1]))\n if position == set(["inside", "bottom", "right"]):\n return _df_anno("right", "bottom", max([x0, x1]), min([y0, y1]))\n if position == set(["inside", "bottom"]):\n return _df_anno("center", "bottom", _mean([x0, x1]), min([y0, y1]))\n if position == set(["inside", "left"]):\n return _df_anno("left", "middle", min([x0, x1]), _mean([y0, y1]))\n if position == set(["inside", "right"]):\n return _df_anno("right", "middle", max([x0, x1]), _mean([y0, 
y1]))\n if position == set(["inside"]):\n # TODO: Do we want this?\n return _df_anno("center", "middle", _mean([x0, x1]), _mean([y0, y1]))\n if position == set(["outside", "top", "left"]):\n return _df_anno(\n "right" if shape_type == "vrect" else "left",\n "bottom" if shape_type == "hrect" else "top",\n min([x0, x1]),\n max([y0, y1]),\n )\n if position == set(["outside", "top", "right"]):\n return _df_anno(\n "left" if shape_type == "vrect" else "right",\n "bottom" if shape_type == "hrect" else "top",\n max([x0, x1]),\n max([y0, y1]),\n )\n if position == set(["outside", "top"]):\n return _df_anno("center", "bottom", _mean([x0, x1]), max([y0, y1]))\n if position == set(["outside", "bottom", "left"]):\n return _df_anno(\n "right" if shape_type == "vrect" else "left",\n "top" if shape_type == "hrect" else "bottom",\n min([x0, x1]),\n min([y0, y1]),\n )\n if position == set(["outside", "bottom", "right"]):\n return _df_anno(\n "left" if shape_type == "vrect" else "right",\n "top" if shape_type == "hrect" else "bottom",\n max([x0, x1]),\n min([y0, y1]),\n )\n if position == set(["outside", "bottom"]):\n return _df_anno("center", "top", _mean([x0, x1]), min([y0, y1]))\n if position == set(["outside", "left"]):\n return _df_anno("right", "middle", min([x0, x1]), _mean([y0, y1]))\n if position == set(["outside", "right"]):\n return _df_anno("left", "middle", max([x0, x1]), _mean([y0, y1]))\n raise ValueError("Invalid annotation position %s" % (pos_str,))\n\n\ndef axis_spanning_shape_annotation(annotation, shape_type, shape_args, kwargs):\n """\n annotation: a go.layout.Annotation object, a dict describing an annotation, or None\n shape_type: one of 'vline', 'hline', 'vrect', 'hrect' and determines how the\n x, y, xanchor, and yanchor values are set.\n shape_args: the parameters used to draw the shape, which are used to place the annotation\n kwargs: a dictionary that was the kwargs of a\n _process_multiple_axis_spanning_shapes spanning shapes call. 
Items in this\n dict whose keys start with 'annotation_' will be extracted and the keys with\n the 'annotation_' part stripped off will be used to assign properties of the\n new annotation.\n\n Property precedence:\n The annotation's x, y, xanchor, and yanchor properties are set based on the\n shape_type argument. Each property already specified in the annotation or\n through kwargs will be left as is (not replaced by the value computed using\n shape_type). Note that the xref and yref properties will in general get\n overwritten if the result of this function is passed to an add_annotation\n called with the row and col parameters specified.\n\n Returns an annotation populated with fields based on the\n annotation_position, annotation_ prefixed kwargs or the original annotation\n passed in to this function.\n """\n # set properties based on annotation_ prefixed kwargs\n prefix = "annotation_"\n len_prefix = len(prefix)\n annotation_keys = list(filter(lambda k: k.startswith(prefix), kwargs.keys()))\n # If no annotation or annotation-key is specified, return None as we don't\n # want an annotation in this case\n if annotation is None and len(annotation_keys) == 0:\n return None\n # TODO: Would it be better if annotation were initialized to an instance of\n # go.layout.Annotation ?\n if annotation is None:\n annotation = dict()\n for k in annotation_keys:\n if k == "annotation_position":\n # don't set so that Annotation constructor doesn't complain\n continue\n subk = k[len_prefix:]\n annotation[subk] = kwargs[k]\n # set x, y, xanchor, yanchor based on shape_type and position\n annotation_position = None\n if "annotation_position" in kwargs.keys():\n annotation_position = kwargs["annotation_position"]\n if shape_type.endswith("line"):\n shape_dict = annotation_params_for_line(\n shape_type, shape_args, annotation_position\n )\n elif shape_type.endswith("rect"):\n shape_dict = annotation_params_for_rect(\n shape_type, shape_args, annotation_position\n )\n for k in 
shape_dict.keys():\n # only set property derived from annotation_position if it hasn't already been set\n # see above: this would be better as a go.layout.Annotation then the key\n # would be checked for validity here (otherwise it is checked later,\n # which I guess is ok too)\n if (k not in annotation) or (annotation[k] is None):\n annotation[k] = shape_dict[k]\n return annotation\n\n\ndef split_dict_by_key_prefix(d, prefix):\n """\n Returns two dictionaries, one containing all the items whose keys do not\n start with a prefix and another containing all the items whose keys do start\n with the prefix. Note that the prefix is not removed from the keys.\n """\n no_prefix = dict()\n with_prefix = dict()\n for k in d.keys():\n if k.startswith(prefix):\n with_prefix[k] = d[k]\n else:\n no_prefix[k] = d[k]\n return (no_prefix, with_prefix)\n
.venv\Lib\site-packages\plotly\shapeannotation.py
shapeannotation.py
Python
9,817
0.95
0.292683
0.076233
node-utils
293
2024-03-06T15:21:06.846982
BSD-3-Clause
false
011f482536389f838f9226314b2a4856
import plotly.graph_objects as go\nfrom . import _subplots as _sub\nfrom ._subplots import SubplotXY, SubplotDomain, SubplotRef # noqa: F401\n\n\ndef make_subplots(\n rows=1,\n cols=1,\n shared_xaxes=False,\n shared_yaxes=False,\n start_cell="top-left",\n print_grid=False,\n horizontal_spacing=None,\n vertical_spacing=None,\n subplot_titles=None,\n column_widths=None,\n row_heights=None,\n specs=None,\n insets=None,\n column_titles=None,\n row_titles=None,\n x_title=None,\n y_title=None,\n figure=None,\n **kwargs,\n) -> go.Figure:\n """\n Return an instance of plotly.graph_objs.Figure with predefined subplots\n configured in 'layout'.\n\n Parameters\n ----------\n rows: int (default 1)\n Number of rows in the subplot grid. Must be greater than zero.\n\n cols: int (default 1)\n Number of columns in the subplot grid. Must be greater than zero.\n\n shared_xaxes: boolean or str (default False)\n Assign shared (linked) x-axes for 2D cartesian subplots\n\n - True or 'columns': Share axes among subplots in the same column\n - 'rows': Share axes among subplots in the same row\n - 'all': Share axes across all subplots in the grid.\n\n shared_yaxes: boolean or str (default False)\n Assign shared (linked) y-axes for 2D cartesian subplots\n\n - 'columns': Share axes among subplots in the same column\n - True or 'rows': Share axes among subplots in the same row\n - 'all': Share axes across all subplots in the grid.\n\n start_cell: 'bottom-left' or 'top-left' (default 'top-left')\n Choose the starting cell in the subplot grid used to set the\n domains_grid of the subplots.\n\n - 'top-left': Subplots are numbered with (1, 1) in the top\n left corner\n - 'bottom-left': Subplots are numbererd with (1, 1) in the bottom\n left corner\n\n print_grid: boolean (default False):\n If True, prints a string representation of the plot grid. 
Grid may\n also be printed using the `Figure.print_grid()` method on the\n resulting figure.\n\n horizontal_spacing: float (default 0.2 / cols)\n Space between subplot columns in normalized plot coordinates. Must be\n a float between 0 and 1.\n\n Applies to all columns (use 'specs' subplot-dependents spacing)\n\n vertical_spacing: float (default 0.3 / rows)\n Space between subplot rows in normalized plot coordinates. Must be\n a float between 0 and 1.\n\n Applies to all rows (use 'specs' subplot-dependents spacing)\n\n subplot_titles: list of str or None (default None)\n Title of each subplot as a list in row-major ordering.\n\n Empty strings ("") can be included in the list if no subplot title\n is desired in that space so that the titles are properly indexed.\n\n specs: list of lists of dict or None (default None)\n Per subplot specifications of subplot type, row/column spanning, and\n spacing.\n\n ex1: specs=[[{}, {}], [{'colspan': 2}, None]]\n\n ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]\n\n - Indices of the outer list correspond to subplot grid rows\n starting from the top, if start_cell='top-left',\n or bottom, if start_cell='bottom-left'.\n The number of rows in 'specs' must be equal to 'rows'.\n\n - Indices of the inner lists correspond to subplot grid columns\n starting from the left. The number of columns in 'specs'\n must be equal to 'cols'.\n\n - Each item in the 'specs' list corresponds to one subplot\n in a subplot grid. (N.B. The subplot grid has exactly 'rows'\n times 'cols' cells.)\n\n - Use None for a blank a subplot cell (or to move past a col/row span).\n\n - Note that specs[0][0] has the specs of the 'start_cell' subplot.\n\n - Each item in 'specs' is a dictionary.\n The available keys are:\n * type (string, default 'xy'): Subplot type. 
One of\n - 'xy': 2D Cartesian subplot type for scatter, bar, etc.\n - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.\n - 'polar': Polar subplot for scatterpolar, barpolar, etc.\n - 'ternary': Ternary subplot for scatterternary\n - 'map': Map subplot for scattermap\n - 'mapbox': Mapbox subplot for scattermapbox\n - 'domain': Subplot type for traces that are individually\n positioned. pie, parcoords, parcats, etc.\n - trace type: A trace type which will be used to determine\n the appropriate subplot type for that trace\n\n * secondary_y (bool, default False): If True, create a secondary\n y-axis positioned on the right side of the subplot. Only valid\n if type='xy'.\n * colspan (int, default 1): number of subplot columns\n for this subplot to span.\n * rowspan (int, default 1): number of subplot rows\n for this subplot to span.\n * l (float, default 0.0): padding left of cell\n * r (float, default 0.0): padding right of cell\n * t (float, default 0.0): padding right of cell\n * b (float, default 0.0): padding bottom of cell\n\n - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust\n the spacing in between the subplots.\n\n insets: list of dict or None (default None):\n Inset specifications. 
Insets are subplots that overlay grid subplots\n\n - Each item in 'insets' is a dictionary.\n The available keys are:\n\n * cell (tuple, default=(1,1)): (row, col) index of the\n subplot cell to overlay inset axes onto.\n * type (string, default 'xy'): Subplot type\n * l (float, default=0.0): padding left of inset\n in fraction of cell width\n * w (float or 'to_end', default='to_end') inset width\n in fraction of cell width ('to_end': to cell right edge)\n * b (float, default=0.0): padding bottom of inset\n in fraction of cell height\n * h (float or 'to_end', default='to_end') inset height\n in fraction of cell height ('to_end': to cell top edge)\n\n column_widths: list of numbers or None (default None)\n list of length `cols` of the relative widths of each column of subplots.\n Values are normalized internally and used to distribute overall width\n of the figure (excluding padding) among the columns.\n\n For backward compatibility, may also be specified using the\n `column_width` keyword argument.\n\n row_heights: list of numbers or None (default None)\n list of length `rows` of the relative heights of each row of subplots.\n If start_cell='top-left' then row heights are applied top to bottom.\n Otherwise, if start_cell='bottom-left' then row heights are applied\n bottom to top.\n\n For backward compatibility, may also be specified using the\n `row_width` kwarg. If specified as `row_width`, then the width values\n are applied from bottom to top regardless of the value of start_cell.\n This matches the legacy behavior of the `row_width` argument.\n\n column_titles: list of str or None (default None)\n list of length `cols` of titles to place above the top subplot in\n each column.\n\n row_titles: list of str or None (default None)\n list of length `rows` of titles to place on the right side of each\n row of subplots. If start_cell='top-left' then row titles are\n applied top to bottom. 
Otherwise, if start_cell='bottom-left' then\n row titles are applied bottom to top.\n\n x_title: str or None (default None)\n Title to place below the bottom row of subplots,\n centered horizontally\n\n y_title: str or None (default None)\n Title to place to the left of the left column of subplots,\n centered vertically\n\n figure: go.Figure or None (default None)\n If None, a new go.Figure instance will be created and its axes will be\n populated with those corresponding to the requested subplot geometry and\n this new figure will be returned.\n If a go.Figure instance, the axes will be added to the\n layout of this figure and this figure will be returned. If the figure\n already contains axes, they will be overwritten.\n\n Examples\n --------\n\n Example 1:\n\n >>> # Stack two subplots vertically, and add a scatter trace to each\n >>> from plotly.subplots import make_subplots\n >>> import plotly.graph_objects as go\n >>> fig = make_subplots(rows=2)\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n [ (2,1) xaxis2,yaxis2 ]\n\n >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n or see Figure.add_trace\n\n Example 2:\n\n >>> # Stack a scatter plot\n >>> fig = make_subplots(rows=2, shared_xaxes=True)\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n [ (2,1) xaxis2,yaxis2 ]\n\n >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 3:\n\n >>> # irregular subplot layout (more examples below under 'specs')\n >>> fig = make_subplots(rows=2, cols=2,\n ... specs=[[{}, {}],\n ... 
[{'colspan': 2}, None]])\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ] [ (1,2) xaxis2,yaxis2 ]\n [ (2,1) xaxis3,yaxis3 - ]\n\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 4:\n\n >>> # insets\n >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n\n With insets:\n [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]\n\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS\n Figure(...)\n\n Example 5:\n\n >>> # include subplot titles\n >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 6:\n\n Subplot with mixed subplot types\n\n >>> fig = make_subplots(rows=2, cols=2,\n ... specs=[[{'type': 'xy'}, {'type': 'polar'}],\n ... [{'type': 'scene'}, {'type': 'ternary'}]])\n\n >>> fig.add_traces(\n ... [go.Scatter(y=[2, 3, 1]),\n ... go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),\n ... go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),\n ... go.Scatterternary(a=[0.1, 0.2, 0.1],\n ... b=[0.2, 0.3, 0.1],\n ... c=[0.7, 0.5, 0.8])],\n ... rows=[1, 1, 2, 2],\n ... 
cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS\n Figure(...)\n """\n\n return _sub.make_subplots(\n rows,\n cols,\n shared_xaxes,\n shared_yaxes,\n start_cell,\n print_grid,\n horizontal_spacing,\n vertical_spacing,\n subplot_titles,\n column_widths,\n row_heights,\n specs,\n insets,\n column_titles,\n row_titles,\n x_title,\n y_title,\n figure,\n **kwargs,\n )\n
.venv\Lib\site-packages\plotly\subplots.py
subplots.py
Python
12,134
0.95
0.061538
0.062257
awesome-app
29
2025-03-28T07:16:10.516542
GPL-3.0
false
2e1852e1f653937579686d1db863fa0e
"""\ntools\n=====\n\nFunctions that USERS will possibly want access to.\n\n"""\n\nimport json\nimport warnings\n\nimport os\n\nfrom plotly import exceptions, optional_imports\nfrom plotly.files import PLOTLY_DIR\n\nDEFAULT_PLOTLY_COLORS = [\n "rgb(31, 119, 180)",\n "rgb(255, 127, 14)",\n "rgb(44, 160, 44)",\n "rgb(214, 39, 40)",\n "rgb(148, 103, 189)",\n "rgb(140, 86, 75)",\n "rgb(227, 119, 194)",\n "rgb(127, 127, 127)",\n "rgb(188, 189, 34)",\n "rgb(23, 190, 207)",\n]\n\n\nREQUIRED_GANTT_KEYS = ["Task", "Start", "Finish"]\nPLOTLY_SCALES = {\n "Greys": ["rgb(0,0,0)", "rgb(255,255,255)"],\n "YlGnBu": ["rgb(8,29,88)", "rgb(255,255,217)"],\n "Greens": ["rgb(0,68,27)", "rgb(247,252,245)"],\n "YlOrRd": ["rgb(128,0,38)", "rgb(255,255,204)"],\n "Bluered": ["rgb(0,0,255)", "rgb(255,0,0)"],\n "RdBu": ["rgb(5,10,172)", "rgb(178,10,28)"],\n "Reds": ["rgb(220,220,220)", "rgb(178,10,28)"],\n "Blues": ["rgb(5,10,172)", "rgb(220,220,220)"],\n "Picnic": ["rgb(0,0,255)", "rgb(255,0,0)"],\n "Rainbow": ["rgb(150,0,90)", "rgb(255,0,0)"],\n "Portland": ["rgb(12,51,131)", "rgb(217,30,30)"],\n "Jet": ["rgb(0,0,131)", "rgb(128,0,0)"],\n "Hot": ["rgb(0,0,0)", "rgb(255,255,255)"],\n "Blackbody": ["rgb(0,0,0)", "rgb(160,200,255)"],\n "Earth": ["rgb(0,0,130)", "rgb(255,255,255)"],\n "Electric": ["rgb(0,0,0)", "rgb(255,250,220)"],\n "Viridis": ["rgb(68,1,84)", "rgb(253,231,37)"],\n}\n\n# color constants for violin plot\nDEFAULT_FILLCOLOR = "#1f77b4"\nDEFAULT_HISTNORM = "probability density"\nALTERNATIVE_HISTNORM = "probability"\n\n\n# Warning format\ndef warning_on_one_line(message, category, filename, lineno, file=None, line=None):\n return "%s:%s: %s:\n\n%s\n\n" % (filename, lineno, category.__name__, message)\n\n\nwarnings.formatwarning = warning_on_one_line\n\n\n### mpl-related tools ###\ndef mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):\n """Convert a matplotlib figure to plotly dictionary and send.\n\n All available information about matplotlib visualizations are 
stored\n within a matplotlib.figure.Figure object. You can create a plot in python\n using matplotlib, store the figure object, and then pass this object to\n the fig_to_plotly function. In the background, mplexporter is used to\n crawl through the mpl figure object for appropriate information. This\n information is then systematically sent to the PlotlyRenderer which\n creates the JSON structure used to make plotly visualizations. Finally,\n these dictionaries are sent to plotly and your browser should open up a\n new tab for viewing! Optionally, if you're working in IPython, you can\n set notebook=True and the PlotlyRenderer will call plotly.iplot instead\n of plotly.plot to have the graph appear directly in the IPython notebook.\n\n Note, this function gives the user access to a simple, one-line way to\n render an mpl figure in plotly. If you need to trouble shoot, you can do\n this step manually by NOT running this fuction and entereing the following:\n\n ===========================================================================\n from plotly.matplotlylib import mplexporter, PlotlyRenderer\n\n # create an mpl figure and store it under a varialble 'fig'\n\n renderer = PlotlyRenderer()\n exporter = mplexporter.Exporter(renderer)\n exporter.run(fig)\n ===========================================================================\n\n You can then inspect the JSON structures by accessing these:\n\n renderer.layout -- a plotly layout dictionary\n renderer.data -- a list of plotly data dictionaries\n """\n matplotlylib = optional_imports.get_module("plotly.matplotlylib")\n if matplotlylib:\n renderer = matplotlylib.PlotlyRenderer()\n matplotlylib.Exporter(renderer).run(fig)\n if resize:\n renderer.resize()\n if strip_style:\n renderer.strip_style()\n if verbose:\n print(renderer.msg)\n return renderer.plotly_fig\n else:\n warnings.warn(\n "To use Plotly's matplotlylib functionality, you'll need to have "\n "matplotlib successfully installed with all of its dependencies. 
"\n "You're getting this error because matplotlib or one of its "\n "dependencies doesn't seem to be installed correctly."\n )\n\n\n### graph_objs related tools ###\n\n\ndef get_subplots(rows=1, columns=1, print_grid=False, **kwargs):\n """Return a dictionary instance with the subplots set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.get_subplots(rows=2)\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x1', yaxis='y1')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 2:\n # print out string showing the subplot grid you've put in the layout\n fig = tools.get_subplots(rows=3, columns=2, print_grid=True)\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows, evenly spaced vertically on the figure.\n\n columns (kwarg, int greater than 0, default=1):\n Number of columns, evenly spaced horizontally on the figure.\n\n horizontal_spacing (kwarg, float in [0,1], default=0.1):\n Space between subplot columns. Applied to all columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.05):\n Space between subplot rows. Applied to all rows.\n\n print_grid (kwarg, True | False, default=False):\n If True, prints a tab-delimited string representation\n of your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / columns):\n Space between subplot columns.\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n\n """\n # TODO: protected until #282\n from plotly.graph_objs import graph_objs\n\n warnings.warn(\n "tools.get_subplots is depreciated. 
Please use tools.make_subplots instead."\n )\n\n # Throw exception for non-integer rows and columns\n if not isinstance(rows, int) or rows <= 0:\n raise Exception("Keyword argument 'rows' must be an int greater than 0")\n if not isinstance(columns, int) or columns <= 0:\n raise Exception("Keyword argument 'columns' must be an int greater than 0")\n\n # Throw exception if non-valid kwarg is sent\n VALID_KWARGS = ["horizontal_spacing", "vertical_spacing"]\n for key in kwargs.keys():\n if key not in VALID_KWARGS:\n raise Exception("Invalid keyword argument: '{0}'".format(key))\n\n # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. rows / columns\n try:\n horizontal_spacing = float(kwargs["horizontal_spacing"])\n except KeyError:\n horizontal_spacing = 0.2 / columns\n try:\n vertical_spacing = float(kwargs["vertical_spacing"])\n except KeyError:\n vertical_spacing = 0.3 / rows\n\n fig = dict(layout=graph_objs.Layout()) # will return this at the end\n plot_width = (1 - horizontal_spacing * (columns - 1)) / columns\n plot_height = (1 - vertical_spacing * (rows - 1)) / rows\n plot_num = 0\n for rrr in range(rows):\n for ccc in range(columns):\n xaxis_name = "xaxis{0}".format(plot_num + 1)\n x_anchor = "y{0}".format(plot_num + 1)\n x_start = (plot_width + horizontal_spacing) * ccc\n x_end = x_start + plot_width\n\n yaxis_name = "yaxis{0}".format(plot_num + 1)\n y_anchor = "x{0}".format(plot_num + 1)\n y_start = (plot_height + vertical_spacing) * rrr\n y_end = y_start + plot_height\n\n xaxis = dict(domain=[x_start, x_end], anchor=x_anchor)\n fig["layout"][xaxis_name] = xaxis\n yaxis = dict(domain=[y_start, y_end], anchor=y_anchor)\n fig["layout"][yaxis_name] = yaxis\n plot_num += 1\n\n if print_grid:\n print("This is the format of your plot grid!")\n grid_string = ""\n plot = 1\n for rrr in range(rows):\n grid_line = ""\n for ccc in range(columns):\n grid_line += "[{0}]\t".format(plot)\n plot += 1\n grid_string = grid_line + "\n" + grid_string\n print(grid_string)\n\n 
return graph_objs.Figure(fig) # forces us to validate what we just did...\n\n\ndef make_subplots(\n rows=1,\n cols=1,\n shared_xaxes=False,\n shared_yaxes=False,\n start_cell="top-left",\n print_grid=None,\n **kwargs,\n):\n """Return an instance of plotly.graph_objs.Figure\n with the subplots domain set in 'layout'.\n\n Example 1:\n # stack two subplots vertically\n fig = tools.make_subplots(rows=2)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n # or see Figure.add_trace\n\n Example 2:\n # subplots with shared x axes\n fig = tools.make_subplots(rows=2, shared_xaxes=True)\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x1,y2 ]\n\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]\n\n Example 3:\n # irregular subplot layout (more examples below under 'specs')\n fig = tools.make_subplots(rows=2, cols=2,\n specs=[[{}, {}],\n [{'colspan': 2}, None]])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ] [ (1,2) x2,y2 ]\n [ (2,1) x3,y3 - ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]\n\n Example 4:\n # insets\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 5:\n # include subplot titles\n fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += 
[Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Example 6:\n # Include subplot title on one plot (but not all)\n fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}],\n subplot_titles=('','Inset'))\n\n This is the format of your plot grid!\n [ (1,1) x1,y1 ]\n\n With insets:\n [ x2,y2 ] over [ (1,1) x1,y1 ]\n\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]\n fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]\n\n Keywords arguments with constant defaults:\n\n rows (kwarg, int greater than 0, default=1):\n Number of rows in the subplot grid.\n\n cols (kwarg, int greater than 0, default=1):\n Number of columns in the subplot grid.\n\n shared_xaxes (kwarg, boolean or list, default=False)\n Assign shared x axes.\n If True, subplots in the same grid column have one common\n shared x-axis at the bottom of the gird.\n\n To assign shared x axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared x axis)\n of cell index tuples.\n\n shared_yaxes (kwarg, boolean or list, default=False)\n Assign shared y axes.\n If True, subplots in the same grid row have one common\n shared y-axis on the left-hand side of the gird.\n\n To assign shared y axes per subplot grid cell (see 'specs'),\n send list (or list of lists, one list per shared y axis)\n of cell index tuples.\n\n start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')\n Choose the starting cell in the subplot grid used to set the\n domains of the subplots.\n\n print_grid (kwarg, boolean, default=True):\n If True, prints a tab-delimited string representation of\n your plot grid.\n\n Keyword arguments with variable defaults:\n\n horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):\n Space between subplot columns.\n Applies to all columns (use 'specs' subplot-dependents spacing)\n\n vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):\n Space between subplot rows.\n Applies to all rows (use 'specs' 
subplot-dependents spacing)\n\n subplot_titles (kwarg, list of strings, default=empty list):\n Title of each subplot.\n "" can be included in the list if no subplot title is desired in\n that space so that the titles are properly indexed.\n\n specs (kwarg, list of lists of dictionaries):\n Subplot specifications.\n\n ex1: specs=[[{}, {}], [{'colspan': 2}, None]]\n\n ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]\n\n - Indices of the outer list correspond to subplot grid rows\n starting from the bottom. The number of rows in 'specs'\n must be equal to 'rows'.\n\n - Indices of the inner lists correspond to subplot grid columns\n starting from the left. The number of columns in 'specs'\n must be equal to 'cols'.\n\n - Each item in the 'specs' list corresponds to one subplot\n in a subplot grid. (N.B. The subplot grid has exactly 'rows'\n times 'cols' cells.)\n\n - Use None for blank a subplot cell (or to move pass a col/row span).\n\n - Note that specs[0][0] has the specs of the 'start_cell' subplot.\n\n - Each item in 'specs' is a dictionary.\n The available keys are:\n\n * is_3d (boolean, default=False): flag for 3d scenes\n * colspan (int, default=1): number of subplot columns\n for this subplot to span.\n * rowspan (int, default=1): number of subplot rows\n for this subplot to span.\n * l (float, default=0.0): padding left of cell\n * r (float, default=0.0): padding right of cell\n * t (float, default=0.0): padding right of cell\n * b (float, default=0.0): padding bottom of cell\n\n - Use 'horizontal_spacing' and 'vertical_spacing' to adjust\n the spacing in between the subplots.\n\n insets (kwarg, list of dictionaries):\n Inset specifications.\n\n - Each item in 'insets' is a dictionary.\n The available keys are:\n\n * cell (tuple, default=(1,1)): (row, col) index of the\n subplot cell to overlay inset axes onto.\n * is_3d (boolean, default=False): flag for 3d scenes\n * l (float, default=0.0): padding left of inset\n in fraction of cell width\n * w (float or 
'to_end', default='to_end') inset width\n in fraction of cell width ('to_end': to cell right edge)\n * b (float, default=0.0): padding bottom of inset\n in fraction of cell height\n * h (float or 'to_end', default='to_end') inset height\n in fraction of cell height ('to_end': to cell top edge)\n\n column_width (kwarg, list of numbers)\n Column_width specifications\n\n - Functions similarly to `column_width` of `plotly.graph_objs.Table`.\n Specify a list that contains numbers where the amount of numbers in\n the list is equal to `cols`.\n\n - The numbers in the list indicate the proportions that each column\n domains take across the full horizontal domain excluding padding.\n\n - For example, if columns_width=[3, 1], horizontal_spacing=0, and\n cols=2, the domains for each column would be [0. 0.75] and [0.75, 1]\n\n row_width (kwargs, list of numbers)\n Row_width specifications\n\n - Functions similarly to `column_width`. Specify a list that contains\n numbers where the amount of numbers in the list is equal to `rows`.\n\n - The numbers in the list indicate the proportions that each row\n domains take along the full vertical domain excluding padding.\n\n - For example, if row_width=[3, 1], vertical_spacing=0, and\n cols=2, the domains for each row from top to botton would be\n [0. 
0.75] and [0.75, 1]\n """\n import plotly.subplots\n\n warnings.warn(\n "plotly.tools.make_subplots is deprecated, "\n "please use plotly.subplots.make_subplots instead",\n DeprecationWarning,\n stacklevel=1,\n )\n\n return plotly.subplots.make_subplots(\n rows=rows,\n cols=cols,\n shared_xaxes=shared_xaxes,\n shared_yaxes=shared_yaxes,\n start_cell=start_cell,\n print_grid=print_grid,\n **kwargs,\n )\n\n\nwarnings.filterwarnings(\n "default", r"plotly\.tools\.make_subplots is deprecated", DeprecationWarning\n)\n\n\ndef get_graph_obj(obj, obj_type=None):\n """Returns a new graph object.\n\n OLD FUNCTION: this will *silently* strip out invalid pieces of the object.\n NEW FUNCTION: no striping of invalid pieces anymore - only raises error\n on unrecognized graph_objs\n """\n # TODO: Deprecate or move. #283\n from plotly.graph_objs import graph_objs\n\n try:\n cls = getattr(graph_objs, obj_type)\n except (AttributeError, KeyError):\n raise exceptions.PlotlyError(\n "'{}' is not a recognized graph_obj.".format(obj_type)\n )\n return cls(obj)\n\n\ndef _replace_newline(obj):\n """Replaces '\n' with '<br>' for all strings in a collection."""\n if isinstance(obj, dict):\n d = dict()\n for key, val in list(obj.items()):\n d[key] = _replace_newline(val)\n return d\n elif isinstance(obj, list):\n temp = list()\n for index, entry in enumerate(obj):\n temp += [_replace_newline(entry)]\n return temp\n elif isinstance(obj, str):\n s = obj.replace("\n", "<br>")\n if s != obj:\n warnings.warn(\n "Looks like you used a newline character: '\\n'.\n\n"\n "Plotly uses a subset of HTML escape characters\n"\n "to do things like newline (<br>), bold (<b></b>),\n"\n "italics (<i></i>), etc. Your newline characters \n"\n "have been converted to '<br>' so they will show \n"\n "up right on your Plotly figure!"\n )\n return s\n else:\n return obj # we return the actual reference... 
but DON'T mutate.\n\n\ndef return_figure_from_figure_or_data(figure_or_data, validate_figure):\n from plotly.graph_objs import Figure\n from plotly.basedatatypes import BaseFigure\n\n validated = False\n if isinstance(figure_or_data, dict):\n figure = figure_or_data\n elif isinstance(figure_or_data, list):\n figure = {"data": figure_or_data}\n elif isinstance(figure_or_data, BaseFigure):\n figure = figure_or_data.to_dict()\n validated = True\n else:\n raise exceptions.PlotlyError(\n "The `figure_or_data` positional "\n "argument must be "\n "`dict`-like, `list`-like, or an instance of plotly.graph_objs.Figure"\n )\n\n if validate_figure and not validated:\n try:\n figure = Figure(**figure).to_dict()\n except exceptions.PlotlyError as err:\n raise exceptions.PlotlyError(\n "Invalid 'figure_or_data' argument. "\n "Plotly will not be able to properly "\n "parse the resulting JSON. If you "\n "want to send this 'figure_or_data' "\n "to Plotly anyway (not recommended), "\n "you can set 'validate=False' as a "\n "plot option.\nHere's why you're "\n "seeing this error:\n\n{0}"\n "".format(err)\n )\n if not figure["data"]:\n raise exceptions.PlotlyEmptyDataError(\n "Empty data list found. Make sure that you populated the "\n "list of data objects you're sending and try again.\n"\n "Questions? Visit support.plot.ly"\n )\n\n return figure\n\n\n# Default colours for finance charts\n_DEFAULT_INCREASING_COLOR = "#3D9970" # http://clrs.cc\n_DEFAULT_DECREASING_COLOR = "#FF4136"\n\nDIAG_CHOICES = ["scatter", "histogram", "box"]\nVALID_COLORMAP_TYPES = ["cat", "seq"]\n\n\n# Deprecations\nclass FigureFactory(object):\n @staticmethod\n def _deprecated(old_method, new_method=None):\n if new_method is None:\n # The method name stayed the same.\n new_method = old_method\n warnings.warn(\n "plotly.tools.FigureFactory.{} is deprecated. 
"\n "Use plotly.figure_factory.{}".format(old_method, new_method)\n )\n\n @staticmethod\n def create_2D_density(*args, **kwargs):\n FigureFactory._deprecated("create_2D_density", "create_2d_density")\n from plotly.figure_factory import create_2d_density\n\n return create_2d_density(*args, **kwargs)\n\n @staticmethod\n def create_annotated_heatmap(*args, **kwargs):\n FigureFactory._deprecated("create_annotated_heatmap")\n from plotly.figure_factory import create_annotated_heatmap\n\n return create_annotated_heatmap(*args, **kwargs)\n\n @staticmethod\n def create_candlestick(*args, **kwargs):\n FigureFactory._deprecated("create_candlestick")\n from plotly.figure_factory import create_candlestick\n\n return create_candlestick(*args, **kwargs)\n\n @staticmethod\n def create_dendrogram(*args, **kwargs):\n FigureFactory._deprecated("create_dendrogram")\n from plotly.figure_factory import create_dendrogram\n\n return create_dendrogram(*args, **kwargs)\n\n @staticmethod\n def create_distplot(*args, **kwargs):\n FigureFactory._deprecated("create_distplot")\n from plotly.figure_factory import create_distplot\n\n return create_distplot(*args, **kwargs)\n\n @staticmethod\n def create_facet_grid(*args, **kwargs):\n FigureFactory._deprecated("create_facet_grid")\n from plotly.figure_factory import create_facet_grid\n\n return create_facet_grid(*args, **kwargs)\n\n @staticmethod\n def create_gantt(*args, **kwargs):\n FigureFactory._deprecated("create_gantt")\n from plotly.figure_factory import create_gantt\n\n return create_gantt(*args, **kwargs)\n\n @staticmethod\n def create_ohlc(*args, **kwargs):\n FigureFactory._deprecated("create_ohlc")\n from plotly.figure_factory import create_ohlc\n\n return create_ohlc(*args, **kwargs)\n\n @staticmethod\n def create_quiver(*args, **kwargs):\n FigureFactory._deprecated("create_quiver")\n from plotly.figure_factory import create_quiver\n\n return create_quiver(*args, **kwargs)\n\n @staticmethod\n def create_scatterplotmatrix(*args, 
**kwargs):\n FigureFactory._deprecated("create_scatterplotmatrix")\n from plotly.figure_factory import create_scatterplotmatrix\n\n return create_scatterplotmatrix(*args, **kwargs)\n\n @staticmethod\n def create_streamline(*args, **kwargs):\n FigureFactory._deprecated("create_streamline")\n from plotly.figure_factory import create_streamline\n\n return create_streamline(*args, **kwargs)\n\n @staticmethod\n def create_table(*args, **kwargs):\n FigureFactory._deprecated("create_table")\n from plotly.figure_factory import create_table\n\n return create_table(*args, **kwargs)\n\n @staticmethod\n def create_trisurf(*args, **kwargs):\n FigureFactory._deprecated("create_trisurf")\n from plotly.figure_factory import create_trisurf\n\n return create_trisurf(*args, **kwargs)\n\n @staticmethod\n def create_violin(*args, **kwargs):\n FigureFactory._deprecated("create_violin")\n from plotly.figure_factory import create_violin\n\n return create_violin(*args, **kwargs)\n\n\ndef get_config_plotly_server_url():\n """\n Function to get the .config file's 'plotly_domain' without importing\n the chart_studio package. This property is needed to compute the default\n value of the plotly.js config plotlyServerURL, so it is independent of\n the chart_studio integration and still needs to live in\n\n Returns\n -------\n str\n """\n config_file = os.path.join(PLOTLY_DIR, ".config")\n default_server_url = "https://plot.ly"\n if not os.path.exists(config_file):\n return default_server_url\n with open(config_file, "rt") as f:\n try:\n config_dict = json.load(f)\n if not isinstance(config_dict, dict):\n config_dict = {}\n except Exception:\n # TODO: issue a warning and bubble it up\n config_dict = {}\n\n return config_dict.get("plotly_domain", default_server_url)\n
.venv\Lib\site-packages\plotly\tools.py
tools.py
Python
24,915
0.95
0.102817
0.068841
python-kit
699
2023-12-31T04:29:06.436852
BSD-3-Clause
false
c057494d98c1a018d8aab77127a47479
import textwrap\nfrom pprint import PrettyPrinter\n\nfrom _plotly_utils.utils import NotEncodable, PlotlyJSONEncoder, get_module # noqa: F401\nfrom _plotly_utils.data_utils import image_array_to_data_uri # noqa: F401\n\n\n# Pretty printing\ndef _list_repr_elided(v, threshold=200, edgeitems=3, indent=0, width=80):\n """\n Return a string representation for of a list where list is elided if\n it has more than n elements\n\n Parameters\n ----------\n v : list\n Input list\n threshold :\n Maximum number of elements to display\n\n Returns\n -------\n str\n """\n if isinstance(v, list):\n open_char, close_char = "[", "]"\n elif isinstance(v, tuple):\n open_char, close_char = "(", ")"\n else:\n raise ValueError("Invalid value of type: %s" % type(v))\n\n if len(v) <= threshold:\n disp_v = v\n else:\n disp_v = list(v[:edgeitems]) + ["..."] + list(v[-edgeitems:])\n\n v_str = open_char + ", ".join([str(e) for e in disp_v]) + close_char\n\n v_wrapped = "\n".join(\n textwrap.wrap(\n v_str,\n width=width,\n initial_indent=" " * (indent + 1),\n subsequent_indent=" " * (indent + 1),\n )\n ).strip()\n return v_wrapped\n\n\nclass ElidedWrapper(object):\n """\n Helper class that wraps values of certain types and produces a custom\n __repr__() that may be elided and is suitable for use during pretty\n printing\n """\n\n def __init__(self, v, threshold, indent):\n self.v = v\n self.indent = indent\n self.threshold = threshold\n\n @staticmethod\n def is_wrappable(v):\n numpy = get_module("numpy")\n if isinstance(v, (list, tuple)) and len(v) > 0 and not isinstance(v[0], dict):\n return True\n elif numpy and isinstance(v, numpy.ndarray):\n return True\n elif isinstance(v, str):\n return True\n else:\n return False\n\n def __repr__(self):\n numpy = get_module("numpy")\n if isinstance(self.v, (list, tuple)):\n # Handle lists/tuples\n res = _list_repr_elided(\n self.v, threshold=self.threshold, indent=self.indent\n )\n return res\n elif numpy and isinstance(self.v, numpy.ndarray):\n # Handle 
numpy arrays\n\n # Get original print opts\n orig_opts = numpy.get_printoptions()\n\n # Set threshold to self.max_list_elements\n numpy.set_printoptions(\n **dict(orig_opts, threshold=self.threshold, edgeitems=3, linewidth=80)\n )\n\n res = self.v.__repr__()\n\n # Add indent to all but the first line\n res_lines = res.split("\n")\n res = ("\n" + " " * self.indent).join(res_lines)\n\n # Restore print opts\n numpy.set_printoptions(**orig_opts)\n return res\n elif isinstance(self.v, str):\n # Handle strings\n if len(self.v) > 80:\n return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"\n else:\n return self.v.__repr__()\n else:\n return self.v.__repr__()\n\n\nclass ElidedPrettyPrinter(PrettyPrinter):\n """\n PrettyPrinter subclass that elides long lists/arrays/strings\n """\n\n def __init__(self, *args, **kwargs):\n self.threshold = kwargs.pop("threshold", 200)\n PrettyPrinter.__init__(self, *args, **kwargs)\n\n def _format(self, val, stream, indent, allowance, context, level):\n if ElidedWrapper.is_wrappable(val):\n elided_val = ElidedWrapper(val, self.threshold, indent)\n\n return self._format(elided_val, stream, indent, allowance, context, level)\n else:\n return PrettyPrinter._format(\n self, val, stream, indent, allowance, context, level\n )\n\n\ndef node_generator(node, path=()):\n """\n General, node-yielding generator.\n\n Yields (node, path) tuples when it finds values that are dict\n instances.\n\n A path is a sequence of hashable values that can be used as either keys to\n a mapping (dict) or indices to a sequence (list). A path is always wrt to\n some object. Given an object, a path explains how to get from the top level\n of that object to a nested value in the object.\n\n :param (dict) node: Part of a dict to be traversed.\n :param (tuple[str]) path: Defines the path of the current node.\n :return: (Generator)\n\n Example:\n\n >>> for node, path in node_generator({'a': {'b': 5}}):\n ... 
print(node, path)\n {'a': {'b': 5}} ()\n {'b': 5} ('a',)\n\n """\n if not isinstance(node, dict):\n return # in case it's called with a non-dict node at top level\n yield node, path\n for key, val in node.items():\n if isinstance(val, dict):\n for item in node_generator(val, path + (key,)):\n yield item\n\n\ndef get_by_path(obj, path):\n """\n Iteratively get on obj for each key in path.\n\n :param (list|dict) obj: The top-level object.\n :param (tuple[str]|tuple[int]) path: Keys to access parts of obj.\n\n :return: (*)\n\n Example:\n\n >>> figure = {'data': [{'x': [5]}]}\n >>> path = ('data', 0, 'x')\n >>> get_by_path(figure, path)\n [5]\n """\n for key in path:\n obj = obj[key]\n return obj\n\n\ndef decode_unicode(coll):\n if isinstance(coll, list):\n for no, entry in enumerate(coll):\n if isinstance(entry, (dict, list)):\n coll[no] = decode_unicode(entry)\n else:\n if isinstance(entry, str):\n try:\n coll[no] = str(entry)\n except UnicodeEncodeError:\n pass\n elif isinstance(coll, dict):\n keys, vals = list(coll.keys()), list(coll.values())\n for key, val in zip(keys, vals):\n if isinstance(val, (dict, list)):\n coll[key] = decode_unicode(val)\n elif isinstance(val, str):\n try:\n coll[key] = str(val)\n except UnicodeEncodeError:\n pass\n coll[str(key)] = coll.pop(key)\n return coll\n
.venv\Lib\site-packages\plotly\utils.py
utils.py
Python
6,233
0.95
0.177885
0.053254
node-utils
158
2025-04-07T15:36:54.992140
GPL-3.0
false
3699987ca132bd7358ecdac3549a619c
from _plotly_utils.basevalidators import LiteralValidator\nimport _plotly_utils.basevalidators as basevalidators\nimport json\nimport os.path as opath\n\nDERIVED_CLASSES = {\n "DataValidator": "data",\n "LayoutValidator": "layout",\n}\n\n\nclass ValidatorCache(object):\n _cache = {}\n _json_cache = None\n\n @staticmethod\n def get_validator(parent_path, prop_name):\n if ValidatorCache._json_cache is None:\n # Load the JSON validator params from the file\n validator_json_path = opath.join(\n opath.dirname(__file__), "validators", "_validators.json"\n )\n if not opath.exists(validator_json_path):\n raise FileNotFoundError(\n f"Validator JSON file not found: {validator_json_path}"\n )\n with open(validator_json_path, "r") as f:\n ValidatorCache._json_cache = json.load(f)\n\n key = (parent_path, prop_name)\n if key not in ValidatorCache._cache:\n if "." not in parent_path and prop_name == "type":\n # Special case for .type property of traces\n validator = LiteralValidator("type", parent_path, parent_path)\n else:\n lookup_name = None\n if parent_path == "layout":\n from .graph_objects import Layout\n\n match = Layout._subplotid_prop_re.match(prop_name)\n if match:\n lookup_name = match.group(1)\n\n lookup_name = lookup_name or prop_name\n lookup = f"{parent_path}.{lookup_name}" if parent_path else lookup_name\n\n validator_item = ValidatorCache._json_cache.get(lookup)\n validator_classname = validator_item["superclass"]\n if validator_classname in DERIVED_CLASSES:\n # If the superclass is a derived class, we need to get the base class\n # and pass the derived class name as a parameter\n base_item = ValidatorCache._json_cache.get(\n DERIVED_CLASSES[validator_classname]\n )\n validator_params = base_item["params"]\n validator_params.update(validator_item["params"])\n validator_classname = base_item["superclass"]\n else:\n validator_params = validator_item["params"]\n validator_params["plotly_name"] = prop_name\n validator_class = getattr(basevalidators, 
validator_classname)\n\n validator = validator_class(**validator_params)\n ValidatorCache._cache[key] = validator\n\n return ValidatorCache._cache[key]\n
.venv\Lib\site-packages\plotly\validator_cache.py
validator_cache.py
Python
2,732
0.95
0.212121
0.071429
vue-tools
119
2025-03-13T04:51:04.948699
MIT
false
862a3c7941856c261242bc0bf8123742
# Constants\n# ---------\n# Subplot types that are each individually positioned with a domain\n#\n# Each of these subplot types has a `domain` property with `x`/`y`\n# properties.\n# Note that this set does not contain `xaxis`/`yaxis` because these behave a\n# little differently.\nimport collections\n\n_single_subplot_types = {"scene", "geo", "polar", "ternary", "map", "mapbox"}\n_subplot_types = set.union(_single_subplot_types, {"xy", "domain"})\n\n# For most subplot types, a trace is associated with a particular subplot\n# using a trace property with a name that matches the subplot type. For\n# example, a `scatter3d.scene` property set to `'scene2'` associates a\n# scatter3d trace with the second `scene` subplot in the figure.\n#\n# There are a few subplot types that don't follow this pattern, and instead\n# the trace property is just named `subplot`. For example setting\n# the `scatterpolar.subplot` property to `polar3` associates the scatterpolar\n# trace with the third polar subplot in the figure\n_subplot_prop_named_subplot = {"polar", "ternary", "map", "mapbox"}\n\n\n# Named tuple to hold an xaxis/yaxis pair that represent a single subplot\nSubplotXY = collections.namedtuple("SubplotXY", ("xaxis", "yaxis"))\nSubplotDomain = collections.namedtuple("SubplotDomain", ("x", "y"))\n\nSubplotRef = collections.namedtuple(\n "SubplotRef", ("subplot_type", "layout_keys", "trace_kwargs")\n)\n\n\ndef _get_initial_max_subplot_ids():\n max_subplot_ids = {subplot_type: 0 for subplot_type in _single_subplot_types}\n max_subplot_ids["xaxis"] = 0\n max_subplot_ids["yaxis"] = 0\n return max_subplot_ids\n\n\ndef make_subplots(\n rows=1,\n cols=1,\n shared_xaxes=False,\n shared_yaxes=False,\n start_cell="top-left",\n print_grid=False,\n horizontal_spacing=None,\n vertical_spacing=None,\n subplot_titles=None,\n column_widths=None,\n row_heights=None,\n specs=None,\n insets=None,\n column_titles=None,\n row_titles=None,\n x_title=None,\n y_title=None,\n figure=None,\n 
**kwargs,\n):\n """\n Return an instance of plotly.graph_objs.Figure with predefined subplots\n configured in 'layout'.\n\n Parameters\n ----------\n rows: int (default 1)\n Number of rows in the subplot grid. Must be greater than zero.\n\n cols: int (default 1)\n Number of columns in the subplot grid. Must be greater than zero.\n\n shared_xaxes: boolean or str (default False)\n Assign shared (linked) x-axes for 2D cartesian subplots\n\n - True or 'columns': Share axes among subplots in the same column\n - 'rows': Share axes among subplots in the same row\n - 'all': Share axes across all subplots in the grid.\n\n shared_yaxes: boolean or str (default False)\n Assign shared (linked) y-axes for 2D cartesian subplots\n\n - 'columns': Share axes among subplots in the same column\n - True or 'rows': Share axes among subplots in the same row\n - 'all': Share axes across all subplots in the grid.\n\n start_cell: 'bottom-left' or 'top-left' (default 'top-left')\n Choose the starting cell in the subplot grid used to set the\n domains_grid of the subplots.\n\n - 'top-left': Subplots are numbered with (1, 1) in the top\n left corner\n - 'bottom-left': Subplots are numbererd with (1, 1) in the bottom\n left corner\n\n print_grid: boolean (default True):\n If True, prints a string representation of the plot grid. Grid may\n also be printed using the `Figure.print_grid()` method on the\n resulting figure.\n\n horizontal_spacing: float (default 0.2 / cols)\n Space between subplot columns in normalized plot coordinates. Must be\n a float between 0 and 1.\n\n Applies to all columns (use 'specs' subplot-dependents spacing)\n\n vertical_spacing: float (default 0.3 / rows)\n Space between subplot rows in normalized plot coordinates. 
Must be\n a float between 0 and 1.\n\n Applies to all rows (use 'specs' subplot-dependents spacing)\n\n subplot_titles: list of str or None (default None)\n Title of each subplot as a list in row-major ordering.\n\n Empty strings ("") can be included in the list if no subplot title\n is desired in that space so that the titles are properly indexed.\n\n specs: list of lists of dict or None (default None)\n Per subplot specifications of subplot type, row/column spanning, and\n spacing.\n\n ex1: specs=[[{}, {}], [{'colspan': 2}, None]]\n\n ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]\n\n - Indices of the outer list correspond to subplot grid rows\n starting from the top, if start_cell='top-left',\n or bottom, if start_cell='bottom-left'.\n The number of rows in 'specs' must be equal to 'rows'.\n\n - Indices of the inner lists correspond to subplot grid columns\n starting from the left. The number of columns in 'specs'\n must be equal to 'cols'.\n\n - Each item in the 'specs' list corresponds to one subplot\n in a subplot grid. (N.B. The subplot grid has exactly 'rows'\n times 'cols' cells.)\n\n - Use None for a blank a subplot cell (or to move past a col/row span).\n\n - Note that specs[0][0] has the specs of the 'start_cell' subplot.\n\n - Each item in 'specs' is a dictionary.\n The available keys are:\n * type (string, default 'xy'): Subplot type. One of\n - 'xy': 2D Cartesian subplot type for scatter, bar, etc.\n - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.\n - 'polar': Polar subplot for scatterpolar, barpolar, etc.\n - 'ternary': Ternary subplot for scatterternary\n - 'map': Map subplot for scattermap, choroplethmap and densitymap\n - 'mapbox': Mapbox subplot for scattermapbox, choroplethmapbox and densitymapbox\n - 'domain': Subplot type for traces that are individually\n positioned. 
pie, parcoords, parcats, etc.\n - trace type: A trace type which will be used to determine\n the appropriate subplot type for that trace\n\n * secondary_y (bool, default False): If True, create a secondary\n y-axis positioned on the right side of the subplot. Only valid\n if type='xy'.\n * colspan (int, default 1): number of subplot columns\n for this subplot to span.\n * rowspan (int, default 1): number of subplot rows\n for this subplot to span.\n * l (float, default 0.0): padding left of cell\n * r (float, default 0.0): padding right of cell\n * t (float, default 0.0): padding right of cell\n * b (float, default 0.0): padding bottom of cell\n\n - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust\n the spacing in between the subplots.\n\n insets: list of dict or None (default None):\n Inset specifications. Insets are subplots that overlay grid subplots\n\n - Each item in 'insets' is a dictionary.\n The available keys are:\n\n * cell (tuple, default=(1,1)): (row, col) index of the\n subplot cell to overlay inset axes onto.\n * type (string, default 'xy'): Subplot type\n * l (float, default=0.0): padding left of inset\n in fraction of cell width\n * w (float or 'to_end', default='to_end') inset width\n in fraction of cell width ('to_end': to cell right edge)\n * b (float, default=0.0): padding bottom of inset\n in fraction of cell height\n * h (float or 'to_end', default='to_end') inset height\n in fraction of cell height ('to_end': to cell top edge)\n\n column_widths: list of numbers or None (default None)\n list of length `cols` of the relative widths of each column of subplots.\n Values are normalized internally and used to distribute overall width\n of the figure (excluding padding) among the columns.\n\n For backward compatibility, may also be specified using the\n `column_width` keyword argument.\n\n row_heights: list of numbers or None (default None)\n list of length `rows` of the relative heights of each row of subplots.\n If 
start_cell='top-left' then row heights are applied top to bottom.\n Otherwise, if start_cell='bottom-left' then row heights are applied\n bottom to top.\n\n For backward compatibility, may also be specified using the\n `row_width` kwarg. If specified as `row_width`, then the width values\n are applied from bottom to top regardless of the value of start_cell.\n This matches the legacy behavior of the `row_width` argument.\n\n column_titles: list of str or None (default None)\n list of length `cols` of titles to place above the top subplot in\n each column.\n\n row_titles: list of str or None (default None)\n list of length `rows` of titles to place on the right side of each\n row of subplots. If start_cell='top-left' then row titles are\n applied top to bottom. Otherwise, if start_cell='bottom-left' then\n row titles are applied bottom to top.\n\n x_title: str or None (default None)\n Title to place below the bottom row of subplots,\n centered horizontally\n\n y_title: str or None (default None)\n Title to place to the left of the left column of subplots,\n centered vertically\n\n figure: go.Figure or None (default None)\n If None, a new go.Figure instance will be created and its axes will be\n populated with those corresponding to the requested subplot geometry and\n this new figure will be returned.\n If a go.Figure instance, the axes will be added to the\n layout of this figure and this figure will be returned. 
If the figure\n already contains axes, they will be overwritten.\n\n Examples\n --------\n\n Example 1:\n\n >>> # Stack two subplots vertically, and add a scatter trace to each\n >>> from plotly.subplots import make_subplots\n >>> import plotly.graph_objects as go\n >>> fig = make_subplots(rows=2)\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n [ (2,1) xaxis2,yaxis2 ]\n\n >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n or see Figure.append_trace\n\n Example 2:\n\n >>> # Stack a scatter plot\n >>> fig = make_subplots(rows=2, shared_xaxes=True)\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n [ (2,1) xaxis2,yaxis2 ]\n\n >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 3:\n\n >>> # irregular subplot layout (more examples below under 'specs')\n >>> fig = make_subplots(rows=2, cols=2,\n ... specs=[[{}, {}],\n ... 
[{'colspan': 2}, None]])\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ] [ (1,2) xaxis2,yaxis2 ]\n [ (2,1) xaxis3,yaxis3 - ]\n\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 4:\n\n >>> # insets\n >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])\n\n This is the format of your plot grid:\n [ (1,1) xaxis1,yaxis1 ]\n\n With insets:\n [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]\n\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS\n Figure(...)\n\n Example 5:\n\n >>> # include subplot titles\n >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))\n\n This is the format of your plot grid:\n [ (1,1) x1,y1 ]\n [ (2,1) x2,y2 ]\n\n >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS\n Figure(...)\n >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS\n Figure(...)\n\n Example 6:\n\n Subplot with mixed subplot types\n\n >>> fig = make_subplots(rows=2, cols=2,\n ... specs=[[{'type': 'xy'}, {'type': 'polar'}],\n ... [{'type': 'scene'}, {'type': 'ternary'}]])\n\n >>> fig.add_traces(\n ... [go.Scatter(y=[2, 3, 1]),\n ... go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),\n ... go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),\n ... go.Scatterternary(a=[0.1, 0.2, 0.1],\n ... b=[0.2, 0.3, 0.1],\n ... c=[0.7, 0.5, 0.8])],\n ... rows=[1, 1, 2, 2],\n ... 
cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS\n Figure(...)\n """\n\n import plotly.graph_objs as go\n\n # Handle backward compatibility\n # -----------------------------\n use_legacy_row_heights_order = "row_width" in kwargs\n row_heights = kwargs.pop("row_width", row_heights)\n column_widths = kwargs.pop("column_width", column_widths)\n\n if kwargs:\n raise TypeError(\n "make_subplots() got unexpected keyword argument(s): {}".format(\n list(kwargs)\n )\n )\n\n # Validate coerce inputs\n # ----------------------\n # ### rows ###\n if not isinstance(rows, int) or rows <= 0:\n raise ValueError(\n """\nThe 'rows' argument to make_subplots must be an int greater than 0.\n Received value of type {typ}: {val}""".format(typ=type(rows), val=repr(rows))\n )\n\n # ### cols ###\n if not isinstance(cols, int) or cols <= 0:\n raise ValueError(\n """\nThe 'cols' argument to make_subplots must be an int greater than 0.\n Received value of type {typ}: {val}""".format(typ=type(cols), val=repr(cols))\n )\n\n # ### start_cell ###\n if start_cell == "bottom-left":\n col_dir = 1\n row_dir = 1\n elif start_cell == "top-left":\n col_dir = 1\n row_dir = -1\n else:\n raise ValueError(\n """\nThe 'start_cell` argument to make_subplots must be one of \\n['bottom-left', 'top-left']\n Received value of type {typ}: {val}""".format(\n typ=type(start_cell), val=repr(start_cell)\n )\n )\n\n # ### Helper to validate coerce elements of lists of dictionaries ###\n def _check_keys_and_fill(name, arg, defaults):\n def _checks(item, defaults):\n if item is None:\n return\n if not isinstance(item, dict):\n raise ValueError(\n """\nElements of the '{name}' argument to make_subplots must be dictionaries \\nor None.\n Received value of type {typ}: {val}""".format(\n name=name, typ=type(item), val=repr(item)\n )\n )\n\n for k in item:\n if k not in defaults:\n raise ValueError(\n """\nInvalid key specified in an element of the '{name}' argument to \\nmake_subplots: {k}\n Valid keys include: 
{valid_keys}""".format(\n k=repr(k), name=name, valid_keys=repr(list(defaults))\n )\n )\n for k, v in defaults.items():\n item.setdefault(k, v)\n\n for arg_i in arg:\n if isinstance(arg_i, (list, tuple)):\n # 2D list\n for arg_ii in arg_i:\n _checks(arg_ii, defaults)\n elif isinstance(arg_i, dict):\n # 1D list\n _checks(arg_i, defaults)\n\n # ### specs ###\n if specs is None:\n specs = [[{} for c in range(cols)] for r in range(rows)]\n elif not (\n isinstance(specs, (list, tuple))\n and specs\n and all(isinstance(row, (list, tuple)) for row in specs)\n and len(specs) == rows\n and all(len(row) == cols for row in specs)\n and all(all(v is None or isinstance(v, dict) for v in row) for row in specs)\n ):\n raise ValueError(\n """\nThe 'specs' argument to make_subplots must be a 2D list of dictionaries with \\ndimensions ({rows} x {cols}).\n Received value of type {typ}: {val}""".format(\n rows=rows, cols=cols, typ=type(specs), val=repr(specs)\n )\n )\n\n for row in specs:\n for spec in row:\n # For backward compatibility,\n # convert is_3d flag to type='scene' kwarg\n if spec and spec.pop("is_3d", None):\n spec["type"] = "scene"\n\n spec_defaults = dict(\n type="xy", secondary_y=False, colspan=1, rowspan=1, l=0.0, r=0.0, b=0.0, t=0.0\n )\n _check_keys_and_fill("specs", specs, spec_defaults)\n\n # Validate secondary_y\n has_secondary_y = False\n for row in specs:\n for spec in row:\n if spec is not None:\n has_secondary_y = has_secondary_y or spec["secondary_y"]\n if spec and spec["type"] != "xy" and spec["secondary_y"]:\n raise ValueError(\n """\nThe 'secondary_y' spec property is not supported for subplot of type '{s_typ}'\n 'secondary_y' is only supported for subplots of type 'xy'\n""".format(s_typ=spec["type"])\n )\n\n # ### insets ###\n if insets is None or insets is False:\n insets = []\n elif not (\n isinstance(insets, (list, tuple)) and all(isinstance(v, dict) for v in insets)\n ):\n raise ValueError(\n """\nThe 'insets' argument to make_subplots must be a list 
of dictionaries.\n Received value of type {typ}: {val}""".format(typ=type(insets), val=repr(insets))\n )\n\n if insets:\n for inset in insets:\n if inset and inset.pop("is_3d", None):\n inset["type"] = "scene"\n\n inset_defaults = dict(\n cell=(1, 1), type="xy", l=0.0, w="to_end", b=0.0, h="to_end"\n )\n _check_keys_and_fill("insets", insets, inset_defaults)\n\n # ### shared_xaxes / shared_yaxes\n valid_shared_vals = [None, True, False, "rows", "columns", "all"]\n shared_err_msg = """\nThe {arg} argument to make_subplots must be one of: {valid_vals}\n Received value of type {typ}: {val}"""\n\n if shared_xaxes not in valid_shared_vals:\n val = shared_xaxes\n raise ValueError(\n shared_err_msg.format(\n arg="shared_xaxes",\n valid_vals=valid_shared_vals,\n typ=type(val),\n val=repr(val),\n )\n )\n if shared_yaxes not in valid_shared_vals:\n val = shared_yaxes\n raise ValueError(\n shared_err_msg.format(\n arg="shared_yaxes",\n valid_vals=valid_shared_vals,\n typ=type(val),\n val=repr(val),\n )\n )\n\n def _check_hv_spacing(dimsize, spacing, name, dimvarname, dimname):\n if spacing < 0 or spacing > 1:\n raise ValueError("%s spacing must be between 0 and 1." 
% (name,))\n if dimsize <= 1:\n return\n max_spacing = 1.0 / float(dimsize - 1)\n if spacing > max_spacing:\n raise ValueError(\n """{name} spacing cannot be greater than (1 / ({dimvarname} - 1)) = {max_spacing:f}.\nThe resulting plot would have {dimsize} {dimname} ({dimvarname}={dimsize}).""".format(\n dimvarname=dimvarname,\n name=name,\n dimname=dimname,\n max_spacing=max_spacing,\n dimsize=dimsize,\n )\n )\n\n # ### horizontal_spacing ###\n if horizontal_spacing is None:\n if has_secondary_y:\n horizontal_spacing = 0.4 / cols\n else:\n horizontal_spacing = 0.2 / cols\n # check horizontal_spacing can be satisfied:\n _check_hv_spacing(cols, horizontal_spacing, "Horizontal", "cols", "columns")\n\n # ### vertical_spacing ###\n if vertical_spacing is None:\n if subplot_titles is not None:\n vertical_spacing = 0.5 / rows\n else:\n vertical_spacing = 0.3 / rows\n # check vertical_spacing can be satisfied:\n _check_hv_spacing(rows, vertical_spacing, "Vertical", "rows", "rows")\n\n # ### subplot titles ###\n if subplot_titles is None:\n subplot_titles = [""] * rows * cols\n\n # ### column_widths ###\n if has_secondary_y:\n # Add room for secondary y-axis title\n max_width = 0.94\n elif row_titles:\n # Add a little breathing room between row labels and legend\n max_width = 0.98\n else:\n max_width = 1.0\n\n if column_widths is None:\n widths = [(max_width - horizontal_spacing * (cols - 1)) / cols] * cols\n elif isinstance(column_widths, (list, tuple)) and len(column_widths) == cols:\n cum_sum = float(sum(column_widths))\n widths = []\n for w in column_widths:\n widths.append((max_width - horizontal_spacing * (cols - 1)) * (w / cum_sum))\n else:\n raise ValueError(\n """\nThe 'column_widths' argument to make_subplots must be a list of numbers of \\nlength {cols}.\n Received value of type {typ}: {val}""".format(\n cols=cols, typ=type(column_widths), val=repr(column_widths)\n )\n )\n\n # ### row_heights ###\n if row_heights is None:\n heights = [(1.0 - vertical_spacing * 
(rows - 1)) / rows] * rows\n elif isinstance(row_heights, (list, tuple)) and len(row_heights) == rows:\n cum_sum = float(sum(row_heights))\n heights = []\n for h in row_heights:\n heights.append((1.0 - vertical_spacing * (rows - 1)) * (h / cum_sum))\n if row_dir < 0 and not use_legacy_row_heights_order:\n heights = list(reversed(heights))\n else:\n raise ValueError(\n """\nThe 'row_heights' argument to make_subplots must be a list of numbers of \\nlength {rows}.\n Received value of type {typ}: {val}""".format(\n rows=rows, typ=type(row_heights), val=repr(row_heights)\n )\n )\n\n # ### column_titles / row_titles ###\n if column_titles and not isinstance(column_titles, (list, tuple)):\n raise ValueError(\n """\nThe column_titles argument to make_subplots must be a list or tuple\n Received value of type {typ}: {val}""".format(\n typ=type(column_titles), val=repr(column_titles)\n )\n )\n\n if row_titles and not isinstance(row_titles, (list, tuple)):\n raise ValueError(\n """\nThe row_titles argument to make_subplots must be a list or tuple\n Received value of type {typ}: {val}""".format(\n typ=type(row_titles), val=repr(row_titles)\n )\n )\n\n # Init layout\n # -----------\n layout = go.Layout()\n\n # Build grid reference\n # --------------------\n # Built row/col sequence using 'row_dir' and 'col_dir'\n col_seq = range(cols)[::col_dir]\n row_seq = range(rows)[::row_dir]\n\n # Build 2D array of tuples of the start x and start y coordinate of each\n # subplot\n grid = [\n [\n (\n (sum(widths[:c]) + c * horizontal_spacing),\n (sum(heights[:r]) + r * vertical_spacing),\n )\n for c in col_seq\n ]\n for r in row_seq\n ]\n\n domains_grid = [[None for _ in range(cols)] for _ in range(rows)]\n\n # Initialize subplot reference lists for the grid and insets\n grid_ref = [[None for c in range(cols)] for r in range(rows)]\n\n list_of_domains = [] # added for subplot titles\n\n max_subplot_ids = _get_initial_max_subplot_ids()\n\n # Loop through specs -- (r, c) <-> (row, col)\n for 
r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n if spec is None: # skip over None cells\n continue\n\n # ### Compute x and y domain for subplot ###\n c_spanned = c + spec["colspan"] - 1 # get spanned c\n r_spanned = r + spec["rowspan"] - 1 # get spanned r\n\n # Throw exception if 'colspan' | 'rowspan' is too large for grid\n if c_spanned >= cols:\n raise Exception(\n "Some 'colspan' value is too large for this subplot grid."\n )\n if r_spanned >= rows:\n raise Exception(\n "Some 'rowspan' value is too large for this subplot grid."\n )\n\n # Get x domain using grid and colspan\n x_s = grid[r][c][0] + spec["l"]\n\n x_e = grid[r][c_spanned][0] + widths[c_spanned] - spec["r"]\n x_domain = [x_s, x_e]\n\n # Get y domain (dep. on row_dir) using grid & r_spanned\n if row_dir > 0:\n y_s = grid[r][c][1] + spec["b"]\n y_e = grid[r_spanned][c][1] + heights[r_spanned] - spec["t"]\n else:\n y_s = grid[r_spanned][c][1] + spec["b"]\n y_e = grid[r][c][1] + heights[-1 - r] - spec["t"]\n\n if y_s < 0.0:\n # round for values very close to one\n # handles some floating point errors\n if y_s > -0.01:\n y_s = 0.0\n else:\n raise Exception(\n "A combination of the 'b' values, heights, and "\n "number of subplots too large for this subplot grid."\n )\n if y_s > 1.0:\n # round for values very close to one\n # handles some floating point errors\n if y_s < 1.01:\n y_s = 1.0\n else:\n raise Exception(\n "A combination of the 'b' values, heights, and "\n "number of subplots too large for this subplot grid."\n )\n\n if y_e < 0.0:\n if y_e > -0.01:\n y_e = 0.0\n else:\n raise Exception(\n "A combination of the 't' values, heights, and "\n "number of subplots too large for this subplot grid."\n )\n\n if y_e > 1.0:\n if y_e < 1.01:\n y_e = 1.0\n else:\n raise Exception(\n "A combination of the 't' values, heights, and "\n "number of subplots too large for this subplot grid."\n )\n\n y_domain = [y_s, y_e]\n\n list_of_domains.append(x_domain)\n 
list_of_domains.append(y_domain)\n\n domains_grid[r][c] = [x_domain, y_domain]\n\n # ### construct subplot container ###\n subplot_type = spec["type"]\n secondary_y = spec["secondary_y"]\n subplot_refs = _init_subplot(\n layout, subplot_type, secondary_y, x_domain, y_domain, max_subplot_ids\n )\n grid_ref[r][c] = subplot_refs\n\n _configure_shared_axes(layout, grid_ref, specs, "x", shared_xaxes, row_dir)\n _configure_shared_axes(layout, grid_ref, specs, "y", shared_yaxes, row_dir)\n\n # Build inset reference\n # ---------------------\n # Loop through insets\n insets_ref = [None for inset in range(len(insets))] if insets else None\n if insets:\n for i_inset, inset in enumerate(insets):\n r = inset["cell"][0] - 1\n c = inset["cell"][1] - 1\n\n # Throw exception if r | c is out of range\n if not (0 <= r < rows):\n raise Exception(\n "Some 'cell' row value is out of range. "\n "Note: the starting cell is (1, 1)"\n )\n if not (0 <= c < cols):\n raise Exception(\n "Some 'cell' col value is out of range. 
"\n "Note: the starting cell is (1, 1)"\n )\n\n # Get inset x domain using grid\n x_s = grid[r][c][0] + inset["l"] * widths[c]\n if inset["w"] == "to_end":\n x_e = grid[r][c][0] + widths[c]\n else:\n x_e = x_s + inset["w"] * widths[c]\n x_domain = [x_s, x_e]\n\n # Get inset y domain using grid\n y_s = grid[r][c][1] + inset["b"] * heights[-1 - r]\n if inset["h"] == "to_end":\n y_e = grid[r][c][1] + heights[-1 - r]\n else:\n y_e = y_s + inset["h"] * heights[-1 - r]\n y_domain = [y_s, y_e]\n\n list_of_domains.append(x_domain)\n list_of_domains.append(y_domain)\n\n subplot_type = inset["type"]\n\n subplot_refs = _init_subplot(\n layout, subplot_type, False, x_domain, y_domain, max_subplot_ids\n )\n\n insets_ref[i_inset] = subplot_refs\n\n # Build grid_str\n # This is the message printed when print_grid=True\n grid_str = _build_grid_str(specs, grid_ref, insets, insets_ref, row_seq)\n\n # Add subplot titles\n plot_title_annotations = _build_subplot_title_annotations(\n subplot_titles, list_of_domains\n )\n\n layout["annotations"] = plot_title_annotations\n\n # Add column titles\n if column_titles:\n domains_list = []\n if row_dir > 0:\n for c in range(cols):\n domain_pair = domains_grid[-1][c]\n if domain_pair:\n domains_list.extend(domain_pair)\n else:\n for c in range(cols):\n domain_pair = domains_grid[0][c]\n if domain_pair:\n domains_list.extend(domain_pair)\n\n # Add subplot titles\n column_title_annotations = _build_subplot_title_annotations(\n column_titles, domains_list\n )\n\n layout["annotations"] += tuple(column_title_annotations)\n\n if row_titles:\n domains_list = []\n for r in range(rows):\n domain_pair = domains_grid[r][-1]\n if domain_pair:\n domains_list.extend(domain_pair)\n\n # Add subplot titles\n column_title_annotations = _build_subplot_title_annotations(\n row_titles, domains_list, title_edge="right"\n )\n\n layout["annotations"] += tuple(column_title_annotations)\n\n if x_title:\n domains_list = [(0, max_width), (0, 1)]\n\n # Add subplot titles\n 
column_title_annotations = _build_subplot_title_annotations(\n [x_title], domains_list, title_edge="bottom", offset=30\n )\n\n layout["annotations"] += tuple(column_title_annotations)\n\n if y_title:\n domains_list = [(0, 1), (0, 1)]\n\n # Add subplot titles\n column_title_annotations = _build_subplot_title_annotations(\n [y_title], domains_list, title_edge="left", offset=40\n )\n\n layout["annotations"] += tuple(column_title_annotations)\n\n # Handle displaying grid information\n if print_grid:\n print(grid_str)\n\n # Build resulting figure\n if figure is None:\n figure = go.Figure()\n figure.update_layout(layout)\n\n # Attach subplot grid info to the figure\n figure.__dict__["_grid_ref"] = grid_ref\n figure.__dict__["_grid_str"] = grid_str\n\n return figure\n\n\ndef _configure_shared_axes(layout, grid_ref, specs, x_or_y, shared, row_dir):\n rows = len(grid_ref)\n cols = len(grid_ref[0])\n\n layout_key_ind = ["x", "y"].index(x_or_y)\n\n if row_dir < 0:\n rows_iter = range(rows - 1, -1, -1)\n else:\n rows_iter = range(rows)\n\n def update_axis_matches(first_axis_id, subplot_ref, spec, remove_label):\n if subplot_ref is None:\n return first_axis_id\n\n if x_or_y == "x":\n span = spec["colspan"]\n else:\n span = spec["rowspan"]\n\n if subplot_ref.subplot_type == "xy" and span == 1:\n if first_axis_id is None:\n first_axis_name = subplot_ref.layout_keys[layout_key_ind]\n first_axis_id = first_axis_name.replace("axis", "")\n else:\n axis_name = subplot_ref.layout_keys[layout_key_ind]\n axis_to_match = layout[axis_name]\n axis_to_match.matches = first_axis_id\n if remove_label:\n axis_to_match.showticklabels = False\n\n return first_axis_id\n\n if shared == "columns" or (x_or_y == "x" and shared is True):\n for c in range(cols):\n first_axis_id = None\n ok_to_remove_label = x_or_y == "x"\n for r in rows_iter:\n if not grid_ref[r][c]:\n continue\n subplot_ref = grid_ref[r][c][0]\n spec = specs[r][c]\n first_axis_id = update_axis_matches(\n first_axis_id, subplot_ref, 
def _init_subplot_xy(layout, secondary_y, x_domain, y_domain, max_subplot_ids=None):
    """Add a 2D cartesian subplot (an x/y axis pair) to *layout*.

    Returns a tuple of SubplotRef objects — one for the primary axis pair and,
    when `secondary_y` is True, a second ref for an overlaid right-side
    y-axis.  `max_subplot_ids` is mutated to record the axis counters used.
    """
    if max_subplot_ids is None:
        max_subplot_ids = _get_initial_max_subplot_ids()

    x_cnt = max_subplot_ids["xaxis"] + 1
    y_cnt = max_subplot_ids["yaxis"] + 1

    def _suffix(cnt):
        # The first axis has no numeric suffix ("x"/"xaxis"); later ones do.
        return str(cnt) if cnt > 1 else ""

    # Trace-facing labels (trace.xaxis / trace.yaxis values)
    x_label = "x" + _suffix(x_cnt)
    y_label = "y" + _suffix(y_cnt)

    # Layout container names
    xaxis_name = "xaxis" + _suffix(x_cnt)
    yaxis_name = "yaxis" + _suffix(y_cnt)

    # Each axis anchors against its partner axis
    layout[xaxis_name] = {"domain": x_domain, "anchor": y_label}
    layout[yaxis_name] = {"domain": y_domain, "anchor": x_label}

    subplot_refs = [
        SubplotRef(
            subplot_type="xy",
            layout_keys=(xaxis_name, yaxis_name),
            trace_kwargs={"xaxis": x_label, "yaxis": y_label},
        )
    ]

    if secondary_y:
        # The secondary y-axis overlays the primary one on the right side
        y_cnt += 1
        secondary_yaxis_name = "yaxis" + _suffix(y_cnt)
        secondary_y_label = "y{cnt}".format(cnt=y_cnt)

        subplot_refs.append(
            SubplotRef(
                subplot_type="xy",
                layout_keys=(xaxis_name, secondary_yaxis_name),
                trace_kwargs={"xaxis": x_label, "yaxis": secondary_y_label},
            )
        )

        layout[secondary_yaxis_name] = {
            "anchor": x_label,
            "overlaying": y_label,
            "side": "right",
        }

    # Record the counters consumed by this subplot
    max_subplot_ids["xaxis"] = x_cnt
    max_subplot_ids["yaxis"] = y_cnt

    return tuple(subplot_refs)
"y{cnt}".format(cnt=y_cnt)\n\n # Add secondary y-axis to subplot reference\n subplot_refs.append(\n SubplotRef(\n subplot_type="xy",\n layout_keys=(xaxis_name, secondary_yaxis_name),\n trace_kwargs={"xaxis": x_label, "yaxis": secondary_y_label},\n )\n )\n\n # Add secondary y axis to layout\n secondary_y_axis = {"anchor": y_anchor, "overlaying": y_label, "side": "right"}\n layout[secondary_yaxis_name] = secondary_y_axis\n\n # increment max_subplot_ids\n max_subplot_ids["xaxis"] = x_cnt\n max_subplot_ids["yaxis"] = y_cnt\n\n return tuple(subplot_refs)\n\n\ndef _init_subplot_single(\n layout, subplot_type, x_domain, y_domain, max_subplot_ids=None\n):\n if max_subplot_ids is None:\n max_subplot_ids = _get_initial_max_subplot_ids()\n\n # Add scene to layout\n cnt = max_subplot_ids[subplot_type] + 1\n label = "{subplot_type}{cnt}".format(\n subplot_type=subplot_type, cnt=cnt if cnt > 1 else ""\n )\n scene = dict(domain={"x": x_domain, "y": y_domain})\n layout[label] = scene\n\n trace_key = (\n "subplot" if subplot_type in _subplot_prop_named_subplot else subplot_type\n )\n\n subplot_ref = SubplotRef(\n subplot_type=subplot_type, layout_keys=(label,), trace_kwargs={trace_key: label}\n )\n\n # increment max_subplot_id\n max_subplot_ids[subplot_type] = cnt\n\n return (subplot_ref,)\n\n\ndef _init_subplot_domain(x_domain, y_domain):\n # No change to layout since domain traces are labeled individually\n subplot_ref = SubplotRef(\n subplot_type="domain",\n layout_keys=(),\n trace_kwargs={"domain": {"x": tuple(x_domain), "y": tuple(y_domain)}},\n )\n\n return (subplot_ref,)\n\n\ndef _subplot_type_for_trace_type(trace_type):\n from plotly.validator_cache import ValidatorCache\n\n DataValidator = ValidatorCache.get_validator("", "data")\n\n trace_validator = DataValidator\n if trace_type in trace_validator.class_strs_map:\n # subplot_type is a trace name, find the subplot type for trace\n trace = trace_validator.validate_coerce([{"type": trace_type}])[0]\n if "domain" in trace:\n 
return "domain"\n elif "xaxis" in trace and "yaxis" in trace:\n return "xy"\n elif "geo" in trace:\n return "geo"\n elif "scene" in trace:\n return "scene"\n elif "subplot" in trace:\n for t in _subplot_prop_named_subplot:\n try:\n trace.subplot = t\n return t\n except ValueError:\n pass\n\n return None\n\n\ndef _validate_coerce_subplot_type(subplot_type):\n # Lowercase subplot_type\n orig_subplot_type = subplot_type\n subplot_type = subplot_type.lower()\n\n # Check if it's a named subplot type\n if subplot_type in _subplot_types:\n return subplot_type\n\n # Try to determine subplot type for trace\n subplot_type = _subplot_type_for_trace_type(subplot_type)\n\n if subplot_type is None:\n raise ValueError("Unsupported subplot type: {}".format(repr(orig_subplot_type)))\n else:\n return subplot_type\n\n\ndef _init_subplot(\n layout, subplot_type, secondary_y, x_domain, y_domain, max_subplot_ids=None\n):\n # Normalize subplot type\n subplot_type = _validate_coerce_subplot_type(subplot_type)\n\n if max_subplot_ids is None:\n max_subplot_ids = _get_initial_max_subplot_ids()\n\n # Clamp domain elements between [0, 1].\n # This is only needed to combat numerical precision errors\n # See GH1031\n x_domain = [max(0.0, x_domain[0]), min(1.0, x_domain[1])]\n y_domain = [max(0.0, y_domain[0]), min(1.0, y_domain[1])]\n\n if subplot_type == "xy":\n subplot_refs = _init_subplot_xy(\n layout, secondary_y, x_domain, y_domain, max_subplot_ids\n )\n elif subplot_type in _single_subplot_types:\n subplot_refs = _init_subplot_single(\n layout, subplot_type, x_domain, y_domain, max_subplot_ids\n )\n elif subplot_type == "domain":\n subplot_refs = _init_subplot_domain(x_domain, y_domain)\n else:\n raise ValueError("Unsupported subplot type: {}".format(repr(subplot_type)))\n\n return subplot_refs\n\n\ndef _get_cartesian_label(x_or_y, r, c, cnt):\n # Default label (given strictly by cnt)\n label = "{x_or_y}{cnt}".format(x_or_y=x_or_y, cnt=cnt)\n return label\n\n\ndef 
_build_subplot_title_annotations(\n subplot_titles, list_of_domains, title_edge="top", offset=0\n):\n # If shared_axes is False (default) use list_of_domains\n # This is used for insets and irregular layouts\n # if not shared_xaxes and not shared_yaxes:\n x_dom = list_of_domains[::2]\n y_dom = list_of_domains[1::2]\n subtitle_pos_x = []\n subtitle_pos_y = []\n\n if title_edge == "top":\n text_angle = 0\n xanchor = "center"\n yanchor = "bottom"\n\n for x_domains in x_dom:\n subtitle_pos_x.append(sum(x_domains) / 2.0)\n for y_domains in y_dom:\n subtitle_pos_y.append(y_domains[1])\n\n yshift = offset\n xshift = 0\n elif title_edge == "bottom":\n text_angle = 0\n xanchor = "center"\n yanchor = "top"\n\n for x_domains in x_dom:\n subtitle_pos_x.append(sum(x_domains) / 2.0)\n for y_domains in y_dom:\n subtitle_pos_y.append(y_domains[0])\n\n yshift = -offset\n xshift = 0\n elif title_edge == "right":\n text_angle = 90\n xanchor = "left"\n yanchor = "middle"\n\n for x_domains in x_dom:\n subtitle_pos_x.append(x_domains[1])\n for y_domains in y_dom:\n subtitle_pos_y.append(sum(y_domains) / 2.0)\n\n yshift = 0\n xshift = offset\n elif title_edge == "left":\n text_angle = -90\n xanchor = "right"\n yanchor = "middle"\n\n for x_domains in x_dom:\n subtitle_pos_x.append(x_domains[0])\n for y_domains in y_dom:\n subtitle_pos_y.append(sum(y_domains) / 2.0)\n\n yshift = 0\n xshift = -offset\n else:\n raise ValueError("Invalid annotation edge '{edge}'".format(edge=title_edge))\n\n plot_titles = []\n for index in range(len(subplot_titles)):\n if not subplot_titles[index] or index >= len(subtitle_pos_y):\n pass\n else:\n annot = {\n "y": subtitle_pos_y[index],\n "xref": "paper",\n "x": subtitle_pos_x[index],\n "yref": "paper",\n "text": subplot_titles[index],\n "showarrow": False,\n "font": dict(size=16),\n "xanchor": xanchor,\n "yanchor": yanchor,\n }\n\n if xshift != 0:\n annot["xshift"] = xshift\n\n if yshift != 0:\n annot["yshift"] = yshift\n\n if text_angle != 0:\n 
annot["textangle"] = text_angle\n\n plot_titles.append(annot)\n return plot_titles\n\n\ndef _build_grid_str(specs, grid_ref, insets, insets_ref, row_seq):\n # Compute rows and columns\n rows = len(specs)\n cols = len(specs[0])\n\n # Initialize constants\n sp = " " # space between cell\n s_str = "[ " # cell start string\n e_str = " ]" # cell end string\n\n s_top = "⎡ " # U+23A1\n s_mid = "⎢ " # U+23A2\n s_bot = "⎣ " # U+23A3\n\n e_top = " ⎤" # U+23A4\n e_mid = " ⎟" # U+239F\n e_bot = " ⎦" # U+23A6\n\n colspan_str = " -" # colspan string\n rowspan_str = " :" # rowspan string\n empty_str = " (empty) " # empty cell string\n # Init grid_str with intro message\n grid_str = "This is the format of your plot grid:\n"\n\n # Init tmp list of lists of strings (sorta like 'grid_ref' but w/ strings)\n _tmp = [["" for c in range(cols)] for r in range(rows)]\n\n # Define cell string as function of (r, c) and grid_ref\n def _get_cell_str(r, c, subplot_refs):\n layout_keys = sorted({k for ref in subplot_refs for k in ref.layout_keys})\n\n ref_str = ",".join(layout_keys)\n\n # Replace yaxis2 -> y2\n ref_str = ref_str.replace("axis", "")\n return "({r},{c}) {ref}".format(r=r + 1, c=c + 1, ref=ref_str)\n\n # Find max len of _cell_str, add define a padding function\n cell_len = (\n max(\n [\n len(_get_cell_str(r, c, ref))\n for r, row_ref in enumerate(grid_ref)\n for c, ref in enumerate(row_ref)\n if ref\n ]\n )\n + len(s_str)\n + len(e_str)\n )\n\n def _pad(s, cell_len=cell_len):\n return " " * (cell_len - len(s))\n\n # Loop through specs, fill in _tmp\n for r, spec_row in enumerate(specs):\n for c, spec in enumerate(spec_row):\n ref = grid_ref[r][c]\n if ref is None:\n if _tmp[r][c] == "":\n _tmp[r][c] = empty_str + _pad(empty_str)\n continue\n\n if spec["rowspan"] > 1:\n cell_str = s_top + _get_cell_str(r, c, ref)\n else:\n cell_str = s_str + _get_cell_str(r, c, ref)\n\n if spec["colspan"] > 1:\n for cc in range(1, spec["colspan"] - 1):\n _tmp[r][c + cc] = colspan_str + 
_pad(colspan_str)\n\n if spec["rowspan"] > 1:\n _tmp[r][c + spec["colspan"] - 1] = (\n colspan_str + _pad(colspan_str + e_str)\n ) + e_top\n else:\n _tmp[r][c + spec["colspan"] - 1] = (\n colspan_str + _pad(colspan_str + e_str)\n ) + e_str\n else:\n padding = " " * (cell_len - len(cell_str) - 2)\n if spec["rowspan"] > 1:\n cell_str += padding + e_top\n else:\n cell_str += padding + e_str\n\n if spec["rowspan"] > 1:\n for cc in range(spec["colspan"]):\n for rr in range(1, spec["rowspan"]):\n row_str = rowspan_str + _pad(rowspan_str)\n if cc == 0:\n if rr < spec["rowspan"] - 1:\n row_str = s_mid + row_str[2:]\n else:\n row_str = s_bot + row_str[2:]\n\n if cc == spec["colspan"] - 1:\n if rr < spec["rowspan"] - 1:\n row_str = row_str[:-2] + e_mid\n else:\n row_str = row_str[:-2] + e_bot\n\n _tmp[r + rr][c + cc] = row_str\n\n _tmp[r][c] = cell_str + _pad(cell_str)\n\n # Append grid_str using data from _tmp in the correct order\n for r in row_seq[::-1]:\n grid_str += sp.join(_tmp[r]) + "\n"\n\n # Append grid_str to include insets info\n if insets:\n grid_str += "\nWith insets:\n"\n for i_inset, inset in enumerate(insets):\n r = inset["cell"][0] - 1\n c = inset["cell"][1] - 1\n ref = grid_ref[r][c]\n\n subplot_labels_str = ",".join(insets_ref[i_inset][0].layout_keys)\n\n # Replace, e.g., yaxis2 -> y2\n subplot_labels_str = subplot_labels_str.replace("axis", "")\n\n grid_str += (\n s_str\n + subplot_labels_str\n + e_str\n + " over "\n + s_str\n + _get_cell_str(r, c, ref)\n + e_str\n + "\n"\n )\n return grid_str\n\n\ndef _set_trace_grid_reference(trace, layout, grid_ref, row, col, secondary_y=False):\n if row <= 0:\n raise Exception("Row value is out of range. Note: the starting cell is (1, 1)")\n if col <= 0:\n raise Exception("Col value is out of range. Note: the starting cell is (1, 1)")\n try:\n subplot_refs = grid_ref[row - 1][col - 1]\n except IndexError:\n raise Exception(\n "The (row, col) pair sent is out of "\n "range. 
Use Figure.print_grid to view the "\n "subplot grid. "\n )\n\n if not subplot_refs:\n raise ValueError(\n """\nNo subplot specified at grid position ({row}, {col})""".format(row=row, col=col)\n )\n\n if secondary_y:\n if len(subplot_refs) < 2:\n raise ValueError(\n """\nSubplot with type '{subplot_type}' at grid position ({row}, {col}) was not\ncreated with the secondary_y spec property set to True. See the docstring\nfor the specs argument to plotly.subplots.make_subplots for more information.\n"""\n )\n trace_kwargs = subplot_refs[1].trace_kwargs\n else:\n trace_kwargs = subplot_refs[0].trace_kwargs\n\n for k in trace_kwargs:\n if k not in trace:\n raise ValueError(\n """\\nTrace type '{typ}' is not compatible with subplot type '{subplot_type}'\nat grid position ({row}, {col})\n\nSee the docstring for the specs argument to plotly.subplots.make_subplots\nfor more information on subplot types""".format(\n typ=trace.type,\n subplot_type=subplot_refs[0].subplot_type,\n row=row,\n col=col,\n )\n )\n\n # Update trace reference\n trace.update(trace_kwargs)\n\n\ndef _get_grid_subplot(fig, row, col, secondary_y=False):\n try:\n grid_ref = fig._grid_ref\n except AttributeError:\n raise Exception(\n "In order to reference traces by row and column, "\n "you must first use "\n "plotly.tools.make_subplots "\n "to create the figure with a subplot grid."\n )\n\n rows = len(grid_ref)\n cols = len(grid_ref[0])\n\n # Validate row\n if not isinstance(row, int) or row < 1 or rows < row:\n raise ValueError(\n """\nThe row argument to get_subplot must be an integer where 1 <= row <= {rows}\n Received value of type {typ}: {val}""".format(\n rows=rows, typ=type(row), val=repr(row)\n )\n )\n\n if not isinstance(col, int) or col < 1 or cols < col:\n raise ValueError(\n """\nThe col argument to get_subplot must be an integer where 1 <= row <= {cols}\n Received value of type {typ}: {val}""".format(\n cols=cols, typ=type(col), val=repr(col)\n )\n )\n\n subplot_refs = fig._grid_ref[row - 
1][col - 1]\n if not subplot_refs:\n return None\n\n if secondary_y:\n if len(subplot_refs) > 1:\n layout_keys = subplot_refs[1].layout_keys\n else:\n return None\n else:\n layout_keys = subplot_refs[0].layout_keys\n\n if len(layout_keys) == 0:\n return SubplotDomain(**subplot_refs[0].trace_kwargs["domain"])\n elif len(layout_keys) == 1:\n return fig.layout[layout_keys[0]]\n elif len(layout_keys) == 2:\n return SubplotXY(\n xaxis=fig.layout[layout_keys[0]], yaxis=fig.layout[layout_keys[1]]\n )\n else:\n raise ValueError(\n """\nUnexpected subplot type with layout_keys of {}""".format(layout_keys)\n )\n\n\ndef _get_subplot_ref_for_trace(trace):\n if "domain" in trace:\n return SubplotRef(\n subplot_type="domain",\n layout_keys=(),\n trace_kwargs={"domain": {"x": trace.domain.x, "y": trace.domain.y}},\n )\n\n elif "xaxis" in trace and "yaxis" in trace:\n xaxis_name = "xaxis" + trace.xaxis[1:] if trace.xaxis else "xaxis"\n yaxis_name = "yaxis" + trace.yaxis[1:] if trace.yaxis else "yaxis"\n\n return SubplotRef(\n subplot_type="xy",\n layout_keys=(xaxis_name, yaxis_name),\n trace_kwargs={"xaxis": trace.xaxis, "yaxis": trace.yaxis},\n )\n elif "geo" in trace:\n return SubplotRef(\n subplot_type="geo",\n layout_keys=(trace.geo,),\n trace_kwargs={"geo": trace.geo},\n )\n elif "scene" in trace:\n return SubplotRef(\n subplot_type="scene",\n layout_keys=(trace.scene,),\n trace_kwargs={"scene": trace.scene},\n )\n elif "subplot" in trace:\n for t in _subplot_prop_named_subplot:\n try:\n validator = trace._get_prop_validator("subplot")\n validator.validate_coerce(t)\n return SubplotRef(\n subplot_type=t,\n layout_keys=(trace.subplot,),\n trace_kwargs={"subplot": trace.subplot},\n )\n except ValueError:\n pass\n\n return None\n
.venv\Lib\site-packages\plotly\_subplots.py
_subplots.py
Python
52,346
0.75
0.167433
0.102462
react-lib
178
2023-10-10T18:53:43.969970
Apache-2.0
false
8f1f0ea660ff687b4f794c204f8bbc19
"""\nhttps://plot.ly/python/\n\nPlotly's Python API allows users to programmatically access Plotly's\nserver resources.\n\nThis package is organized as follows:\n\nSubpackages:\n\n- plotly: all functionality that requires access to Plotly's servers\n\n- graph_objs: objects for designing figures and visualizing data\n\n- matplotlylib: tools to convert matplotlib figures\n\nModules:\n\n- tools: some helpful tools that do not require access to Plotly's servers\n\n- utils: functions that you probably won't need, but that subpackages use\n\n- version: holds the current API version\n\n- exceptions: defines our custom exception classes\n\n"""\n\nfrom typing import TYPE_CHECKING\nfrom _plotly_utils.importers import relative_import\nimport importlib.metadata\n\n# This is the version of the plotly package\n__version__ = importlib.metadata.version("plotly")\nversion = __version__\n\nif TYPE_CHECKING:\n from plotly import (\n graph_objs,\n tools,\n utils,\n offline,\n colors,\n io,\n data,\n )\n from plotly.version import __version__\n\n __all__ = [\n "graph_objs",\n "tools",\n "utils",\n "offline",\n "colors",\n "io",\n "data",\n "__version__",\n ]\n\n # Set default template (for >= 3.7 this is done in ploty/io/__init__.py)\n from plotly.io import templates\n\n templates._default = "plotly"\nelse:\n __all__, __getattr__, __dir__ = relative_import(\n __name__,\n [\n ".graph_objs",\n ".graph_objects",\n ".tools",\n ".utils",\n ".offline",\n ".colors",\n ".io",\n ".data",\n ],\n [".version.__version__"],\n )\n\n\ndef plot(data_frame, kind, **kwargs):\n """\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend="plotly"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n """\n from .express import (\n scatter,\n line,\n area,\n bar,\n box,\n histogram,\n violin,\n strip,\n funnel,\n density_contour,\n density_heatmap,\n imshow,\n )\n\n if kind == "scatter":\n new_kwargs = {k: 
kwargs[k] for k in kwargs if k not in ["s", "c"]}\n return scatter(data_frame, **new_kwargs)\n if kind == "line":\n return line(data_frame, **kwargs)\n if kind == "area":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["stacked"]}\n return area(data_frame, **new_kwargs)\n if kind == "bar":\n return bar(data_frame, **kwargs)\n if kind == "barh":\n return bar(data_frame, orientation="h", **kwargs)\n if kind == "box":\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by"]}\n return box(data_frame, **new_kwargs)\n if kind in ["hist", "histogram"]:\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in ["by", "bins"]}\n return histogram(data_frame, **new_kwargs)\n if kind == "violin":\n return violin(data_frame, **kwargs)\n if kind == "strip":\n return strip(data_frame, **kwargs)\n if kind == "funnel":\n return funnel(data_frame, **kwargs)\n if kind == "density_contour":\n return density_contour(data_frame, **kwargs)\n if kind == "density_heatmap":\n return density_heatmap(data_frame, **kwargs)\n if kind == "imshow":\n return imshow(data_frame, **kwargs)\n if kind == "heatmap":\n raise ValueError(\n "kind='heatmap' not supported plotting.backend='plotly'. 
"\n "Please use kind='imshow' or kind='density_heatmap'."\n )\n\n raise NotImplementedError(\n "kind='%s' not yet supported for plotting.backend='plotly'" % kind\n )\n\n\ndef boxplot_frame(data_frame, **kwargs):\n """\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend="plotly"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n """\n from .express import box\n\n skip = ["by", "column", "ax", "fontsize", "rot", "grid", "figsize", "layout"]\n skip += ["return_type"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return box(data_frame, **new_kwargs)\n\n\ndef hist_frame(data_frame, **kwargs):\n """\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend="plotly"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n """\n from .express import histogram\n\n skip = ["column", "by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot"]\n skip += ["ax", "sharex", "sharey", "figsize", "layout", "bins", "legend"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n\n\ndef hist_series(data_frame, **kwargs):\n """\n Pandas plotting backend function, not meant to be called directly.\n To activate, set pandas.options.plotting.backend="plotly"\n See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py\n """\n from .express import histogram\n\n skip = ["by", "grid", "xlabelsize", "xrot", "ylabelsize", "yrot", "ax"]\n skip += ["figsize", "bins", "legend"]\n new_kwargs = {k: kwargs[k] for k in kwargs if k not in skip}\n return histogram(data_frame, **new_kwargs)\n\n\ndef _jupyter_labextension_paths():\n """Called by Jupyter Lab Server to detect if it is a valid labextension and\n to install the extension.\n """\n return [\n {\n "src": "labextension/static",\n "dest": "jupyterlab-plotly",\n }\n 
]\n
.venv\Lib\site-packages\plotly\__init__.py
__init__.py
Python
5,696
0.95
0.216495
0.012422
react-lib
719
2024-04-17T13:04:27.419694
Apache-2.0
false
fcc0d09da9b01ee46849f6816308aa9c
\n\n
.venv\Lib\site-packages\plotly\api\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
185
0.7
0
0
react-lib
906
2025-04-14T04:14:09.339149
BSD-3-Clause
false
c87c348ca2a599b8e7c9d4d09eacfe38
# ruff: noqa: F405\n\n"""For a list of colors available in `plotly.colors`, please see\n\n* the `tutorial on discrete color sequences <https://plotly.com/python/discrete-color/#color-sequences-in-plotly-express>`_\n* the `list of built-in continuous color scales <https://plotly.com/python/builtin-colorscales/>`_\n* the `tutorial on continuous colors <https://plotly.com/python/colorscales/>`_\n\nColor scales and sequences are available within the following namespaces\n\n* cyclical\n* diverging\n* qualitative\n* sequential\n"""\n\nfrom _plotly_utils.colors import * # noqa: F403\n\n__all__ = [\n "named_colorscales",\n "cyclical",\n "diverging",\n "sequential",\n "qualitative",\n "colorbrewer",\n "carto",\n "cmocean",\n "color_parser",\n "colorscale_to_colors",\n "colorscale_to_scale",\n "convert_colors_to_same_type",\n "convert_colorscale_to_rgb",\n "convert_dict_colors_to_same_type",\n "convert_to_RGB_255",\n "find_intermediate_color",\n "hex_to_rgb",\n "label_rgb",\n "make_colorscale",\n "n_colors",\n "sample_colorscale",\n "unconvert_from_RGB_255",\n "unlabel_rgb",\n "validate_colors",\n "validate_colors_dict",\n "validate_colorscale",\n "validate_scale_values",\n "plotlyjs",\n "DEFAULT_PLOTLY_COLORS",\n "PLOTLY_SCALES",\n "get_colorscale",\n]\n
.venv\Lib\site-packages\plotly\colors\__init__.py
__init__.py
Python
1,308
0.95
0
0.177778
react-lib
170
2024-04-06T11:21:52.290347
Apache-2.0
false
11ab0787cf2df8859e6833bfdd2f6074
\n\n
.venv\Lib\site-packages\plotly\colors\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,311
0.8
0
0.538462
python-kit
772
2025-03-27T07:28:07.745987
BSD-3-Clause
false
167cd0fd5ebb6878401956329b82c668
"""\nBuilt-in datasets for demonstration, educational and test purposes.\n"""\n\nimport os\nfrom importlib import import_module\n\nimport narwhals.stable.v1 as nw\n\nAVAILABLE_BACKENDS = {"pandas", "polars", "pyarrow", "modin", "cudf"}\nBACKENDS_WITH_INDEX_SUPPORT = {"pandas", "modin", "cudf"}\n\n\ndef gapminder(\n datetimes=False,\n centroids=False,\n year=None,\n pretty_names=False,\n return_type="pandas",\n):\n """\n Each row represents a country on a given year.\n\n https://www.gapminder.org/data/\n\n Parameters\n ----------\n datetimes: bool\n Whether or not 'year' column will converted to datetime type\n\n centroids: bool\n If True, ['centroid_lat', 'centroid_lon'] columns are added\n\n year: int | None\n If provided, the dataset will be filtered for that year\n\n pretty_names: bool\n If True, prettifies the column names\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 1704 rows and the following columns:\n `['country', 'continent', 'year', 'lifeExp', 'pop', 'gdpPercap',\n 'iso_alpha', 'iso_num']`.\n\n If `datetimes` is True, the 'year' column will be a datetime column\n If `centroids` is True, two new columns are added: ['centroid_lat', 'centroid_lon']\n If `year` is an integer, the dataset will be filtered for that year\n """\n df = nw.from_native(\n _get_dataset("gapminder", return_type=return_type), eager_only=True\n )\n if year:\n df = df.filter(nw.col("year") == year)\n if datetimes:\n df = df.with_columns(\n # Concatenate the year value with the literal "-01-01" so that it can be\n # casted to datetime from "%Y-%m-%d" format\n nw.concat_str(\n [nw.col("year").cast(nw.String()), nw.lit("-01-01")]\n ).str.to_datetime(format="%Y-%m-%d")\n )\n if not centroids:\n df = df.drop("centroid_lat", "centroid_lon")\n if pretty_names:\n df = df.rename(\n dict(\n country="Country",\n continent="Continent",\n year="Year",\n lifeExp="Life 
Expectancy",\n gdpPercap="GDP per Capita",\n pop="Population",\n iso_alpha="ISO Alpha Country Code",\n iso_num="ISO Numeric Country Code",\n centroid_lat="Centroid Latitude",\n centroid_lon="Centroid Longitude",\n )\n )\n return df.to_native()\n\n\ndef tips(pretty_names=False, return_type="pandas"):\n """\n Each row represents a restaurant bill.\n\n https://vincentarelbundock.github.io/Rdatasets/doc/reshape2/tips.html\n\n Parameters\n ----------\n pretty_names: bool\n If True, prettifies the column names\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 244 rows and the following columns:\n `['total_bill', 'tip', 'sex', 'smoker', 'day', 'time', 'size']`.\n """\n\n df = nw.from_native(_get_dataset("tips", return_type=return_type), eager_only=True)\n if pretty_names:\n df = df.rename(\n dict(\n total_bill="Total Bill",\n tip="Tip",\n sex="Payer Gender",\n smoker="Smokers at Table",\n day="Day of Week",\n time="Meal",\n size="Party Size",\n )\n )\n return df.to_native()\n\n\ndef iris(return_type="pandas"):\n """\n Each row represents a flower.\n\n https://en.wikipedia.org/wiki/Iris_flower_data_set\n\n Parameters\n ----------\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 150 rows and the following columns:\n `['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species', 'species_id']`.\n """\n return _get_dataset("iris", return_type=return_type)\n\n\ndef wind(return_type="pandas"):\n """\n Each row represents a level of wind intensity in a cardinal direction, and its frequency.\n\n Parameters\n ----------\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 128 rows and the following columns:\n 
`['direction', 'strength', 'frequency']`.\n """\n return _get_dataset("wind", return_type=return_type)\n\n\ndef election(return_type="pandas"):\n """\n Each row represents voting results for an electoral district in the 2013 Montreal\n mayoral election.\n\n Parameters\n ----------\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 58 rows and the following columns:\n `['district', 'Coderre', 'Bergeron', 'Joly', 'total', 'winner', 'result', 'district_id']`.\n """\n return _get_dataset("election", return_type=return_type)\n\n\ndef election_geojson():\n """\n Each feature represents an electoral district in the 2013 Montreal mayoral election.\n\n Returns\n -------\n A GeoJSON-formatted `dict` with 58 polygon or multi-polygon features whose `id`\n is an electoral district numerical ID and whose `district` property is the ID and\n district name.\n """\n import gzip\n import json\n import os\n\n path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n "package_data",\n "datasets",\n "election.geojson.gz",\n )\n with gzip.GzipFile(path, "r") as f:\n result = json.loads(f.read().decode("utf-8"))\n return result\n\n\ndef carshare(return_type="pandas"):\n """\n Each row represents the availability of car-sharing services near the centroid of a zone\n in Montreal over a month-long period.\n\n Parameters\n ----------\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe` with 249 rows and the following columns:\n `['centroid_lat', 'centroid_lon', 'car_hours', 'peak_hour']`.\n """\n return _get_dataset("carshare", return_type=return_type)\n\n\ndef stocks(indexed=False, datetimes=False, return_type="pandas"):\n """\n Each row in this wide dataset represents closing prices from 6 tech stocks in 2018/2019.\n\n Parameters\n ----------\n 
indexed: bool\n Whether or not the 'date' column is used as the index and the column index\n is named 'company'. Applicable only if `return_type='pandas'`\n\n datetimes: bool\n Whether or not the 'date' column will be of datetime type\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 100 rows and the following columns:\n `['date', 'GOOG', 'AAPL', 'AMZN', 'FB', 'NFLX', 'MSFT']`.\n If `indexed` is True, the 'date' column is used as the index and the column index\n is named 'company'\n If `datetimes` is True, the 'date' column will be a datetime column\n """\n if indexed and return_type not in BACKENDS_WITH_INDEX_SUPPORT:\n msg = f"Backend '{return_type}' does not support setting index"\n raise NotImplementedError(msg)\n\n df = nw.from_native(\n _get_dataset("stocks", return_type=return_type), eager_only=True\n ).with_columns(nw.col("date").cast(nw.String()))\n\n if datetimes:\n df = df.with_columns(nw.col("date").str.to_datetime())\n\n if indexed: # then it must be pandas\n df = df.to_native().set_index("date")\n df.columns.name = "company"\n return df\n\n return df.to_native()\n\n\ndef experiment(indexed=False, return_type="pandas"):\n """\n Each row in this wide dataset represents the results of 100 simulated participants\n on three hypothetical experiments, along with their gender and control/treatment group.\n\n Parameters\n ----------\n indexed: bool\n If True, then the index is named "participant".\n Applicable only if `return_type='pandas'`\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 100 rows and the following columns:\n `['experiment_1', 'experiment_2', 'experiment_3', 'gender', 'group']`.\n If `indexed` is True, the data frame index is named "participant"\n """\n\n if indexed and return_type not in 
BACKENDS_WITH_INDEX_SUPPORT:\n msg = f"Backend '{return_type}' does not support setting index"\n raise NotImplementedError(msg)\n\n df = nw.from_native(\n _get_dataset("experiment", return_type=return_type), eager_only=True\n )\n if indexed: # then it must be pandas\n df = df.to_native()\n df.index.name = "participant"\n return df\n return df.to_native()\n\n\ndef medals_wide(indexed=False, return_type="pandas"):\n """\n This dataset represents the medal table for Olympic Short Track Speed Skating for the\n top three nations as of 2020.\n\n Parameters\n ----------\n indexed: bool\n Whether or not the 'nation' column is used as the index and the column index\n is named 'medal'. Applicable only if `return_type='pandas'`\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe with 3 rows and the following columns:\n `['nation', 'gold', 'silver', 'bronze']`.\n If `indexed` is True, the 'nation' column is used as the index and the column index\n is named 'medal'\n """\n\n if indexed and return_type not in BACKENDS_WITH_INDEX_SUPPORT:\n msg = f"Backend '{return_type}' does not support setting index"\n raise NotImplementedError(msg)\n\n df = nw.from_native(\n _get_dataset("medals", return_type=return_type), eager_only=True\n )\n if indexed: # then it must be pandas\n df = df.to_native().set_index("nation")\n df.columns.name = "medal"\n return df\n return df.to_native()\n\n\ndef medals_long(indexed=False, return_type="pandas"):\n """\n This dataset represents the medal table for Olympic Short Track Speed Skating for the\n top three nations as of 2020.\n\n Parameters\n ----------\n indexed: bool\n Whether or not the 'nation' column is used as the index.\n Applicable only if `return_type='pandas'`\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n Dataframe 
with 9 rows and the following columns: `['nation', 'medal', 'count']`.\n If `indexed` is True, the 'nation' column is used as the index.\n """\n\n if indexed and return_type not in BACKENDS_WITH_INDEX_SUPPORT:\n msg = f"Backend '{return_type}' does not support setting index"\n raise NotImplementedError(msg)\n\n df = nw.from_native(\n _get_dataset("medals", return_type=return_type), eager_only=True\n ).unpivot(\n index=["nation"],\n value_name="count",\n variable_name="medal",\n )\n if indexed:\n df = nw.maybe_set_index(df, "nation")\n return df.to_native()\n\n\ndef _get_dataset(d, return_type):\n """\n Loads the dataset using the specified backend.\n\n Notice that the available backends are 'pandas', 'polars', 'pyarrow' and they all have\n a `read_csv` function (pyarrow has it via pyarrow.csv). Therefore we can dynamically\n load the library using `importlib.import_module` and then call\n `backend.read_csv(filepath)`.\n\n Parameters\n ----------\n d: str\n Name of the dataset to load.\n\n return_type: {'pandas', 'polars', 'pyarrow', 'modin', 'cudf'}\n Type of the resulting dataframe\n\n Returns\n -------\n Dataframe of `return_type` type\n """\n filepath = os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n "package_data",\n "datasets",\n d + ".csv.gz",\n )\n\n if return_type not in AVAILABLE_BACKENDS:\n msg = (\n f"Unsupported return_type. 
Found {return_type}, expected one "\n f"of {AVAILABLE_BACKENDS}"\n )\n raise NotImplementedError(msg)\n\n try:\n if return_type == "pyarrow":\n module_to_load = "pyarrow.csv"\n elif return_type == "modin":\n module_to_load = "modin.pandas"\n else:\n module_to_load = return_type\n backend = import_module(module_to_load)\n except ModuleNotFoundError:\n msg = f"return_type={return_type}, but {return_type} is not installed"\n raise ModuleNotFoundError(msg)\n\n try:\n return backend.read_csv(filepath)\n except Exception as e:\n msg = f"Unable to read '{d}' dataset due to: {e}"\n raise Exception(msg).with_traceback(e.__traceback__)\n
.venv\Lib\site-packages\plotly\data\__init__.py
__init__.py
Python
13,310
0.95
0.1
0.005731
vue-tools
811
2024-07-08T15:33:08.840297
MIT
false
9271531175495790c01144f683fa6c79
\n\n
.venv\Lib\site-packages\plotly\data\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
14,677
0.95
0.040373
0.003597
node-utils
794
2024-03-17T14:58:15.600359
MIT
false
f889b4dd435ab791c60c48cffb5e22ea
"""Vendored code from scikit-image in order to limit the number of dependencies\nExtracted from scikit-image/skimage/exposure/exposure.py\n"""\n\nimport numpy as np\n\nfrom warnings import warn\n\n_integer_types = (\n np.byte,\n np.ubyte, # 8 bits\n np.short,\n np.ushort, # 16 bits\n np.intc,\n np.uintc, # 16 or 32 or 64 bits\n np.int_,\n np.uint, # 32 or 64 bits\n np.longlong,\n np.ulonglong,\n) # 64 bits\n_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}\ndtype_range = {\n np.bool_: (False, True),\n np.float16: (-1, 1),\n np.float32: (-1, 1),\n np.float64: (-1, 1),\n}\ndtype_range.update(_integer_ranges)\n\n\nDTYPE_RANGE = dtype_range.copy()\nDTYPE_RANGE.update((d.__name__, limits) for d, limits in dtype_range.items())\nDTYPE_RANGE.update(\n {\n "uint10": (0, 2**10 - 1),\n "uint12": (0, 2**12 - 1),\n "uint14": (0, 2**14 - 1),\n "bool": dtype_range[np.bool_],\n "float": dtype_range[np.float64],\n }\n)\n\n\ndef intensity_range(image, range_values="image", clip_negative=False):\n """Return image intensity range (min, max) based on desired value type.\n\n Parameters\n ----------\n image : array\n Input image.\n range_values : str or 2-tuple, optional\n The image intensity range is configured by this parameter.\n The possible values for this parameter are enumerated below.\n\n 'image'\n Return image min/max as the range.\n 'dtype'\n Return min/max of the image's dtype as the range.\n dtype-name\n Return intensity range based on desired `dtype`. Must be valid key\n in `DTYPE_RANGE`. Note: `image` is ignored for this range type.\n 2-tuple\n Return `range_values` as min/max intensities. Note that there's no\n reason to use this function if you just want to specify the\n intensity range explicitly. This option is included for functions\n that use `intensity_range` to support all desired range types.\n\n clip_negative : bool, optional\n If True, clip the negative range (i.e. 
return 0 for min intensity)\n even if the image dtype allows negative values.\n """\n if range_values == "dtype":\n range_values = image.dtype.type\n\n if range_values == "image":\n i_min = np.min(image)\n i_max = np.max(image)\n elif range_values in DTYPE_RANGE:\n i_min, i_max = DTYPE_RANGE[range_values]\n if clip_negative:\n i_min = 0\n else:\n i_min, i_max = range_values\n return i_min, i_max\n\n\ndef _output_dtype(dtype_or_range):\n """Determine the output dtype for rescale_intensity.\n\n The dtype is determined according to the following rules:\n - if ``dtype_or_range`` is a dtype, that is the output dtype.\n - if ``dtype_or_range`` is a dtype string, that is the dtype used, unless\n it is not a NumPy data type (e.g. 'uint12' for 12-bit unsigned integers),\n in which case the data type that can contain it will be used\n (e.g. uint16 in this case).\n - if ``dtype_or_range`` is a pair of values, the output data type will be\n float.\n\n Parameters\n ----------\n dtype_or_range : type, string, or 2-tuple of int/float\n The desired range for the output, expressed as either a NumPy dtype or\n as a (min, max) pair of numbers.\n\n Returns\n -------\n out_dtype : type\n The data type appropriate for the desired output.\n """\n if type(dtype_or_range) in [list, tuple, np.ndarray]:\n # pair of values: always return float.\n return np.float_\n if isinstance(dtype_or_range, type):\n # already a type: return it\n return dtype_or_range\n if dtype_or_range in DTYPE_RANGE:\n # string key in DTYPE_RANGE dictionary\n try:\n # if it's a canonical numpy dtype, convert\n return np.dtype(dtype_or_range).type\n except TypeError: # uint10, uint12, uint14\n # otherwise, return uint16\n return np.uint16\n else:\n raise ValueError(\n "Incorrect value for out_range, should be a valid image data "\n "type or a pair of values, got %s." 
% str(dtype_or_range)\n )\n\n\ndef rescale_intensity(image, in_range="image", out_range="dtype"):\n """Return image after stretching or shrinking its intensity levels.\n\n The desired intensity range of the input and output, `in_range` and\n `out_range` respectively, are used to stretch or shrink the intensity range\n of the input image. See examples below.\n\n Parameters\n ----------\n image : array\n Image array.\n in_range, out_range : str or 2-tuple, optional\n Min and max intensity values of input and output image.\n The possible values for this parameter are enumerated below.\n\n 'image'\n Use image min/max as the intensity range.\n 'dtype'\n Use min/max of the image's dtype as the intensity range.\n dtype-name\n Use intensity range based on desired `dtype`. Must be valid key\n in `DTYPE_RANGE`.\n 2-tuple\n Use `range_values` as explicit min/max intensities.\n\n Returns\n -------\n out : array\n Image array after rescaling its intensity. This image is the same dtype\n as the input image.\n\n Notes\n -----\n .. versionchanged:: 0.17\n The dtype of the output array has changed to match the output dtype, or\n float if the output range is specified by a pair of floats.\n\n See Also\n --------\n equalize_hist\n\n Examples\n --------\n By default, the min/max intensities of the input image are stretched to\n the limits allowed by the image's dtype, since `in_range` defaults to\n 'image' and `out_range` defaults to 'dtype':\n\n >>> image = np.array([51, 102, 153], dtype=np.uint8)\n >>> rescale_intensity(image)\n array([ 0, 127, 255], dtype=uint8)\n\n It's easy to accidentally convert an image dtype from uint8 to float:\n\n >>> 1.0 * image\n array([ 51., 102., 153.])\n\n Use `rescale_intensity` to rescale to the proper range for float dtypes:\n\n >>> image_float = 1.0 * image\n >>> rescale_intensity(image_float)\n array([0. , 0.5, 1. 
])\n\n To maintain the low contrast of the original, use the `in_range` parameter:\n\n >>> rescale_intensity(image_float, in_range=(0, 255))\n array([0.2, 0.4, 0.6])\n\n If the min/max value of `in_range` is more/less than the min/max image\n intensity, then the intensity levels are clipped:\n\n >>> rescale_intensity(image_float, in_range=(0, 102))\n array([0.5, 1. , 1. ])\n\n If you have an image with signed integers but want to rescale the image to\n just the positive range, use the `out_range` parameter. In that case, the\n output dtype will be float:\n\n >>> image = np.array([-10, 0, 10], dtype=np.int8)\n >>> rescale_intensity(image, out_range=(0, 127))\n array([ 0. , 63.5, 127. ])\n\n To get the desired range with a specific dtype, use ``.astype()``:\n\n >>> rescale_intensity(image, out_range=(0, 127)).astype(np.int8)\n array([ 0, 63, 127], dtype=int8)\n\n If the input image is constant, the output will be clipped directly to the\n output range:\n >>> image = np.array([130, 130, 130], dtype=np.int32)\n >>> rescale_intensity(image, out_range=(0, 127)).astype(np.int32)\n array([127, 127, 127], dtype=int32)\n """\n if out_range in ["dtype", "image"]:\n out_dtype = _output_dtype(image.dtype.type)\n else:\n out_dtype = _output_dtype(out_range)\n\n imin, imax = map(float, intensity_range(image, in_range))\n omin, omax = map(\n float, intensity_range(image, out_range, clip_negative=(imin >= 0))\n )\n\n if np.any(np.isnan([imin, imax, omin, omax])):\n warn(\n "One or more intensity levels are NaN. Rescaling will broadcast "\n "NaN to the full image. Provide intensity levels yourself to "\n "avoid this. E.g. with np.nanmin(image), np.nanmax(image).",\n stacklevel=2,\n )\n\n image = np.clip(image, imin, imax)\n\n if imin != imax:\n image = (image - imin) / (imax - imin)\n return np.asarray(image * (omax - omin) + omin, dtype=out_dtype)\n else:\n return np.clip(image, omin, omax).astype(out_dtype)\n
.venv\Lib\site-packages\plotly\express\imshow_utils.py
imshow_utils.py
Python
8,291
0.95
0.137652
0.02451
python-kit
636
2024-07-15T15:27:27.769147
Apache-2.0
false
2d01d7d2351da06080f5150adbbc9288
from warnings import warn\n\nfrom ._core import make_figure\nfrom ._doc import make_docstring\nimport plotly.graph_objs as go\n\n_wide_mode_xy_append = [\n "Either `x` or `y` can optionally be a list of column references or array_likes, ",\n "in which case the data will be treated as if it were 'wide' rather than 'long'.",\n]\n_cartesian_append_dict = dict(x=_wide_mode_xy_append, y=_wide_mode_xy_append)\n\n\ndef scatter(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n symbol=None,\n size=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n error_x=None,\n error_x_minus=None,\n error_y=None,\n error_y_minus=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n orientation=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n size_max=None,\n marginal_x=None,\n marginal_y=None,\n trendline=None,\n trendline_options=None,\n trendline_color_override=None,\n trendline_scope="trace",\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n render_mode="auto",\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a scatter plot, each row of `data_frame` is represented by a symbol\n mark in 2D space.\n """\n return make_figure(args=locals(), constructor=go.Scatter)\n\n\nscatter.__doc__ = make_docstring(scatter, append_dict=_cartesian_append_dict)\n\n\ndef density_contour(\n data_frame=None,\n x=None,\n y=None,\n z=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n orientation=None,\n 
color_discrete_sequence=None,\n color_discrete_map=None,\n marginal_x=None,\n marginal_y=None,\n trendline=None,\n trendline_options=None,\n trendline_color_override=None,\n trendline_scope="trace",\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n histfunc=None,\n histnorm=None,\n nbinsx=None,\n nbinsy=None,\n text_auto=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a density contour plot, rows of `data_frame` are grouped together\n into contour marks to visualize the 2D distribution of an aggregate\n function `histfunc` (e.g. the count or sum) of the value `z`.\n """\n return make_figure(\n args=locals(),\n constructor=go.Histogram2dContour,\n trace_patch=dict(\n contours=dict(coloring="none"),\n histfunc=histfunc,\n histnorm=histnorm,\n nbinsx=nbinsx,\n nbinsy=nbinsy,\n xbingroup="x",\n ybingroup="y",\n ),\n )\n\n\ndensity_contour.__doc__ = make_docstring(\n density_contour,\n append_dict=dict(\n x=_wide_mode_xy_append,\n y=_wide_mode_xy_append,\n z=[\n "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",\n ],\n histfunc=["The arguments to this function are the values of `z`."],\n ),\n)\n\n\ndef density_heatmap(\n data_frame=None,\n x=None,\n y=None,\n z=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n orientation=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n marginal_x=None,\n marginal_y=None,\n opacity=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n histfunc=None,\n histnorm=None,\n nbinsx=None,\n nbinsy=None,\n text_auto=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a density heatmap, rows of `data_frame` are grouped 
together into\n colored rectangular tiles to visualize the 2D distribution of an\n aggregate function `histfunc` (e.g. the count or sum) of the value `z`.\n """\n return make_figure(\n args=locals(),\n constructor=go.Histogram2d,\n trace_patch=dict(\n histfunc=histfunc,\n histnorm=histnorm,\n nbinsx=nbinsx,\n nbinsy=nbinsy,\n xbingroup="x",\n ybingroup="y",\n ),\n )\n\n\ndensity_heatmap.__doc__ = make_docstring(\n density_heatmap,\n append_dict=dict(\n x=_wide_mode_xy_append,\n y=_wide_mode_xy_append,\n z=[\n "For `density_heatmap` and `density_contour` these values are used as the inputs to `histfunc`.",\n ],\n histfunc=[\n "The arguments to this function are the values of `z`.",\n ],\n ),\n)\n\n\ndef line(\n data_frame=None,\n x=None,\n y=None,\n line_group=None,\n color=None,\n line_dash=None,\n symbol=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n error_x=None,\n error_x_minus=None,\n error_y=None,\n error_y_minus=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n orientation=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n line_shape=None,\n render_mode="auto",\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a 2D line plot, each row of `data_frame` is represented as a vertex of\n a polyline mark in 2D space.\n """\n return make_figure(args=locals(), constructor=go.Scatter)\n\n\nline.__doc__ = make_docstring(line, append_dict=_cartesian_append_dict)\n\n\ndef area(\n data_frame=None,\n x=None,\n y=None,\n line_group=None,\n color=None,\n pattern_shape=None,\n symbol=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n 
text=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n pattern_shape_sequence=None,\n pattern_shape_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n orientation=None,\n groupnorm=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n line_shape=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a stacked area plot, each row of `data_frame` is represented as\n a vertex of a polyline mark in 2D space. The area between\n successive polylines is filled.\n """\n return make_figure(\n args=locals(),\n constructor=go.Scatter,\n trace_patch=dict(stackgroup=1, mode="lines", groupnorm=groupnorm),\n )\n\n\narea.__doc__ = make_docstring(area, append_dict=_cartesian_append_dict)\n\n\ndef bar(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n pattern_shape=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n base=None,\n error_x=None,\n error_x_minus=None,\n error_y=None,\n error_y_minus=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n pattern_shape_sequence=None,\n pattern_shape_map=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n orientation=None,\n barmode="relative",\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n text_auto=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a bar plot, each row of `data_frame` is represented as a rectangular\n mark.\n """\n return make_figure(\n args=locals(),\n 
constructor=go.Bar,\n trace_patch=dict(textposition="auto"),\n layout_patch=dict(barmode=barmode),\n )\n\n\nbar.__doc__ = make_docstring(bar, append_dict=_cartesian_append_dict)\n\n\ndef timeline(\n data_frame=None,\n x_start=None,\n x_end=None,\n y=None,\n color=None,\n pattern_shape=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n pattern_shape_sequence=None,\n pattern_shape_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n range_x=None,\n range_y=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a timeline plot, each row of `data_frame` is represented as a rectangular\n mark on an x axis of type `date`, spanning from `x_start` to `x_end`.\n """\n return make_figure(\n args=locals(),\n constructor="timeline",\n trace_patch=dict(textposition="auto", orientation="h"),\n layout_patch=dict(barmode="overlay"),\n )\n\n\ntimeline.__doc__ = make_docstring(timeline)\n\n\ndef histogram(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n pattern_shape=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n pattern_shape_sequence=None,\n pattern_shape_map=None,\n marginal=None,\n opacity=None,\n orientation=None,\n barmode="relative",\n barnorm=None,\n histnorm=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n histfunc=None,\n cumulative=None,\n nbins=None,\n text_auto=False,\n title=None,\n subtitle=None,\n 
template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a histogram, rows of `data_frame` are grouped together into a\n rectangular mark to visualize the 1D distribution of an aggregate\n function `histfunc` (e.g. the count or sum) of the value `y` (or `x` if\n `orientation` is `'h'`).\n """\n return make_figure(\n args=locals(),\n constructor=go.Histogram,\n trace_patch=dict(\n histnorm=histnorm,\n histfunc=histfunc,\n cumulative=dict(enabled=cumulative),\n ),\n layout_patch=dict(barmode=barmode, barnorm=barnorm),\n )\n\n\nhistogram.__doc__ = make_docstring(\n histogram,\n append_dict=dict(\n x=["If `orientation` is `'h'`, these values are used as inputs to `histfunc`."]\n + _wide_mode_xy_append,\n y=["If `orientation` is `'v'`, these values are used as inputs to `histfunc`."]\n + _wide_mode_xy_append,\n histfunc=[\n "The arguments to this function are the values of `y` (`x`) if `orientation` is `'v'` (`'h'`).",\n ],\n ),\n)\n\n\ndef ecdf(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n text=None,\n line_dash=None,\n symbol=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n animation_frame=None,\n animation_group=None,\n markers=False,\n lines=True,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n marginal=None,\n opacity=None,\n orientation=None,\n ecdfnorm="probability",\n ecdfmode="standard",\n render_mode="auto",\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a Empirical Cumulative Distribution Function (ECDF) plot, rows of `data_frame`\n are sorted by the value `x` (or `y` if `orientation` is `'h'`) and their cumulative\n count (or the cumulative sum of `y` if supplied and 
`orientation` is `h`) is drawn\n as a line.\n """\n return make_figure(args=locals(), constructor=go.Scatter)\n\n\necdf.__doc__ = make_docstring(\n ecdf,\n append_dict=dict(\n x=[\n "If `orientation` is `'h'`, the cumulative sum of this argument is plotted rather than the cumulative count."\n ]\n + _wide_mode_xy_append,\n y=[\n "If `orientation` is `'v'`, the cumulative sum of this argument is plotted rather than the cumulative count."\n ]\n + _wide_mode_xy_append,\n ),\n)\n\n\ndef violin(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n orientation=None,\n violinmode=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n points=None,\n box=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a violin plot, rows of `data_frame` are grouped together into a\n curved mark to visualize their distribution.\n """\n return make_figure(\n args=locals(),\n constructor=go.Violin,\n trace_patch=dict(\n points=points,\n box=dict(visible=box),\n scalegroup=True,\n x0=" ",\n y0=" ",\n ),\n layout_patch=dict(violinmode=violinmode),\n )\n\n\nviolin.__doc__ = make_docstring(violin, append_dict=_cartesian_append_dict)\n\n\ndef box(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n orientation=None,\n boxmode=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n 
points=None,\n notched=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a box plot, rows of `data_frame` are grouped together into a\n box-and-whisker mark to visualize their distribution.\n\n Each box spans from quartile 1 (Q1) to quartile 3 (Q3). The second\n quartile (Q2) is marked by a line inside the box. By default, the\n whiskers correspond to the box' edges +/- 1.5 times the interquartile\n range (IQR: Q3-Q1), see "points" for other options.\n """\n return make_figure(\n args=locals(),\n constructor=go.Box,\n trace_patch=dict(boxpoints=points, notched=notched, x0=" ", y0=" "),\n layout_patch=dict(boxmode=boxmode),\n )\n\n\nbox.__doc__ = make_docstring(box, append_dict=_cartesian_append_dict)\n\n\ndef strip(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n orientation=None,\n stripmode=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a strip plot each row of `data_frame` is represented as a jittered\n mark within categories.\n """\n return make_figure(\n args=locals(),\n constructor=go.Box,\n trace_patch=dict(\n boxpoints="all",\n pointpos=0,\n hoveron="points",\n fillcolor="rgba(255,255,255,0)",\n line={"color": "rgba(255,255,255,0)"},\n x0=" ",\n y0=" ",\n ),\n layout_patch=dict(boxmode=stripmode),\n )\n\n\nstrip.__doc__ = make_docstring(strip, append_dict=_cartesian_append_dict)\n\n\ndef scatter_3d(\n data_frame=None,\n x=None,\n y=None,\n z=None,\n color=None,\n symbol=None,\n size=None,\n text=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n 
error_x=None,\n error_x_minus=None,\n error_y=None,\n error_y_minus=None,\n error_z=None,\n error_z_minus=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n size_max=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n log_x=False,\n log_y=False,\n log_z=False,\n range_x=None,\n range_y=None,\n range_z=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a 3D scatter plot, each row of `data_frame` is represented by a\n symbol mark in 3D space.\n """\n return make_figure(args=locals(), constructor=go.Scatter3d)\n\n\nscatter_3d.__doc__ = make_docstring(scatter_3d)\n\n\ndef line_3d(\n data_frame=None,\n x=None,\n y=None,\n z=None,\n color=None,\n line_dash=None,\n text=None,\n line_group=None,\n symbol=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n error_x=None,\n error_x_minus=None,\n error_y=None,\n error_y_minus=None,\n error_z=None,\n error_z_minus=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n log_x=False,\n log_y=False,\n log_z=False,\n range_x=None,\n range_y=None,\n range_z=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a 3D line plot, each row of `data_frame` is represented as a vertex of\n a polyline mark in 3D space.\n """\n return make_figure(args=locals(), constructor=go.Scatter3d)\n\n\nline_3d.__doc__ = make_docstring(line_3d)\n\n\ndef scatter_ternary(\n data_frame=None,\n a=None,\n b=None,\n c=None,\n color=None,\n symbol=None,\n size=None,\n text=None,\n hover_name=None,\n hover_data=None,\n 
custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n size_max=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a ternary scatter plot, each row of `data_frame` is represented by a\n symbol mark in ternary coordinates.\n """\n return make_figure(args=locals(), constructor=go.Scatterternary)\n\n\nscatter_ternary.__doc__ = make_docstring(scatter_ternary)\n\n\ndef line_ternary(\n data_frame=None,\n a=None,\n b=None,\n c=None,\n color=None,\n line_dash=None,\n line_group=None,\n symbol=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n line_shape=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a ternary line plot, each row of `data_frame` is represented as\n a vertex of a polyline mark in ternary coordinates.\n """\n return make_figure(args=locals(), constructor=go.Scatterternary)\n\n\nline_ternary.__doc__ = make_docstring(line_ternary)\n\n\ndef scatter_polar(\n data_frame=None,\n r=None,\n theta=None,\n color=None,\n symbol=None,\n size=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n 
direction="clockwise",\n start_angle=90,\n size_max=None,\n range_r=None,\n range_theta=None,\n log_r=False,\n render_mode="auto",\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a polar scatter plot, each row of `data_frame` is represented by a\n symbol mark in polar coordinates.\n """\n return make_figure(args=locals(), constructor=go.Scatterpolar)\n\n\nscatter_polar.__doc__ = make_docstring(scatter_polar)\n\n\ndef line_polar(\n data_frame=None,\n r=None,\n theta=None,\n color=None,\n line_dash=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n line_group=None,\n text=None,\n symbol=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n direction="clockwise",\n start_angle=90,\n line_close=False,\n line_shape=None,\n render_mode="auto",\n range_r=None,\n range_theta=None,\n log_r=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a polar line plot, each row of `data_frame` is represented as a\n vertex of a polyline mark in polar coordinates.\n """\n return make_figure(args=locals(), constructor=go.Scatterpolar)\n\n\nline_polar.__doc__ = make_docstring(line_polar)\n\n\ndef bar_polar(\n data_frame=None,\n r=None,\n theta=None,\n color=None,\n pattern_shape=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n base=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n pattern_shape_sequence=None,\n pattern_shape_map=None,\n range_color=None,\n color_continuous_midpoint=None,\n barnorm=None,\n barmode="relative",\n direction="clockwise",\n start_angle=90,\n range_r=None,\n range_theta=None,\n 
log_r=False,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a polar bar plot, each row of `data_frame` is represented as a wedge\n mark in polar coordinates.\n """\n return make_figure(\n args=locals(),\n constructor=go.Barpolar,\n layout_patch=dict(barnorm=barnorm, barmode=barmode),\n )\n\n\nbar_polar.__doc__ = make_docstring(bar_polar)\n\n\ndef choropleth(\n data_frame=None,\n lat=None,\n lon=None,\n locations=None,\n locationmode=None,\n geojson=None,\n featureidkey=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n projection=None,\n scope=None,\n center=None,\n fitbounds=None,\n basemap_visible=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a choropleth map, each row of `data_frame` is represented by a\n colored region mark on a map.\n """\n return make_figure(\n args=locals(),\n constructor=go.Choropleth,\n trace_patch=dict(locationmode=locationmode),\n )\n\n\nchoropleth.__doc__ = make_docstring(choropleth)\n\n\ndef scatter_geo(\n data_frame=None,\n lat=None,\n lon=None,\n locations=None,\n locationmode=None,\n geojson=None,\n featureidkey=None,\n color=None,\n text=None,\n symbol=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n size=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n 
color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n size_max=None,\n projection=None,\n scope=None,\n center=None,\n fitbounds=None,\n basemap_visible=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a geographic scatter plot, each row of `data_frame` is represented\n by a symbol mark on a map.\n """\n return make_figure(\n args=locals(),\n constructor=go.Scattergeo,\n trace_patch=dict(locationmode=locationmode),\n )\n\n\nscatter_geo.__doc__ = make_docstring(scatter_geo)\n\n\ndef line_geo(\n data_frame=None,\n lat=None,\n lon=None,\n locations=None,\n locationmode=None,\n geojson=None,\n featureidkey=None,\n color=None,\n line_dash=None,\n text=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n line_group=None,\n symbol=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n line_dash_sequence=None,\n line_dash_map=None,\n symbol_sequence=None,\n symbol_map=None,\n markers=False,\n projection=None,\n scope=None,\n center=None,\n fitbounds=None,\n basemap_visible=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a geographic line plot, each row of `data_frame` is represented as\n a vertex of a polyline mark on a map.\n """\n return make_figure(\n args=locals(),\n constructor=go.Scattergeo,\n trace_patch=dict(locationmode=locationmode),\n )\n\n\nline_geo.__doc__ = make_docstring(line_geo)\n\n\ndef scatter_map(\n data_frame=None,\n lat=None,\n lon=None,\n color=None,\n text=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n size=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n 
color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n size_max=None,\n zoom=8,\n center=None,\n map_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a scatter map, each row of `data_frame` is represented by a\n symbol mark on the map.\n """\n return make_figure(args=locals(), constructor=go.Scattermap)\n\n\nscatter_map.__doc__ = make_docstring(scatter_map)\n\n\ndef choropleth_map(\n data_frame=None,\n geojson=None,\n featureidkey=None,\n locations=None,\n color=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n zoom=8,\n center=None,\n map_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a choropleth map, each row of `data_frame` is represented by a\n colored region on the map.\n """\n return make_figure(args=locals(), constructor=go.Choroplethmap)\n\n\nchoropleth_map.__doc__ = make_docstring(choropleth_map)\n\n\ndef density_map(\n data_frame=None,\n lat=None,\n lon=None,\n z=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n zoom=8,\n center=None,\n map_style=None,\n radius=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a density map, each row of `data_frame` contributes to the intensity of\n the color of the region around the corresponding point on the map.\n """\n return make_figure(\n args=locals(), constructor=go.Densitymap, 
trace_patch=dict(radius=radius)\n )\n\n\ndensity_map.__doc__ = make_docstring(density_map)\n\n\ndef line_map(\n data_frame=None,\n lat=None,\n lon=None,\n color=None,\n text=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n line_group=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n zoom=8,\n center=None,\n map_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a line map, each row of `data_frame` is represented as\n a vertex of a polyline mark on the map.\n """\n return make_figure(args=locals(), constructor=go.Scattermap)\n\n\nline_map.__doc__ = make_docstring(line_map)\n\n\ndef scatter_mapbox(\n data_frame=None,\n lat=None,\n lon=None,\n color=None,\n text=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n size=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n size_max=None,\n zoom=8,\n center=None,\n mapbox_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n *scatter_mapbox* is deprecated! 
Use *scatter_map* instead.\n Learn more at: https://plotly.com/python/mapbox-to-maplibre/\n In a Mapbox scatter plot, each row of `data_frame` is represented by a\n symbol mark on a Mapbox map.\n """\n warn(\n "*scatter_mapbox* is deprecated!"\n + " Use *scatter_map* instead."\n + " Learn more at: https://plotly.com/python/mapbox-to-maplibre/",\n stacklevel=2,\n category=DeprecationWarning,\n )\n return make_figure(args=locals(), constructor=go.Scattermapbox)\n\n\nscatter_mapbox.__doc__ = make_docstring(scatter_mapbox)\n\n\ndef choropleth_mapbox(\n data_frame=None,\n geojson=None,\n featureidkey=None,\n locations=None,\n color=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n zoom=8,\n center=None,\n mapbox_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n *choropleth_mapbox* is deprecated! 
Use *choropleth_map* instead.\n Learn more at: https://plotly.com/python/mapbox-to-maplibre/\n In a Mapbox choropleth map, each row of `data_frame` is represented by a\n colored region on a Mapbox map.\n """\n warn(\n "*choropleth_mapbox* is deprecated!"\n + " Use *choropleth_map* instead."\n + " Learn more at: https://plotly.com/python/mapbox-to-maplibre/",\n stacklevel=2,\n category=DeprecationWarning,\n )\n return make_figure(args=locals(), constructor=go.Choroplethmapbox)\n\n\nchoropleth_mapbox.__doc__ = make_docstring(choropleth_mapbox)\n\n\ndef density_mapbox(\n data_frame=None,\n lat=None,\n lon=None,\n z=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n zoom=8,\n center=None,\n mapbox_style=None,\n radius=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n *density_mapbox* is deprecated! 
Use *density_map* instead.\n Learn more at: https://plotly.com/python/mapbox-to-maplibre/\n In a Mapbox density map, each row of `data_frame` contributes to the intensity of\n the color of the region around the corresponding point on the map\n """\n warn(\n "*density_mapbox* is deprecated!"\n + " Use *density_map* instead."\n + " Learn more at: https://plotly.com/python/mapbox-to-maplibre/",\n stacklevel=2,\n category=DeprecationWarning,\n )\n return make_figure(\n args=locals(), constructor=go.Densitymapbox, trace_patch=dict(radius=radius)\n )\n\n\ndensity_mapbox.__doc__ = make_docstring(density_mapbox)\n\n\ndef line_mapbox(\n data_frame=None,\n lat=None,\n lon=None,\n color=None,\n text=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n line_group=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n zoom=8,\n center=None,\n mapbox_style=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n *line_mapbox* is deprecated! 
Use *line_map* instead.\n Learn more at: https://plotly.com/python/mapbox-to-maplibre/\n In a Mapbox line plot, each row of `data_frame` is represented as\n a vertex of a polyline mark on a Mapbox map.\n """\n warn(\n "*line_mapbox* is deprecated!"\n + " Use *line_map* instead."\n + " Learn more at: https://plotly.com/python/mapbox-to-maplibre/",\n stacklevel=2,\n category=DeprecationWarning,\n )\n return make_figure(args=locals(), constructor=go.Scattermapbox)\n\n\nline_mapbox.__doc__ = make_docstring(line_mapbox)\n\n\ndef scatter_matrix(\n data_frame=None,\n dimensions=None,\n color=None,\n symbol=None,\n size=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n symbol_sequence=None,\n symbol_map=None,\n opacity=None,\n size_max=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a scatter plot matrix (or SPLOM), each row of `data_frame` is\n represented by a multiple symbol marks, one in each cell of a grid of\n 2D scatter plots, which plot each pair of `dimensions` against each\n other.\n """\n return make_figure(\n args=locals(), constructor=go.Splom, layout_patch=dict(dragmode="select")\n )\n\n\nscatter_matrix.__doc__ = make_docstring(scatter_matrix)\n\n\ndef parallel_coordinates(\n data_frame=None,\n dimensions=None,\n color=None,\n labels=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a parallel coordinates plot, each row of `data_frame` is represented\n by a polyline mark which traverses a set of parallel axes, one for each\n of the `dimensions`.\n """\n return make_figure(args=locals(), constructor=go.Parcoords)\n\n\nparallel_coordinates.__doc__ = 
make_docstring(parallel_coordinates)\n\n\ndef parallel_categories(\n data_frame=None,\n dimensions=None,\n color=None,\n labels=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n dimensions_max_cardinality=50,\n) -> go.Figure:\n """\n In a parallel categories (or parallel sets) plot, each row of\n `data_frame` is grouped with other rows that share the same values of\n `dimensions` and then plotted as a polyline mark through a set of\n parallel axes, one for each of the `dimensions`.\n """\n return make_figure(args=locals(), constructor=go.Parcats)\n\n\nparallel_categories.__doc__ = make_docstring(parallel_categories)\n\n\ndef pie(\n data_frame=None,\n names=None,\n values=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n category_orders=None,\n labels=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n opacity=None,\n hole=None,\n) -> go.Figure:\n """\n In a pie plot, each row of `data_frame` is represented as a sector of a\n pie.\n """\n if color_discrete_sequence is not None:\n layout_patch = {"piecolorway": color_discrete_sequence}\n else:\n layout_patch = {}\n return make_figure(\n args=locals(),\n constructor=go.Pie,\n trace_patch=dict(showlegend=(names is not None), hole=hole),\n layout_patch=layout_patch,\n )\n\n\npie.__doc__ = make_docstring(\n pie,\n override_dict=dict(\n hole=[\n "float",\n "Sets the fraction of the radius to cut out of the pie."\n "Use this to make a donut chart.",\n ],\n ),\n)\n\n\ndef sunburst(\n data_frame=None,\n names=None,\n values=None,\n parents=None,\n path=None,\n ids=None,\n color=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n 
color_discrete_sequence=None,\n color_discrete_map=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n labels=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n branchvalues=None,\n maxdepth=None,\n) -> go.Figure:\n """\n A sunburst plot represents hierarchial data as sectors laid out over\n several levels of concentric rings.\n """\n if color_discrete_sequence is not None:\n layout_patch = {"sunburstcolorway": color_discrete_sequence}\n else:\n layout_patch = {}\n if path is not None and (ids is not None or parents is not None):\n raise ValueError(\n "Either `path` should be provided, or `ids` and `parents`."\n "These parameters are mutually exclusive and cannot be passed together."\n )\n if path is not None and branchvalues is None:\n branchvalues = "total"\n return make_figure(\n args=locals(),\n constructor=go.Sunburst,\n trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),\n layout_patch=layout_patch,\n )\n\n\nsunburst.__doc__ = make_docstring(sunburst)\n\n\ndef treemap(\n data_frame=None,\n names=None,\n values=None,\n parents=None,\n ids=None,\n path=None,\n color=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n labels=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n branchvalues=None,\n maxdepth=None,\n) -> go.Figure:\n """\n A treemap plot represents hierarchial data as nested rectangular\n sectors.\n """\n if color_discrete_sequence is not None:\n layout_patch = {"treemapcolorway": color_discrete_sequence}\n else:\n layout_patch = {}\n if path is not None and (ids is not None or parents is not None):\n raise ValueError(\n "Either `path` should be provided, or `ids` and `parents`."\n "These parameters are mutually exclusive and cannot be passed together."\n )\n if path is not None and branchvalues is 
None:\n branchvalues = "total"\n return make_figure(\n args=locals(),\n constructor=go.Treemap,\n trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),\n layout_patch=layout_patch,\n )\n\n\ntreemap.__doc__ = make_docstring(treemap)\n\n\ndef icicle(\n data_frame=None,\n names=None,\n values=None,\n parents=None,\n path=None,\n ids=None,\n color=None,\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n labels=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n branchvalues=None,\n maxdepth=None,\n) -> go.Figure:\n """\n An icicle plot represents hierarchial data with adjoined rectangular\n sectors that all cascade from root down to leaf in one direction.\n """\n if color_discrete_sequence is not None:\n layout_patch = {"iciclecolorway": color_discrete_sequence}\n else:\n layout_patch = {}\n if path is not None and (ids is not None or parents is not None):\n raise ValueError(\n "Either `path` should be provided, or `ids` and `parents`."\n "These parameters are mutually exclusive and cannot be passed together."\n )\n if path is not None and branchvalues is None:\n branchvalues = "total"\n return make_figure(\n args=locals(),\n constructor=go.Icicle,\n trace_patch=dict(branchvalues=branchvalues, maxdepth=maxdepth),\n layout_patch=layout_patch,\n )\n\n\nicicle.__doc__ = make_docstring(icicle)\n\n\ndef funnel(\n data_frame=None,\n x=None,\n y=None,\n color=None,\n facet_row=None,\n facet_col=None,\n facet_col_wrap=0,\n facet_row_spacing=None,\n facet_col_spacing=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n text=None,\n animation_frame=None,\n animation_group=None,\n category_orders=None,\n labels=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n opacity=None,\n orientation=None,\n log_x=False,\n log_y=False,\n range_x=None,\n range_y=None,\n 
title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n) -> go.Figure:\n """\n In a funnel plot, each row of `data_frame` is represented as a\n rectangular sector of a funnel.\n """\n return make_figure(args=locals(), constructor=go.Funnel)\n\n\nfunnel.__doc__ = make_docstring(funnel, append_dict=_cartesian_append_dict)\n\n\ndef funnel_area(\n data_frame=None,\n names=None,\n values=None,\n color=None,\n color_discrete_sequence=None,\n color_discrete_map=None,\n hover_name=None,\n hover_data=None,\n custom_data=None,\n labels=None,\n title=None,\n subtitle=None,\n template=None,\n width=None,\n height=None,\n opacity=None,\n) -> go.Figure:\n """\n In a funnel area plot, each row of `data_frame` is represented as a\n trapezoidal sector of a funnel.\n """\n if color_discrete_sequence is not None:\n layout_patch = {"funnelareacolorway": color_discrete_sequence}\n else:\n layout_patch = {}\n return make_figure(\n args=locals(),\n constructor=go.Funnelarea,\n trace_patch=dict(showlegend=(names is not None)),\n layout_patch=layout_patch,\n )\n\n\nfunnel_area.__doc__ = make_docstring(funnel_area)\n
.venv\Lib\site-packages\plotly\express\_chart_types.py
_chart_types.py
Python
45,581
0.95
0.032821
0.002233
react-lib
32
2025-06-22T16:14:42.672868
GPL-3.0
false
e0abaee969ebaa41219e00bdee1ff946
import inspect\nfrom textwrap import TextWrapper\n\ntry:\n getfullargspec = inspect.getfullargspec\nexcept AttributeError: # python 2\n getfullargspec = inspect.getargspec\n\n\ncolref_type = "str or int or Series or array-like"\ncolref_desc = "Either a name of a column in `data_frame`, or a pandas Series or array_like object."\ncolref_list_type = "list of str or int, or Series or array-like"\ncolref_list_desc = (\n "Either names of columns in `data_frame`, or pandas Series, or array_like objects"\n)\n\ndocs = dict(\n data_frame=[\n "DataFrame or array-like or dict",\n "This argument needs to be passed for column names (and not keyword names) to be used.",\n "Array-like and dict are transformed internally to a pandas DataFrame.",\n "Optional: if missing, a DataFrame gets constructed under the hood using the other arguments.",\n ],\n x=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",\n ],\n y=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the y axis in cartesian coordinates.",\n ],\n z=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the z axis in cartesian coordinates.",\n ],\n x_start=[\n colref_type,\n colref_desc,\n "(required)",\n "Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",\n ],\n x_end=[\n colref_type,\n colref_desc,\n "(required)",\n "Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",\n ],\n a=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the a axis in ternary coordinates.",\n ],\n b=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the b axis in ternary coordinates.",\n ],\n c=[\n colref_type,\n colref_desc,\n 
"Values from this column or array_like are used to position marks along the c axis in ternary coordinates.",\n ],\n r=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the radial axis in polar coordinates.",\n ],\n theta=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks along the angular axis in polar coordinates.",\n ],\n values=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to set values associated to sectors.",\n ],\n parents=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used as parents in sunburst and treemap charts.",\n ],\n ids=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to set ids of sectors",\n ],\n path=[\n colref_list_type,\n colref_list_desc,\n "List of columns names or columns of a rectangular dataframe defining the hierarchy of sectors, from root to leaves.",\n "An error is raised if path AND ids or parents is passed",\n ],\n lat=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks according to latitude on a map.",\n ],\n lon=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position marks according to longitude on a map.",\n ],\n locations=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are to be interpreted according to `locationmode` and mapped to longitude/latitude.",\n ],\n base=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to position the base of the bar.",\n ],\n dimensions=[\n colref_list_type,\n colref_list_desc,\n "Values from these columns are used for multidimensional visualization.",\n ],\n dimensions_max_cardinality=[\n "int (default 50)",\n "When `dimensions` is `None` and `data_frame` is provided, "\n "columns with more than this number of unique values are excluded from the output.",\n 
"Not used when `dimensions` is passed.",\n ],\n error_x=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size x-axis error bars.",\n "If `error_x_minus` is `None`, error bars will be symmetrical, otherwise `error_x` is used for the positive direction only.",\n ],\n error_x_minus=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size x-axis error bars in the negative direction.",\n "Ignored if `error_x` is `None`.",\n ],\n error_y=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size y-axis error bars.",\n "If `error_y_minus` is `None`, error bars will be symmetrical, otherwise `error_y` is used for the positive direction only.",\n ],\n error_y_minus=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size y-axis error bars in the negative direction.",\n "Ignored if `error_y` is `None`.",\n ],\n error_z=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size z-axis error bars.",\n "If `error_z_minus` is `None`, error bars will be symmetrical, otherwise `error_z` is used for the positive direction only.",\n ],\n error_z_minus=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to size z-axis error bars in the negative direction.",\n "Ignored if `error_z` is `None`.",\n ],\n color=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign color to marks.",\n ],\n opacity=["float", "Value between 0 and 1. 
Sets the opacity for markers."],\n line_dash=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign dash-patterns to lines.",\n ],\n line_group=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to group rows of `data_frame` into lines.",\n ],\n symbol=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign symbols to marks.",\n ],\n pattern_shape=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign pattern shapes to marks.",\n ],\n size=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign mark sizes.",\n ],\n radius=["int (default is 30)", "Sets the radius of influence of each point."],\n hover_name=[\n colref_type,\n colref_desc,\n "Values from this column or array_like appear in bold in the hover tooltip.",\n ],\n hover_data=[\n "str, or list of str or int, or Series or array-like, or dict",\n "Either a name or list of names of columns in `data_frame`, or pandas Series,",\n "or array_like objects",\n "or a dict with column names as keys, with values True (for default formatting)",\n "False (in order to remove this column from hover information),",\n "or a formatting string, for example ':.3f' or '|%a'",\n "or list-like data to appear in the hover tooltip",\n "or tuples with a bool or formatting string as first element,",\n "and list-like data to appear in hover as second element",\n "Values from these columns appear as extra data in the hover tooltip.",\n ],\n custom_data=[\n "str, or list of str or int, or Series or array-like",\n "Either name or list of names of columns in `data_frame`, or pandas Series, or array_like objects",\n "Values from these columns are extra data, to be used in widgets or Dash callbacks for example. 
This data is not user-visible but is included in events emitted by the figure (lasso selection etc.)",\n ],\n text=[\n colref_type,\n colref_desc,\n "Values from this column or array_like appear in the figure as text labels.",\n ],\n names=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used as labels for sectors.",\n ],\n locationmode=[\n "str",\n "One of 'ISO-3', 'USA-states', or 'country names'",\n "Determines the set of locations used to match entries in `locations` to regions on the map.",\n ],\n facet_row=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign marks to facetted subplots in the vertical direction.",\n ],\n facet_col=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign marks to facetted subplots in the horizontal direction.",\n ],\n facet_col_wrap=[\n "int",\n "Maximum number of facet columns.",\n "Wraps the column variable at this width, so that the column facets span multiple rows.",\n "Ignored if 0, and forced to 0 if `facet_row` or a `marginal` is set.",\n ],\n facet_row_spacing=[\n "float between 0 and 1",\n "Spacing between facet rows, in paper units. 
Default is 0.03 or 0.07 when facet_col_wrap is used.",\n ],\n facet_col_spacing=[\n "float between 0 and 1",\n "Spacing between facet columns, in paper units Default is 0.02.",\n ],\n animation_frame=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to assign marks to animation frames.",\n ],\n animation_group=[\n colref_type,\n colref_desc,\n "Values from this column or array_like are used to provide object-constancy across animation frames: rows with matching `animation_group`s will be treated as if they describe the same object in each frame.",\n ],\n symbol_sequence=[\n "list of str",\n "Strings should define valid plotly.js symbols.",\n "When `symbol` is set, values in that column are assigned symbols by cycling through `symbol_sequence` in the order described in `category_orders`, unless the value of `symbol` is a key in `symbol_map`.",\n ],\n symbol_map=[\n "dict with str keys and str values (default `{}`)",\n "String values should define plotly.js symbols",\n "Used to override `symbol_sequence` to assign a specific symbols to marks corresponding with specific values.",\n "Keys in `symbol_map` should be values in the column denoted by `symbol`.",\n "Alternatively, if the values of `symbol` are valid symbol names, the string `'identity'` may be passed to cause them to be used directly.",\n ],\n line_dash_map=[\n "dict with str keys and str values (default `{}`)",\n "Strings values define plotly.js dash-patterns.",\n "Used to override `line_dash_sequences` to assign a specific dash-patterns to lines corresponding with specific values.",\n "Keys in `line_dash_map` should be values in the column denoted by `line_dash`.",\n "Alternatively, if the values of `line_dash` are valid line-dash names, the string `'identity'` may be passed to cause them to be used directly.",\n ],\n line_dash_sequence=[\n "list of str",\n "Strings should define valid plotly.js dash-patterns.",\n "When `line_dash` is set, values in that column are assigned 
dash-patterns by cycling through `line_dash_sequence` in the order described in `category_orders`, unless the value of `line_dash` is a key in `line_dash_map`.",\n ],\n pattern_shape_map=[\n "dict with str keys and str values (default `{}`)",\n "Strings values define plotly.js patterns-shapes.",\n "Used to override `pattern_shape_sequences` to assign a specific patterns-shapes to lines corresponding with specific values.",\n "Keys in `pattern_shape_map` should be values in the column denoted by `pattern_shape`.",\n "Alternatively, if the values of `pattern_shape` are valid patterns-shapes names, the string `'identity'` may be passed to cause them to be used directly.",\n ],\n pattern_shape_sequence=[\n "list of str",\n "Strings should define valid plotly.js patterns-shapes.",\n "When `pattern_shape` is set, values in that column are assigned patterns-shapes by cycling through `pattern_shape_sequence` in the order described in `category_orders`, unless the value of `pattern_shape` is a key in `pattern_shape_map`.",\n ],\n color_discrete_sequence=[\n "list of str",\n "Strings should define valid CSS-colors.",\n "When `color` is set and the values in the corresponding column are not numeric, values in that column are assigned colors by cycling through `color_discrete_sequence` in the order described in `category_orders`, unless the value of `color` is a key in `color_discrete_map`.",\n "Various useful color sequences are available in the `plotly.express.colors` submodules, specifically `plotly.express.colors.qualitative`.",\n ],\n color_discrete_map=[\n "dict with str keys and str values (default `{}`)",\n "String values should define valid CSS-colors",\n "Used to override `color_discrete_sequence` to assign a specific colors to marks corresponding with specific values.",\n "Keys in `color_discrete_map` should be values in the column denoted by `color`.",\n "Alternatively, if the values of `color` are valid colors, the string `'identity'` may be passed to cause them 
to be used directly.",\n ],\n color_continuous_scale=[\n "list of str",\n "Strings should define valid CSS-colors",\n "This list is used to build a continuous color scale when the column denoted by `color` contains numeric data.",\n "Various useful color scales are available in the `plotly.express.colors` submodules, specifically `plotly.express.colors.sequential`, `plotly.express.colors.diverging` and `plotly.express.colors.cyclical`.",\n ],\n color_continuous_midpoint=[\n "number (default `None`)",\n "If set, computes the bounds of the continuous color scale to have the desired midpoint.",\n "Setting this value is recommended when using `plotly.express.colors.diverging` color scales as the inputs to `color_continuous_scale`.",\n ],\n size_max=["int (default `20`)", "Set the maximum mark size when using `size`."],\n markers=["boolean (default `False`)", "If `True`, markers are shown on lines."],\n lines=[\n "boolean (default `True`)",\n "If `False`, lines are not drawn (forced to `True` if `markers` is `False`).",\n ],\n log_x=[\n "boolean (default `False`)",\n "If `True`, the x-axis is log-scaled in cartesian coordinates.",\n ],\n log_y=[\n "boolean (default `False`)",\n "If `True`, the y-axis is log-scaled in cartesian coordinates.",\n ],\n log_z=[\n "boolean (default `False`)",\n "If `True`, the z-axis is log-scaled in cartesian coordinates.",\n ],\n log_r=[\n "boolean (default `False`)",\n "If `True`, the radial axis is log-scaled in polar coordinates.",\n ],\n range_x=[\n "list of two numbers",\n "If provided, overrides auto-scaling on the x-axis in cartesian coordinates.",\n ],\n range_y=[\n "list of two numbers",\n "If provided, overrides auto-scaling on the y-axis in cartesian coordinates.",\n ],\n range_z=[\n "list of two numbers",\n "If provided, overrides auto-scaling on the z-axis in cartesian coordinates.",\n ],\n range_color=[\n "list of two numbers",\n "If provided, overrides auto-scaling on the continuous color scale.",\n ],\n range_r=[\n "list of 
two numbers",\n "If provided, overrides auto-scaling on the radial axis in polar coordinates.",\n ],\n range_theta=[\n "list of two numbers",\n "If provided, overrides auto-scaling on the angular axis in polar coordinates.",\n ],\n title=["str", "The figure title."],\n subtitle=["str", "The figure subtitle."],\n template=[\n "str or dict or plotly.graph_objects.layout.Template instance",\n "The figure template name (must be a key in plotly.io.templates) or definition.",\n ],\n width=["int (default `None`)", "The figure width in pixels."],\n height=["int (default `None`)", "The figure height in pixels."],\n labels=[\n "dict with str keys and str values (default `{}`)",\n "By default, column names are used in the figure for axis titles, legend entries and hovers.",\n "This parameter allows this to be overridden.",\n "The keys of this dict should correspond to column names, and the values should correspond to the desired label to be displayed.",\n ],\n category_orders=[\n "dict with str keys and list of str values (default `{}`)",\n "By default, in Python 3.6+, the order of categorical values in axes, legends and facets depends on the order in which these values are first encountered in `data_frame` (and no order is guaranteed by default in Python below 3.6).",\n "This parameter is used to force a specific ordering of values per column.",\n "The keys of this dict should correspond to column names, and the values should be lists of strings corresponding to the specific display order desired.",\n ],\n marginal=[\n "str",\n "One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",\n "If set, a subplot is drawn alongside the main plot, visualizing the distribution.",\n ],\n marginal_x=[\n "str",\n "One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",\n "If set, a horizontal subplot is drawn above the main plot, visualizing the x-distribution.",\n ],\n marginal_y=[\n "str",\n "One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",\n "If set, a vertical subplot is 
drawn to the right of the main plot, visualizing the y-distribution.",\n ],\n trendline=[\n "str",\n "One of `'ols'`, `'lowess'`, `'rolling'`, `'expanding'` or `'ewm'`.",\n "If `'ols'`, an Ordinary Least Squares regression line will be drawn for each discrete-color/symbol group.",\n "If `'lowess`', a Locally Weighted Scatterplot Smoothing line will be drawn for each discrete-color/symbol group.",\n "If `'rolling`', a Rolling (e.g. rolling average, rolling median) line will be drawn for each discrete-color/symbol group.",\n "If `'expanding`', an Expanding (e.g. expanding average, expanding sum) line will be drawn for each discrete-color/symbol group.",\n "If `'ewm`', an Exponentially Weighted Moment (e.g. exponentially-weighted moving average) line will be drawn for each discrete-color/symbol group.",\n "See the docstrings for the functions in `plotly.express.trendline_functions` for more details on these functions and how",\n "to configure them with the `trendline_options` argument.",\n ],\n trendline_options=[\n "dict",\n "Options passed as the first argument to the function from `plotly.express.trendline_functions` ",\n "named in the `trendline` argument.",\n ],\n trendline_color_override=[\n "str",\n "Valid CSS color.",\n "If provided, and if `trendline` is set, all trendlines will be drawn in this color rather than in the same color as the traces from which they draw their inputs.",\n ],\n trendline_scope=[\n "str (one of `'trace'` or `'overall'`, default `'trace'`)",\n "If `'trace'`, then one trendline is drawn per trace (i.e. 
per color, symbol, facet, animation frame etc) and if `'overall'` then one trendline is computed for the entire dataset, and replicated across all facets.",\n ],\n render_mode=[\n "str",\n "One of `'auto'`, `'svg'` or `'webgl'`, default `'auto'`",\n "Controls the browser API used to draw marks.",\n "`'svg'` is appropriate for figures of less than 1000 data points, and will allow for fully-vectorized output.",\n "`'webgl'` is likely necessary for acceptable performance above 1000 points but rasterizes part of the output. ",\n "`'auto'` uses heuristics to choose the mode.",\n ],\n direction=[\n "str",\n "One of '`counterclockwise'` or `'clockwise'`. Default is `'clockwise'`",\n "Sets the direction in which increasing values of the angular axis are drawn.",\n ],\n start_angle=[\n "int (default `90`)",\n "Sets start angle for the angular axis, with 0 being due east and 90 being due north.",\n ],\n histfunc=[\n "str (default `'count'` if no arguments are provided, else `'sum'`)",\n "One of `'count'`, `'sum'`, `'avg'`, `'min'`, or `'max'`.",\n "Function used to aggregate values for summarization (note: can be normalized with `histnorm`).",\n ],\n histnorm=[\n "str (default `None`)",\n "One of `'percent'`, `'probability'`, `'density'`, or `'probability density'`",\n "If `None`, the output of `histfunc` is used as is.",\n "If `'probability'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins.",\n "If `'percent'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins and multiplied by 100.",\n "If `'density'`, the output of `histfunc` for a given bin is divided by the size of the bin.",\n "If `'probability density'`, the output of `histfunc` for a given bin is normalized such that it corresponds to the probability that a random event whose distribution is described by the output of `histfunc` will fall into that bin.",\n ],\n barnorm=[\n "str (default `None`)",\n "One of 
`'fraction'` or `'percent'`.",\n "If `'fraction'`, the value of each bar is divided by the sum of all values at that location coordinate.",\n "`'percent'` is the same but multiplied by 100 to show percentages.",\n "`None` will stack up all values at each location coordinate.",\n ],\n groupnorm=[\n "str (default `None`)",\n "One of `'fraction'` or `'percent'`.",\n "If `'fraction'`, the value of each point is divided by the sum of all values at that location coordinate.",\n "`'percent'` is the same but multiplied by 100 to show percentages.",\n "`None` will stack up all values at each location coordinate.",\n ],\n barmode=[\n "str (default `'relative'`)",\n "One of `'group'`, `'overlay'` or `'relative'`",\n "In `'relative'` mode, bars are stacked above zero for positive values and below zero for negative values.",\n "In `'overlay'` mode, bars are drawn on top of one another.",\n "In `'group'` mode, bars are placed beside each other.",\n ],\n boxmode=[\n "str (default `'group'`)",\n "One of `'group'` or `'overlay'`",\n "In `'overlay'` mode, boxes are on drawn top of one another.",\n "In `'group'` mode, boxes are placed beside each other.",\n ],\n violinmode=[\n "str (default `'group'`)",\n "One of `'group'` or `'overlay'`",\n "In `'overlay'` mode, violins are on drawn top of one another.",\n "In `'group'` mode, violins are placed beside each other.",\n ],\n stripmode=[\n "str (default `'group'`)",\n "One of `'group'` or `'overlay'`",\n "In `'overlay'` mode, strips are on drawn top of one another.",\n "In `'group'` mode, strips are placed beside each other.",\n ],\n zoom=["int (default `8`)", "Between 0 and 20.", "Sets map zoom level."],\n orientation=[\n "str, one of `'h'` for horizontal or `'v'` for vertical. 
",\n "(default `'v'` if `x` and `y` are provided and both continuous or both categorical, ",\n "otherwise `'v'`(`'h'`) if `x`(`y`) is categorical and `y`(`x`) is continuous, ",\n "otherwise `'v'`(`'h'`) if only `x`(`y`) is provided) ",\n ],\n line_close=[\n "boolean (default `False`)",\n "If `True`, an extra line segment is drawn between the first and last point.",\n ],\n line_shape=[\n "str (default `'linear'`)",\n "One of `'linear'`, `'spline'`, `'hv'`, `'vh'`, `'hvh'`, or `'vhv'`",\n ],\n fitbounds=["str (default `False`).", "One of `False`, `locations` or `geojson`."],\n basemap_visible=["bool", "Force the basemap visibility."],\n scope=[\n "str (default `'world'`).",\n "One of `'world'`, `'usa'`, `'europe'`, `'asia'`, `'africa'`, `'north america'`, or `'south america'`"\n "Default is `'world'` unless `projection` is set to `'albers usa'`, which forces `'usa'`.",\n ],\n projection=[\n "str ",\n "One of `'equirectangular'`, `'mercator'`, `'orthographic'`, `'natural earth'`, `'kavrayskiy7'`, `'miller'`, `'robinson'`, `'eckert4'`, `'azimuthal equal area'`, `'azimuthal equidistant'`, `'conic equal area'`, `'conic conformal'`, `'conic equidistant'`, `'gnomonic'`, `'stereographic'`, `'mollweide'`, `'hammer'`, `'transverse mercator'`, `'albers usa'`, `'winkel tripel'`, `'aitoff'`, or `'sinusoidal'`"\n "Default depends on `scope`.",\n ],\n center=[\n "dict",\n "Dict keys are `'lat'` and `'lon'`",\n "Sets the center point of the map.",\n ],\n map_style=[\n "str (default `'basic'`)",\n "Identifier of base map style.",\n "Allowed values are `'basic'`, `'carto-darkmatter'`, `'carto-darkmatter-nolabels'`, `'carto-positron'`, `'carto-positron-nolabels'`, `'carto-voyager'`, `'carto-voyager-nolabels'`, `'dark'`, `'light'`, `'open-street-map'`, `'outdoors'`, `'satellite'`, `'satellite-streets'`, `'streets'`, `'white-bg'`.",\n ],\n mapbox_style=[\n "str (default `'basic'`, needs Mapbox API token)",\n "Identifier of base map style, some of which require a Mapbox or Stadia Maps 
API token to be set using `plotly.express.set_mapbox_access_token()`.",\n "Allowed values which do not require a token are `'open-street-map'`, `'white-bg'`, `'carto-positron'`, `'carto-darkmatter'`.",\n "Allowed values which require a Mapbox API token are `'basic'`, `'streets'`, `'outdoors'`, `'light'`, `'dark'`, `'satellite'`, `'satellite-streets'`.",\n "Allowed values which require a Stadia Maps API token are `'stamen-terrain'`, `'stamen-toner'`, `'stamen-watercolor'`.",\n ],\n points=[\n "str or boolean (default `'outliers'`)",\n "One of `'outliers'`, `'suspectedoutliers'`, `'all'`, or `False`.",\n "If `'outliers'`, only the sample points lying outside the whiskers are shown.",\n "If `'suspectedoutliers'`, all outlier points are shown and those less than 4*Q1-3*Q3 or greater than 4*Q3-3*Q1 are highlighted with the marker's `'outliercolor'`.",\n "If `'outliers'`, only the sample points lying outside the whiskers are shown.",\n "If `'all'`, all sample points are shown.",\n "If `False`, no sample points are shown and the whiskers extend to the full range of the sample.",\n ],\n box=["boolean (default `False`)", "If `True`, boxes are drawn inside the violins."],\n notched=["boolean (default `False`)", "If `True`, boxes are drawn with notches."],\n geojson=[\n "GeoJSON-formatted dict",\n "Must contain a Polygon feature collection, with IDs, which are references from `locations`.",\n ],\n featureidkey=[\n "str (default: `'id'`)",\n "Path to field in GeoJSON feature object with which to match the values passed in to `locations`."\n "The most common alternative to the default is of the form `'properties.<key>`.",\n ],\n cumulative=[\n "boolean (default `False`)",\n "If `True`, histogram values are cumulative.",\n ],\n nbins=["int", "Positive integer.", "Sets the number of bins."],\n nbinsx=["int", "Positive integer.", "Sets the number of bins along the x axis."],\n nbinsy=["int", "Positive integer.", "Sets the number of bins along the y axis."],\n branchvalues=[\n 
"str",\n "'total' or 'remainder'",\n "Determines how the items in `values` are summed. When"\n "set to 'total', items in `values` are taken to be value"\n "of all its descendants. When set to 'remainder', items"\n "in `values` corresponding to the root and the branches"\n ":sectors are taken to be the extra part not part of the"\n "sum of the values at their leaves.",\n ],\n maxdepth=[\n "int",\n "Positive integer",\n "Sets the number of rendered sectors from any given `level`. Set `maxdepth` to -1 to render all the"\n "levels in the hierarchy.",\n ],\n ecdfnorm=[\n "string or `None` (default `'probability'`)",\n "One of `'probability'` or `'percent'`",\n "If `None`, values will be raw counts or sums.",\n "If `'probability', values will be probabilities normalized from 0 to 1.",\n "If `'percent', values will be percentages normalized from 0 to 100.",\n ],\n ecdfmode=[\n "string (default `'standard'`)",\n "One of `'standard'`, `'complementary'` or `'reversed'`",\n "If `'standard'`, the ECDF is plotted such that values represent data at or below the point.",\n "If `'complementary'`, the CCDF is plotted such that values represent data above the point.",\n "If `'reversed'`, a variant of the CCDF is plotted such that values represent data at or above the point.",\n ],\n text_auto=[\n "bool or string (default `False`)",\n "If `True` or a string, the x or y or z values will be displayed as text, depending on the orientation",\n "A string like `'.2f'` will be interpreted as a `texttemplate` numeric formatting directive.",\n ],\n)\n\n\ndef make_docstring(fn, override_dict=None, append_dict=None):\n override_dict = {} if override_dict is None else override_dict\n append_dict = {} if append_dict is None else append_dict\n tw = TextWrapper(\n width=75,\n initial_indent=" ",\n subsequent_indent=" ",\n break_on_hyphens=False,\n )\n result = (fn.__doc__ or "") + "\nParameters\n----------\n"\n for param in getfullargspec(fn)[0]:\n if override_dict.get(param):\n param_doc = 
list(override_dict[param])\n else:\n param_doc = list(docs[param])\n if append_dict.get(param):\n param_doc += append_dict[param]\n param_desc_list = param_doc[1:]\n param_desc = (\n tw.fill(" ".join(param_desc_list or ""))\n if param in docs or param in override_dict\n else "(documentation missing from map)"\n )\n\n param_type = param_doc[0]\n result += "%s: %s\n%s\n" % (param, param_type, param_desc)\n result += "\nReturns\n-------\n"\n result += " plotly.graph_objects.Figure"\n return result\n
.venv\Lib\site-packages\plotly\express\_doc.py
_doc.py
Python
31,158
0.95
0.096875
0
python-kit
962
2024-09-04T18:27:31.751158
BSD-3-Clause
false
e86e6f73f90408937405c291d7d9cc15
import plotly.graph_objs as go\nfrom _plotly_utils.basevalidators import ColorscaleValidator\nfrom ._core import apply_default_cascade, init_figure, configure_animation_controls\nfrom .imshow_utils import rescale_intensity, _integer_ranges, _integer_types\nimport narwhals.stable.v1 as nw\nimport numpy as np\nimport itertools\nfrom plotly.utils import image_array_to_data_uri\n\ntry:\n import xarray\n\n xarray_imported = True\nexcept ImportError:\n xarray_imported = False\n\n_float_types = []\n\n\ndef _vectorize_zvalue(z, mode="max"):\n alpha = 255 if mode == "max" else 0\n if z is None:\n return z\n elif np.isscalar(z):\n return [z] * 3 + [alpha]\n elif len(z) == 1:\n return list(z) * 3 + [alpha]\n elif len(z) == 3:\n return list(z) + [alpha]\n elif len(z) == 4:\n return z\n else:\n raise ValueError(\n "zmax can be a scalar, or an iterable of length 1, 3 or 4. "\n "A value of %s was passed for zmax." % str(z)\n )\n\n\ndef _infer_zmax_from_type(img):\n dt = img.dtype.type\n rtol = 1.05\n if dt in _integer_types:\n return _integer_ranges[dt][1]\n else:\n im_max = img[np.isfinite(img)].max()\n if im_max <= 1 * rtol:\n return 1\n elif im_max <= 255 * rtol:\n return 255\n elif im_max <= 65535 * rtol:\n return 65535\n else:\n return 2**32\n\n\ndef imshow(\n img,\n zmin=None,\n zmax=None,\n origin=None,\n labels={},\n x=None,\n y=None,\n animation_frame=None,\n facet_col=None,\n facet_col_wrap=None,\n facet_col_spacing=None,\n facet_row_spacing=None,\n color_continuous_scale=None,\n color_continuous_midpoint=None,\n range_color=None,\n title=None,\n template=None,\n width=None,\n height=None,\n aspect=None,\n contrast_rescaling=None,\n binary_string=None,\n binary_backend="auto",\n binary_compression_level=4,\n binary_format="png",\n text_auto=False,\n) -> go.Figure:\n """\n Display an image, i.e. data on a 2D regular raster.\n\n Parameters\n ----------\n\n img: array-like image, or xarray\n The image data. 
Supported array shapes are\n\n - (M, N): an image with scalar data. The data is visualized\n using a colormap.\n - (M, N, 3): an image with RGB values.\n - (M, N, 4): an image with RGBA values, i.e. including transparency.\n\n zmin, zmax : scalar or iterable, optional\n zmin and zmax define the scalar range that the colormap covers. By default,\n zmin and zmax correspond to the min and max values of the datatype for integer\n datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For\n a multichannel image of floats, the max of the image is computed and zmax is the\n smallest power of 256 (1, 255, 65535) greater than this max value,\n with a 5% tolerance. For a single-channel image, the max of the image is used.\n Overridden by range_color.\n\n origin : str, 'upper' or 'lower' (default 'upper')\n position of the [0, 0] pixel of the image array, in the upper left or lower left\n corner. The convention 'upper' is typically used for matrices and images.\n\n labels : dict with str keys and str values (default `{}`)\n Sets names used in the figure for axis titles (keys ``x`` and ``y``),\n colorbar title and hoverlabel (key ``color``). The values should correspond\n to the desired label to be displayed. If ``img`` is an xarray, dimension\n names are used for axis titles, and long name for the colorbar title\n (unless overridden in ``labels``). Possible keys are: x, y, and color.\n\n x, y: list-like, optional\n x and y are used to label the axes of single-channel heatmap visualizations and\n their lengths must match the lengths of the second and first dimensions of the\n img argument. 
They are auto-populated if the input is an xarray.\n\n animation_frame: int or str, optional (default None)\n axis number along which the image array is sliced to create an animation plot.\n If `img` is an xarray, `animation_frame` can be the name of one the dimensions.\n\n facet_col: int or str, optional (default None)\n axis number along which the image array is sliced to create a facetted plot.\n If `img` is an xarray, `facet_col` can be the name of one the dimensions.\n\n facet_col_wrap: int\n Maximum number of facet columns. Wraps the column variable at this width,\n so that the column facets span multiple rows.\n Ignored if `facet_col` is None.\n\n facet_col_spacing: float between 0 and 1\n Spacing between facet columns, in paper units. Default is 0.02.\n\n facet_row_spacing: float between 0 and 1\n Spacing between facet rows created when ``facet_col_wrap`` is used, in\n paper units. Default is 0.0.7.\n\n color_continuous_scale : str or list of str\n colormap used to map scalar data to colors (for a 2D image). This parameter is\n not used for RGB or RGBA images. If a string is provided, it should be the name\n of a known color scale, and if a list is provided, it should be a list of CSS-\n compatible colors.\n\n color_continuous_midpoint : number\n If set, computes the bounds of the continuous color scale to have the desired\n midpoint. Overridden by range_color or zmin and zmax.\n\n range_color : list of two numbers\n If provided, overrides auto-scaling on the continuous color scale, including\n overriding `color_continuous_midpoint`. Also overrides zmin and zmax. 
Used only\n for single-channel images.\n\n title : str\n The figure title.\n\n template : str or dict or plotly.graph_objects.layout.Template instance\n The figure template name or definition.\n\n width : number\n The figure width in pixels.\n\n height: number\n The figure height in pixels.\n\n aspect: 'equal', 'auto', or None\n - 'equal': Ensures an aspect ratio of 1 or pixels (square pixels)\n - 'auto': The axes is kept fixed and the aspect ratio of pixels is\n adjusted so that the data fit in the axes. In general, this will\n result in non-square pixels.\n - if None, 'equal' is used for numpy arrays and 'auto' for xarrays\n (which have typically heterogeneous coordinates)\n\n contrast_rescaling: 'minmax', 'infer', or None\n how to determine data values corresponding to the bounds of the color\n range, when zmin or zmax are not passed. If `minmax`, the min and max\n values of the image are used. If `infer`, a heuristic based on the image\n data type is used.\n\n binary_string: bool, default None\n if True, the image data are first rescaled and encoded as uint8 and\n then passed to plotly.js as a b64 PNG string. If False, data are passed\n unchanged as a numerical array. Setting to True may lead to performance\n gains, at the cost of a loss of precision depending on the original data\n type. If None, use_binary_string is set to True for multichannel (eg) RGB\n arrays, and to False for single-channel (2D) arrays. 2D arrays are\n represented as grayscale and with no colorbar if use_binary_string is\n True.\n\n binary_backend: str, 'auto' (default), 'pil' or 'pypng'\n Third-party package for the transformation of numpy arrays to\n png b64 strings. If 'auto', Pillow is used if installed, otherwise\n pypng.\n\n binary_compression_level: int, between 0 and 9 (default 4)\n png compression level to be passed to the backend when transforming an\n array to a png b64 string. 
Increasing `binary_compression` decreases the\n size of the png string, but the compression step takes more time. For most\n images it is not worth using levels greater than 5, but it's possible to\n test `len(fig.data[0].source)` and to time the execution of `imshow` to\n tune the level of compression. 0 means no compression (not recommended).\n\n binary_format: str, 'png' (default) or 'jpg'\n compression format used to generate b64 string. 'png' is recommended\n since it uses lossless compression, but 'jpg' (lossy) compression can\n result if smaller binary strings for natural images.\n\n text_auto: bool or str (default `False`)\n If `True` or a string, single-channel `img` values will be displayed as text.\n A string like `'.2f'` will be interpreted as a `texttemplate` numeric formatting directive.\n\n Returns\n -------\n fig : graph_objects.Figure containing the displayed image\n\n See also\n --------\n\n plotly.graph_objects.Image : image trace\n plotly.graph_objects.Heatmap : heatmap trace\n\n Notes\n -----\n\n In order to update and customize the returned figure, use\n `go.Figure.update_traces` or `go.Figure.update_layout`.\n\n If an xarray is passed, dimensions names and coordinates are used for\n axes labels and ticks.\n """\n args = locals()\n apply_default_cascade(args)\n labels = labels.copy()\n nslices_facet = 1\n if facet_col is not None:\n if isinstance(facet_col, str):\n facet_col = img.dims.index(facet_col)\n nslices_facet = img.shape[facet_col]\n facet_slices = range(nslices_facet)\n ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices_facet\n nrows = (\n nslices_facet // ncols + 1\n if nslices_facet % ncols\n else nslices_facet // ncols\n )\n else:\n nrows = 1\n ncols = 1\n if animation_frame is not None:\n if isinstance(animation_frame, str):\n animation_frame = img.dims.index(animation_frame)\n nslices_animation = img.shape[animation_frame]\n animation_slices = range(nslices_animation)\n slice_dimensions = (facet_col is not 
None) + (\n animation_frame is not None\n ) # 0, 1, or 2\n facet_label = None\n animation_label = None\n img_is_xarray = False\n # ----- Define x and y, set labels if img is an xarray -------------------\n if xarray_imported and isinstance(img, xarray.DataArray):\n dims = list(img.dims)\n img_is_xarray = True\n pop_indexes = []\n if facet_col is not None:\n facet_slices = img.coords[img.dims[facet_col]].values\n pop_indexes.append(facet_col)\n facet_label = img.dims[facet_col]\n if animation_frame is not None:\n animation_slices = img.coords[img.dims[animation_frame]].values\n pop_indexes.append(animation_frame)\n animation_label = img.dims[animation_frame]\n # Remove indices in sorted order.\n for index in sorted(pop_indexes, reverse=True):\n _ = dims.pop(index)\n y_label, x_label = dims[0], dims[1]\n # np.datetime64 is not handled correctly by go.Heatmap\n for ax in [x_label, y_label]:\n if np.issubdtype(img.coords[ax].dtype, np.datetime64):\n img.coords[ax] = img.coords[ax].astype(str)\n if x is None:\n x = img.coords[x_label].values\n if y is None:\n y = img.coords[y_label].values\n if aspect is None:\n aspect = "auto"\n if labels.get("x", None) is None:\n labels["x"] = x_label\n if labels.get("y", None) is None:\n labels["y"] = y_label\n if labels.get("animation_frame", None) is None:\n labels["animation_frame"] = animation_label\n if labels.get("facet_col", None) is None:\n labels["facet_col"] = facet_label\n if labels.get("color", None) is None:\n labels["color"] = xarray.plot.utils.label_from_attrs(img)\n labels["color"] = labels["color"].replace("\n", "<br>")\n else:\n if hasattr(img, "columns") and hasattr(img.columns, "__len__"):\n if x is None:\n x = img.columns\n if labels.get("x", None) is None and hasattr(img.columns, "name"):\n labels["x"] = img.columns.name or ""\n if hasattr(img, "index") and hasattr(img.index, "__len__"):\n if y is None:\n y = img.index\n if labels.get("y", None) is None and hasattr(img.index, "name"):\n labels["y"] = 
img.index.name or ""\n\n if labels.get("x", None) is None:\n labels["x"] = ""\n if labels.get("y", None) is None:\n labels["y"] = ""\n if labels.get("color", None) is None:\n labels["color"] = ""\n if aspect is None:\n aspect = "equal"\n\n # --- Set the value of binary_string (forbidden for pandas)\n img = nw.from_native(img, pass_through=True)\n if isinstance(img, nw.DataFrame):\n if binary_string:\n raise ValueError("Binary strings cannot be used with pandas arrays")\n is_dataframe = True\n else:\n is_dataframe = False\n\n # --------------- Starting from here img is always a numpy array --------\n img = np.asanyarray(img)\n # Reshape array so that animation dimension comes first, then facets, then images\n if facet_col is not None:\n img = np.moveaxis(img, facet_col, 0)\n if animation_frame is not None and animation_frame < facet_col:\n animation_frame += 1\n facet_col = True\n if animation_frame is not None:\n img = np.moveaxis(img, animation_frame, 0)\n animation_frame = True\n args["animation_frame"] = (\n "animation_frame"\n if labels.get("animation_frame") is None\n else labels["animation_frame"]\n )\n iterables = ()\n if animation_frame is not None:\n iterables += (range(nslices_animation),)\n if facet_col is not None:\n iterables += (range(nslices_facet),)\n\n # Default behaviour of binary_string: True for RGB images, False for 2D\n if binary_string is None:\n binary_string = img.ndim >= (3 + slice_dimensions) and not is_dataframe\n\n # Cast bools to uint8 (also one byte)\n if img.dtype == bool:\n img = 255 * img.astype(np.uint8)\n\n if range_color is not None:\n zmin = range_color[0]\n zmax = range_color[1]\n\n # -------- Contrast rescaling: either minmax or infer ------------------\n if contrast_rescaling is None:\n contrast_rescaling = "minmax" if img.ndim == (2 + slice_dimensions) else "infer"\n\n # We try to set zmin and zmax only if necessary, because traces have good defaults\n if contrast_rescaling == "minmax":\n # When using binary_string and 
minmax we need to set zmin and zmax to rescale the image\n if (zmin is not None or binary_string) and zmax is None:\n zmax = img.max()\n if (zmax is not None or binary_string) and zmin is None:\n zmin = img.min()\n else:\n # For uint8 data and infer we let zmin and zmax to be None if passed as None\n if zmax is None and img.dtype != np.uint8:\n zmax = _infer_zmax_from_type(img)\n if zmin is None and zmax is not None:\n zmin = 0\n\n # For 2d data, use Heatmap trace, unless binary_string is True\n if img.ndim == 2 + slice_dimensions and not binary_string:\n y_index = slice_dimensions\n if y is not None and img.shape[y_index] != len(y):\n raise ValueError(\n "The length of the y vector must match the length of the first "\n + "dimension of the img matrix."\n )\n x_index = slice_dimensions + 1\n if x is not None and img.shape[x_index] != len(x):\n raise ValueError(\n "The length of the x vector must match the length of the second "\n + "dimension of the img matrix."\n )\n\n texttemplate = None\n if text_auto is True:\n texttemplate = "%{z}"\n elif text_auto is not False:\n texttemplate = "%{z:" + text_auto + "}"\n\n traces = [\n go.Heatmap(\n x=x,\n y=y,\n z=img[index_tup],\n coloraxis="coloraxis1",\n name=str(i),\n texttemplate=texttemplate,\n )\n for i, index_tup in enumerate(itertools.product(*iterables))\n ]\n autorange = True if origin == "lower" else "reversed"\n layout = dict(yaxis=dict(autorange=autorange))\n if aspect == "equal":\n layout["xaxis"] = dict(scaleanchor="y", constrain="domain")\n layout["yaxis"]["constrain"] = "domain"\n colorscale_validator = ColorscaleValidator("colorscale", "imshow")\n layout["coloraxis1"] = dict(\n colorscale=colorscale_validator.validate_coerce(\n args["color_continuous_scale"]\n ),\n cmid=color_continuous_midpoint,\n cmin=zmin,\n cmax=zmax,\n )\n if labels["color"]:\n layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])\n\n # For 2D+RGB data, use Image trace\n elif (\n img.ndim >= 3\n and (img.shape[-1] in [3, 
4] or slice_dimensions and binary_string)\n ) or (img.ndim == 2 and binary_string):\n rescale_image = True # to check whether image has been modified\n if zmin is not None and zmax is not None:\n zmin, zmax = (\n _vectorize_zvalue(zmin, mode="min"),\n _vectorize_zvalue(zmax, mode="max"),\n )\n x0, y0, dx, dy = (None,) * 4\n error_msg_xarray = (\n "Non-numerical coordinates were passed with xarray `img`, but "\n "the Image trace cannot handle it. Please use `binary_string=False` "\n "for 2D data or pass instead the numpy array `img.values` to `px.imshow`."\n )\n if x is not None:\n x = np.asanyarray(x)\n if np.issubdtype(x.dtype, np.number):\n x0 = x[0]\n dx = x[1] - x[0]\n else:\n error_msg = (\n error_msg_xarray\n if img_is_xarray\n else (\n "Only numerical values are accepted for the `x` parameter "\n "when an Image trace is used."\n )\n )\n raise ValueError(error_msg)\n if y is not None:\n y = np.asanyarray(y)\n if np.issubdtype(y.dtype, np.number):\n y0 = y[0]\n dy = y[1] - y[0]\n else:\n error_msg = (\n error_msg_xarray\n if img_is_xarray\n else (\n "Only numerical values are accepted for the `y` parameter "\n "when an Image trace is used."\n )\n )\n raise ValueError(error_msg)\n if binary_string:\n if zmin is None and zmax is None: # no rescaling, faster\n img_rescaled = img\n rescale_image = False\n elif img.ndim == 2 + slice_dimensions: # single-channel image\n img_rescaled = rescale_intensity(\n img, in_range=(zmin[0], zmax[0]), out_range=np.uint8\n )\n else:\n img_rescaled = np.stack(\n [\n rescale_intensity(\n img[..., ch],\n in_range=(zmin[ch], zmax[ch]),\n out_range=np.uint8,\n )\n for ch in range(img.shape[-1])\n ],\n axis=-1,\n )\n img_str = [\n image_array_to_data_uri(\n img_rescaled[index_tup],\n backend=binary_backend,\n compression=binary_compression_level,\n ext=binary_format,\n )\n for index_tup in itertools.product(*iterables)\n ]\n\n traces = [\n go.Image(source=img_str_slice, name=str(i), x0=x0, y0=y0, dx=dx, dy=dy)\n for i, img_str_slice in 
enumerate(img_str)\n ]\n else:\n colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"\n traces = [\n go.Image(\n z=img[index_tup],\n zmin=zmin,\n zmax=zmax,\n colormodel=colormodel,\n x0=x0,\n y0=y0,\n dx=dx,\n dy=dy,\n )\n for index_tup in itertools.product(*iterables)\n ]\n layout = {}\n if origin == "lower" or (dy is not None and dy < 0):\n layout["yaxis"] = dict(autorange=True)\n if dx is not None and dx < 0:\n layout["xaxis"] = dict(autorange="reversed")\n else:\n raise ValueError(\n "px.imshow only accepts 2D single-channel, RGB or RGBA images. "\n "An image of shape %s was provided. "\n "Alternatively, 3- or 4-D single or multichannel datasets can be "\n "visualized using the `facet_col` or/and `animation_frame` arguments."\n % str(img.shape)\n )\n\n # Now build figure\n col_labels = []\n if facet_col is not None:\n slice_label = (\n "facet_col" if labels.get("facet_col") is None else labels["facet_col"]\n )\n col_labels = [f"{slice_label}={i}" for i in facet_slices]\n fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])\n for attr_name in ["height", "width"]:\n if args[attr_name]:\n layout[attr_name] = args[attr_name]\n if args["title"]:\n layout["title_text"] = args["title"]\n elif args["template"].layout.margin.t is None:\n layout["margin"] = {"t": 60}\n\n frame_list = []\n for index, trace in enumerate(traces):\n if (facet_col and index < nrows * ncols) or index == 0:\n fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)\n if animation_frame is not None:\n for i, index in zip(range(nslices_animation), animation_slices):\n frame_list.append(\n dict(\n data=traces[nslices_facet * i : nslices_facet * (i + 1)],\n layout=layout,\n name=str(index),\n )\n )\n if animation_frame:\n fig.frames = frame_list\n fig.update_layout(layout)\n # Hover name, z or color\n if binary_string and rescale_image and not np.all(img == img_rescaled):\n # we rescaled the image, hence z is not displayed in hover since it does\n # not correspond to 
img values\n hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (\n labels["x"] or "x",\n labels["y"] or "y",\n )\n else:\n if trace["type"] == "heatmap":\n hover_name = "%{z}"\n elif img.ndim == 2:\n hover_name = "%{z[0]}"\n elif img.ndim == 3 and img.shape[-1] == 3:\n hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"\n else:\n hover_name = "%{z}"\n hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (\n labels["x"] or "x",\n labels["y"] or "y",\n labels["color"] or "color",\n hover_name,\n )\n fig.update_traces(hovertemplate=hovertemplate)\n if labels["x"]:\n fig.update_xaxes(title_text=labels["x"], row=1)\n if labels["y"]:\n fig.update_yaxes(title_text=labels["y"], col=1)\n configure_animation_controls(args, go.Image, fig)\n fig.update_layout(template=args["template"], overwrite=True)\n return fig\n
.venv\Lib\site-packages\plotly\express\_imshow.py
_imshow.py
Python
23,681
0.95
0.216529
0.032787
awesome-app
280
2023-10-04T23:38:48.096393
Apache-2.0
false
ebdc96d05402242ee63750276c4baf39
class IdentityMap(object):\n """\n `dict`-like object which acts as if the value for any key is the key itself. Objects\n of this class can be passed in to arguments like `color_discrete_map` to\n use the provided data values as colors, rather than mapping them to colors cycled\n from `color_discrete_sequence`. This works for any `_map` argument to Plotly Express\n functions, such as `line_dash_map` and `symbol_map`.\n """\n\n def __getitem__(self, key):\n return key\n\n def __contains__(self, key):\n return True\n\n def copy(self):\n return self\n\n\nclass Constant(object):\n """\n Objects of this class can be passed to Plotly Express functions that expect column\n identifiers or list-like objects to indicate that this attribute should take on a\n constant value. An optional label can be provided.\n """\n\n def __init__(self, value, label=None):\n self.value = value\n self.label = label\n\n\nclass Range(object):\n """\n Objects of this class can be passed to Plotly Express functions that expect column\n identifiers or list-like objects to indicate that this attribute should be mapped\n onto integers starting at 0. An optional label can be provided.\n """\n\n def __init__(self, label=None):\n self.label = label\n
.venv\Lib\site-packages\plotly\express\_special_inputs.py
_special_inputs.py
Python
1,300
0.85
0.35
0
awesome-app
146
2024-04-19T07:40:32.077379
Apache-2.0
false
7e255367d260211f5a45d9a7231680e0
# ruff: noqa: E402\n\n"""\n`plotly.express` is a terse, consistent, high-level wrapper around `plotly.graph_objects`\nfor rapid data exploration and figure generation. Learn more at https://plotly.com/python/plotly-express/\n"""\n\nfrom plotly import optional_imports\n\nnp = optional_imports.get_module("numpy")\nif np is None:\n raise ImportError(\n """\\nPlotly Express requires numpy to be installed. You can install numpy using pip with:\n\n$ pip install numpy\n\nOr install Plotly Express and its dependencies directly with:\n\n$ pip install "plotly[express]"\n\nYou can also use Plotly Graph Objects to create a large number of charts without installing\nnumpy. See examples here: https://plotly.com/python/graph-objects/\n"""\n )\n\nfrom ._imshow import imshow\nfrom ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n scatter_polar,\n scatter_ternary,\n scatter_map,\n scatter_mapbox,\n scatter_geo,\n line,\n line_3d,\n line_polar,\n line_ternary,\n line_map,\n line_mapbox,\n line_geo,\n area,\n bar,\n timeline,\n bar_polar,\n violin,\n box,\n strip,\n histogram,\n ecdf,\n scatter_matrix,\n parallel_coordinates,\n parallel_categories,\n choropleth,\n density_contour,\n density_heatmap,\n pie,\n sunburst,\n treemap,\n icicle,\n funnel,\n funnel_area,\n choropleth_map,\n choropleth_mapbox,\n density_map,\n density_mapbox,\n)\n\n\nfrom ._core import ( # noqa: F401\n set_mapbox_access_token,\n defaults,\n get_trendline_results,\n NO_COLOR,\n)\n\nfrom ._special_inputs import IdentityMap, Constant, Range # noqa: F401\n\nfrom . 
import data, colors, trendline_functions # noqa: F401\n\n__all__ = [\n "scatter",\n "scatter_3d",\n "scatter_polar",\n "scatter_ternary",\n "scatter_map",\n "scatter_mapbox",\n "scatter_geo",\n "scatter_matrix",\n "density_contour",\n "density_heatmap",\n "density_map",\n "density_mapbox",\n "line",\n "line_3d",\n "line_polar",\n "line_ternary",\n "line_map",\n "line_mapbox",\n "line_geo",\n "parallel_coordinates",\n "parallel_categories",\n "area",\n "bar",\n "timeline",\n "bar_polar",\n "violin",\n "box",\n "strip",\n "histogram",\n "ecdf",\n "choropleth",\n "choropleth_map",\n "choropleth_mapbox",\n "pie",\n "sunburst",\n "treemap",\n "icicle",\n "funnel",\n "funnel_area",\n "imshow",\n "data",\n "colors",\n "trendline_functions",\n "set_mapbox_access_token",\n "get_trendline_results",\n "IdentityMap",\n "Constant",\n "Range",\n "NO_COLOR",\n]\n
.venv\Lib\site-packages\plotly\express\__init__.py
__init__.py
Python
2,575
0.95
0.015152
0.008403
vue-tools
449
2025-07-09T18:12:51.266000
GPL-3.0
false
920627241d20eaec2d57ec271bd9ceb9
# ruff: noqa: F405\n"""For a list of colors available in `plotly.express.colors`, please see\n\n* the `tutorial on discrete color sequences <https://plotly.com/python/discrete-color/#color-sequences-in-plotly-express>`_\n* the `list of built-in continuous color scales <https://plotly.com/python/builtin-colorscales/>`_\n* the `tutorial on continuous colors <https://plotly.com/python/colorscales/>`_\n\nColor scales are available within the following namespaces\n\n* cyclical\n* diverging\n* qualitative\n* sequential\n"""\n\nfrom plotly.colors import * # noqa: F403\n\n\n__all__ = [\n "named_colorscales",\n "cyclical",\n "diverging",\n "sequential",\n "qualitative",\n "colorbrewer",\n "colorbrewer",\n "carto",\n "cmocean",\n "color_parser",\n "colorscale_to_colors",\n "colorscale_to_scale",\n "convert_colors_to_same_type",\n "convert_colorscale_to_rgb",\n "convert_dict_colors_to_same_type",\n "convert_to_RGB_255",\n "find_intermediate_color",\n "hex_to_rgb",\n "label_rgb",\n "make_colorscale",\n "n_colors",\n "unconvert_from_RGB_255",\n "unlabel_rgb",\n "validate_colors",\n "validate_colors_dict",\n "validate_colorscale",\n "validate_scale_values",\n "plotlyjs",\n "DEFAULT_PLOTLY_COLORS",\n "PLOTLY_SCALES",\n "get_colorscale",\n "sample_colorscale",\n]\n
.venv\Lib\site-packages\plotly\express\colors\__init__.py
__init__.py
Python
1,314
0.95
0
0.173913
vue-tools
245
2025-02-28T16:40:13.229245
BSD-3-Clause
false
62a73ae35ac03d8d474b95a10f028b06
\n\n
.venv\Lib\site-packages\plotly\express\colors\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,311
0.8
0
0.5
node-utils
135
2024-01-06T00:37:42.042341
MIT
false
4db1238a07119060fa871e2581a956e3
# ruff: noqa: F405\n"""Built-in datasets for demonstration, educational and test purposes."""\n\nfrom plotly.data import * # noqa: F403\n\n__all__ = [\n "carshare",\n "election",\n "election_geojson",\n "experiment",\n "gapminder",\n "iris",\n "medals_wide",\n "medals_long",\n "stocks",\n "tips",\n "wind",\n]\n
.venv\Lib\site-packages\plotly\express\data\__init__.py
__init__.py
Python
328
0.95
0.055556
0.0625
react-lib
193
2023-11-01T06:14:00.356879
Apache-2.0
false
f5c14e1699df8e22057195360945b982
\n\n
.venv\Lib\site-packages\plotly\express\data\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
451
0.7
0.333333
0
vue-tools
738
2023-10-07T04:42:09.826141
MIT
false
26e47761ded493c3c70706c80489bf03
"""\nThe `trendline_functions` module contains functions which are called by Plotly Express\nwhen the `trendline` argument is used. Valid values for `trendline` are the names of the\nfunctions in this module, and the value of the `trendline_options` argument to PX\nfunctions is passed in as the first argument to these functions when called.\n\nNote that the functions in this module are not meant to be called directly, and are\nexposed as part of the public API for documentation purposes.\n"""\n\n__all__ = ["ols", "lowess", "rolling", "ewm", "expanding"]\n\n\ndef ols(trendline_options, x_raw, x, y, x_label, y_label, non_missing):\n """Ordinary Least Squares (OLS) trendline function\n\n Requires `statsmodels` to be installed.\n\n This trendline function causes fit results to be stored within the figure,\n accessible via the `plotly.express.get_trendline_results` function. The fit results\n are the output of the `statsmodels.api.OLS` function.\n\n Valid keys for the `trendline_options` dict are:\n\n - `add_constant` (`bool`, default `True`): if `False`, the trendline passes through\n the origin but if `True` a y-intercept is fitted.\n\n - `log_x` and `log_y` (`bool`, default `False`): if `True` the OLS is computed with\n respect to the base 10 logarithm of the input. 
Note that this means no zeros can\n be present in the input.\n """\n import numpy as np\n\n valid_options = ["add_constant", "log_x", "log_y"]\n for k in trendline_options.keys():\n if k not in valid_options:\n raise ValueError(\n "OLS trendline_options keys must be one of [%s] but got '%s'"\n % (", ".join(valid_options), k)\n )\n\n import statsmodels.api as sm\n\n add_constant = trendline_options.get("add_constant", True)\n log_x = trendline_options.get("log_x", False)\n log_y = trendline_options.get("log_y", False)\n\n if log_y:\n if np.any(y <= 0):\n raise ValueError(\n "Can't do OLS trendline with `log_y=True` when `y` contains non-positive values."\n )\n y = np.log10(y)\n y_label = "log10(%s)" % y_label\n if log_x:\n if np.any(x <= 0):\n raise ValueError(\n "Can't do OLS trendline with `log_x=True` when `x` contains non-positive values."\n )\n x = np.log10(x)\n x_label = "log10(%s)" % x_label\n if add_constant:\n x = sm.add_constant(x)\n fit_results = sm.OLS(y, x, missing="drop").fit()\n y_out = fit_results.predict()\n if log_y:\n y_out = np.power(10, y_out)\n hover_header = "<b>OLS trendline</b><br>"\n if len(fit_results.params) == 2:\n hover_header += "%s = %g * %s + %g<br>" % (\n y_label,\n fit_results.params[1],\n x_label,\n fit_results.params[0],\n )\n elif not add_constant:\n hover_header += "%s = %g * %s<br>" % (y_label, fit_results.params[0], x_label)\n else:\n hover_header += "%s = %g<br>" % (y_label, fit_results.params[0])\n hover_header += "R<sup>2</sup>=%f<br><br>" % fit_results.rsquared\n return y_out, hover_header, fit_results\n\n\ndef lowess(trendline_options, x_raw, x, y, x_label, y_label, non_missing):\n """LOcally WEighted Scatterplot Smoothing (LOWESS) trendline function\n\n Requires `statsmodels` to be installed.\n\n Valid keys for the `trendline_options` dict are:\n\n - `frac` (`float`, default `0.6666666`): the `frac` parameter from the\n `statsmodels.api.nonparametric.lowess` function\n """\n\n valid_options = ["frac"]\n for k in 
trendline_options.keys():\n if k not in valid_options:\n raise ValueError(\n "LOWESS trendline_options keys must be one of [%s] but got '%s'"\n % (", ".join(valid_options), k)\n )\n\n import statsmodels.api as sm\n\n frac = trendline_options.get("frac", 0.6666666)\n y_out = sm.nonparametric.lowess(y, x, missing="drop", frac=frac)[:, 1]\n hover_header = "<b>LOWESS trendline</b><br><br>"\n return y_out, hover_header, None\n\n\ndef _pandas(mode, trendline_options, x_raw, y, non_missing):\n import numpy as np\n\n try:\n import pandas as pd\n except ImportError:\n msg = "Trendline requires pandas to be installed"\n raise ImportError(msg)\n\n modes = dict(rolling="Rolling", ewm="Exponentially Weighted", expanding="Expanding")\n trendline_options = trendline_options.copy()\n function_name = trendline_options.pop("function", "mean")\n function_args = trendline_options.pop("function_args", dict())\n\n series = pd.Series(np.copy(y), index=x_raw.to_pandas())\n\n # TODO: Narwhals Series/DataFrame do not support rolling, ewm nor expanding, therefore\n # it fallbacks to pandas Series independently of the original type.\n # Plotly issue: https://github.com/plotly/plotly.py/issues/4834\n # Narwhals issue: https://github.com/narwhals-dev/narwhals/issues/1254\n agg = getattr(series, mode) # e.g. series.rolling\n agg_obj = agg(**trendline_options) # e.g. series.rolling(**opts)\n function = getattr(agg_obj, function_name) # e.g. series.rolling(**opts).mean\n y_out = function(**function_args) # e.g. series.rolling(**opts).mean(**opts)\n y_out = y_out[non_missing]\n hover_header = "<b>%s %s trendline</b><br><br>" % (modes[mode], function_name)\n return y_out, hover_header, None\n\n\ndef rolling(trendline_options, x_raw, x, y, x_label, y_label, non_missing):\n """Rolling trendline function\n\n The value of the `function` key of the `trendline_options` dict is the function to\n use (defaults to `mean`) and the value of the `function_args` key are taken to be\n its arguments as a dict. 
The remainder of the `trendline_options` dict is passed as\n keyword arguments into the `pandas.Series.rolling` function.\n """\n return _pandas("rolling", trendline_options, x_raw, y, non_missing)\n\n\ndef expanding(trendline_options, x_raw, x, y, x_label, y_label, non_missing):\n """Expanding trendline function\n\n The value of the `function` key of the `trendline_options` dict is the function to\n use (defaults to `mean`) and the value of the `function_args` key are taken to be\n its arguments as a dict. The remainder of the `trendline_options` dict is passed as\n keyword arguments into the `pandas.Series.expanding` function.\n """\n return _pandas("expanding", trendline_options, x_raw, y, non_missing)\n\n\ndef ewm(trendline_options, x_raw, x, y, x_label, y_label, non_missing):\n """Exponentially Weighted Moment (EWM) trendline function\n\n The value of the `function` key of the `trendline_options` dict is the function to\n use (defaults to `mean`) and the value of the `function_args` key are taken to be\n its arguments as a dict. The remainder of the `trendline_options` dict is passed as\n keyword arguments into the `pandas.Series.ewm` function.\n """\n return _pandas("ewm", trendline_options, x_raw, y, non_missing)\n
.venv\Lib\site-packages\plotly\express\trendline_functions\__init__.py
__init__.py
Python
6,945
0.95
0.270588
0.029851
node-utils
975
2024-08-01T19:47:39.691502
MIT
false
4395301b75443c32282ee4f7ed6204f9
\n\n
.venv\Lib\site-packages\plotly\express\trendline_functions\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
7,473
0.95
0.224138
0.039604
react-lib
651
2024-02-17T18:22:34.121275
Apache-2.0
false
e64bb37f138254a89be29041512fc3d8
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\imshow_utils.cpython-313.pyc
imshow_utils.cpython-313.pyc
Other
9,249
0.95
0.101695
0
node-utils
597
2024-06-28T20:43:27.316333
BSD-3-Clause
false
2718101820f7013e6015567907104233
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\_chart_types.cpython-313.pyc
_chart_types.cpython-313.pyc
Other
37,553
0.95
0.020602
0.006612
awesome-app
363
2024-09-15T07:48:46.731550
BSD-3-Clause
false
7b5be147a459320eda496addc53569b0
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\_core.cpython-313.pyc
_core.cpython-313.pyc
Other
110,696
0.75
0.028846
0.009054
awesome-app
116
2024-12-07T04:13:34.180639
BSD-3-Clause
false
7645db40da4d0a814834e3c700f62f13
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\_doc.cpython-313.pyc
_doc.cpython-313.pyc
Other
28,134
0.95
0.469565
0.008772
react-lib
403
2023-12-18T09:29:18.820970
MIT
false
c566521120d178c32e471638e1536604
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\_imshow.cpython-313.pyc
_imshow.cpython-313.pyc
Other
22,949
0.95
0.090343
0.007194
node-utils
805
2023-08-29T00:03:23.161702
GPL-3.0
false
5ffd33090a869ba34d1121e2d337be99
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\_special_inputs.cpython-313.pyc
_special_inputs.cpython-313.pyc
Other
2,373
0.95
0.26087
0
python-kit
200
2025-02-14T12:40:22.859295
Apache-2.0
false
06b56c9651c5ae0e6a6d4da4c96b0240
\n\n
.venv\Lib\site-packages\plotly\express\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
2,469
0.95
0.028571
0
awesome-app
413
2024-12-18T07:53:20.355708
GPL-3.0
false
0cab8e6da6f24d18d0e8f0eb146fe3e0
from collections.abc import Sequence

from plotly import exceptions


def is_sequence(obj):
    """Return True if *obj* is a non-string sequence (list, tuple, ...)."""
    return isinstance(obj, Sequence) and not isinstance(obj, str)


def validate_index(index_vals):
    """
    Validates if a list contains all numbers or all strings

    An empty list is considered valid.

    :raises: (PlotlyError) If there are any two items in the list whose
        types differ
    """
    from numbers import Number

    # Guard: an empty column has nothing to validate (previously this
    # raised a bare IndexError on index_vals[0]).
    if len(index_vals) == 0:
        return

    # The first entry decides the expected type for the whole column.
    if isinstance(index_vals[0], Number):
        if not all(isinstance(item, Number) for item in index_vals):
            raise exceptions.PlotlyError(
                "Error in indexing column. "
                "Make sure all entries of each "
                "column are all numbers or "
                "all strings."
            )

    elif isinstance(index_vals[0], str):
        if not all(isinstance(item, str) for item in index_vals):
            raise exceptions.PlotlyError(
                "Error in indexing column. "
                "Make sure all entries of each "
                "column are all numbers or "
                "all strings."
            )


def validate_dataframe(array):
    """
    Validates all strings or numbers in each dataframe column

    Empty columns are skipped.

    :raises: (PlotlyError) If there are any two items in any list whose
        types differ
    """
    from numbers import Number

    for vector in array:
        # Guard: skip empty columns instead of raising IndexError on
        # vector[0].
        if len(vector) == 0:
            continue
        if isinstance(vector[0], Number):
            if not all(isinstance(item, Number) for item in vector):
                raise exceptions.PlotlyError(
                    "Error in dataframe. "
                    "Make sure all entries of "
                    "each column are either "
                    "numbers or strings."
                )
        elif isinstance(vector[0], str):
            if not all(isinstance(item, str) for item in vector):
                raise exceptions.PlotlyError(
                    "Error in dataframe. "
                    "Make sure all entries of "
                    "each column are either "
                    "numbers or strings."
                )


def validate_equal_length(*args):
    """
    Validates that data lists or ndarrays are the same length.

    :raises: (PlotlyError) If any data lists are not the same length.
    """
    length = len(args[0])
    if any(len(lst) != length for lst in args):
        raise exceptions.PlotlyError(
            "Oops! Your data lists or ndarrays should be the same length."
        )


def validate_positive_scalars(**kwargs):
    """
    Validates that all values given in key/val pairs are positive.

    Accepts kwargs to improve Exception messages.

    :raises: (PlotlyError) If a value cannot be compared to 0.
    :raises: (ValueError) If any value is <= 0.
    """
    for key, val in kwargs.items():
        try:
            # NOTE: a non-positive numeric value raises ValueError (not
            # PlotlyError) — only non-comparable values are re-raised as
            # PlotlyError. Kept as-is for backward compatibility with
            # callers that catch ValueError.
            if val <= 0:
                raise ValueError("{} must be > 0, got {}".format(key, val))
        except TypeError:
            raise exceptions.PlotlyError("{} must be a number, got {}".format(key, val))


def flatten(array):
    """
    Uses list comprehension to flatten array

    :param (array): An iterable to flatten
    :raises (PlotlyError): If iterable is not nested.
    :rtype (list): The flattened list.
    """
    try:
        return [item for sublist in array for item in sublist]
    except TypeError:
        raise exceptions.PlotlyError(
            "Your data array could not be "
            "flattened! Make sure your data is "
            "entered as lists or ndarrays!"
        )


def endpts_to_intervals(endpts):
    """
    Returns a list of intervals for categorical colormaps

    Accepts a list or tuple of sequentially increasing numbers and returns
    a list representation of the mathematical intervals with these numbers
    as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]

    :raises: (PlotlyError) If input is not a list or tuple
    :raises: (PlotlyError) If the input contains a string
    :raises: (PlotlyError) If any number does not increase after the
        previous one in the sequence
    """
    # Check if endpts is a list or tuple (single isinstance call replaces
    # the former two separate checks).
    if not isinstance(endpts, (list, tuple)):
        raise exceptions.PlotlyError(
            "The intervals_endpts argument must "
            "be a list or tuple of a sequence "
            "of increasing numbers."
        )
    # Check if endpts contains only numbers
    for item in endpts:
        if isinstance(item, str):
            raise exceptions.PlotlyError(
                "The intervals_endpts argument "
                "must be a list or tuple of a "
                "sequence of increasing "
                "numbers."
            )
    # Check if numbers in endpts are strictly increasing
    for prev, nxt in zip(endpts, endpts[1:]):
        if prev >= nxt:
            raise exceptions.PlotlyError(
                "The intervals_endpts argument "
                "must be a list or tuple of a "
                "sequence of increasing "
                "numbers."
            )
    # Build [-inf, e0], [e0, e1], ..., [e_last, inf]
    intervals = [[float("-inf"), endpts[0]]]
    intervals.extend([a, b] for a, b in zip(endpts, endpts[1:]))
    intervals.append([endpts[-1], float("inf")])
    return intervals


def annotation_dict_for_label(
    text,
    lane,
    num_of_lanes,
    subplot_spacing,
    row_col="col",
    flipped=True,
    right_side=True,
    text_color="#0f0f0f",
):
    """
    Returns annotation dict for label of n labels of a 1xn or nx1 subplot.

    :param (str) text: the text for a label.
    :param (int) lane: the label number for text. From 1 to n inclusive.
    :param (int) num_of_lanes: the number 'n' of rows or columns in subplot.
    :param (float) subplot_spacing: the value for the horizontal_spacing and
        vertical_spacing params in your plotly.tools.make_subplots() call.
    :param (str) row_col: choose whether labels are placed along rows or
        columns. Must be 'col' or 'row'; any other value raises NameError
        (pre-existing behavior, kept for compatibility).
    :param (bool) flipped: flips text by 90 degrees. Text is printed
        horizontally if set to True and row_col='row', or if False and
        row_col='col'.
    :param (bool) right_side: only applicable if row_col is set to 'row'.
    :param (str) text_color: color of the text.
    """
    # Fraction of the paper-coordinate axis occupied by a single lane.
    temp = (1 - (num_of_lanes - 1) * subplot_spacing) / (num_of_lanes)
    if not flipped:
        xanchor = "center"
        yanchor = "middle"
        if row_col == "col":
            x = (lane - 1) * (temp + subplot_spacing) + 0.5 * temp
            y = 1.03
            textangle = 0
        elif row_col == "row":
            y = (lane - 1) * (temp + subplot_spacing) + 0.5 * temp
            x = 1.03
            textangle = 90
    else:
        if row_col == "col":
            xanchor = "center"
            yanchor = "bottom"
            x = (lane - 1) * (temp + subplot_spacing) + 0.5 * temp
            y = 1.0
            textangle = 270
        elif row_col == "row":
            yanchor = "middle"
            y = (lane - 1) * (temp + subplot_spacing) + 0.5 * temp
            if right_side:
                x = 1.0
                xanchor = "left"
            else:
                x = -0.01
                xanchor = "right"
            textangle = 0

    annotation_dict = dict(
        textangle=textangle,
        xanchor=xanchor,
        yanchor=yanchor,
        x=x,
        y=y,
        showarrow=False,
        xref="paper",
        yref="paper",
        text=text,
        font=dict(size=13, color=text_color),
    )
    return annotation_dict


def list_of_options(iterable, conj="and", period=True):
    """
    Returns an English listing of objects separated by commas ','

    For example, ['foo', 'bar', 'baz'] becomes 'foo, bar and baz'
    if the conjunction 'and' is selected.

    :raises: (PlotlyError) If the iterable has fewer than 2 items.
    """
    if len(iterable) < 2:
        raise exceptions.PlotlyError(
            "Your list or tuple must contain at least 2 items."
        )
    # "{}, " repeated for all but the last two items, then "{} <conj> {}"
    # and an optional trailing period (period is a bool: True * "." == ".").
    template = (len(iterable) - 2) * "{}, " + "{} " + conj + " {}" + period * "."
    return template.format(*iterable)
.venv\Lib\site-packages\plotly\figure_factory\utils.py
utils.py
Python
8,147
0.95
0.208835
0.023148
awesome-app
166
2024-07-23T05:12:27.347663
Apache-2.0
false
8464d009fb81ad6e0e612535f756c6b2
from numbers import Number

import plotly.exceptions

import plotly.colors as clrs
from plotly.graph_objs import graph_objs


def make_linear_colorscale(colors):
    """
    Makes a list of colors into a colorscale-acceptable form

    For documentation regarding to the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale
    """
    step = 1.0 / (len(colors) - 1)
    scale = []
    for position, color in enumerate(colors):
        scale.append([position * step, color])
    return scale


def create_2d_density(
    x,
    y,
    colorscale="Earth",
    ncontours=20,
    hist_color=(0, 0, 0.5),
    point_color=(0, 0, 0.5),
    point_size=2,
    title="2D Density Plot",
    height=600,
    width=600,
):
    """
    **deprecated**, use instead
    :func:`plotly.express.density_heatmap`.

    Build a figure combining a scatter of the raw points, a 2D contour
    density, and marginal histograms for ``x`` and ``y``.

    :param (list|array) x: x-axis data for plot generation
    :param (list|array) y: y-axis data for plot generation
    :param (str|tuple|list) colorscale: either a plotly scale name, an rgb
        or hex color, a color tuple or a list or tuple of colors. An rgb
        color is of the form 'rgb(x, y, z)' where x, y, z belong to the
        interval [0, 255] and a color tuple is a tuple of the form
        (a, b, c) where a, b and c belong to [0, 1]. If colormap is a
        list, it must contain the valid color types aforementioned as its
        members.
    :param (int) ncontours: the number of 2D contours to draw on the plot
    :param (str) hist_color: the color of the plotted histograms
    :param (str) point_color: the color of the scatter points
    :param (str) point_size: the color of the scatter points
    :param (str) title: set the title for the plot
    :param (float) height: the height of the chart
    :param (float) width: the width of the chart

    Example:

    >>> from plotly.figure_factory import create_2d_density
    >>> import numpy as np
    >>> t = np.linspace(-1, 1.2, 2000)
    >>> x = (t**3) + (0.3 * np.random.randn(2000))
    >>> y = (t**6) + (0.3 * np.random.randn(2000))
    >>> fig = create_2d_density(x, y)
    >>> fig.show()
    """

    # Every entry of x and y must be numeric.
    for sample in (x, y):
        for element in sample:
            if not isinstance(element, Number):
                raise plotly.exceptions.PlotlyError(
                    "All elements of your 'x' and 'y' lists must be numbers."
                )

    # x and y must pair up one-to-one.
    if len(x) != len(y):
        raise plotly.exceptions.PlotlyError(
            "Both lists 'x' and 'y' must be the same length."
        )

    # Coerce the colorscale to rgb and spread it linearly over [0, 1].
    colorscale = make_linear_colorscale(clrs.validate_colors(colorscale, "rgb"))

    # Normalize the histogram and point colors to rgb as well.
    hist_color = clrs.validate_colors(hist_color, "rgb")
    point_color = clrs.validate_colors(point_color, "rgb")

    points = graph_objs.Scatter(
        x=x,
        y=y,
        mode="markers",
        name="points",
        marker=dict(color=point_color[0], size=point_size, opacity=0.4),
    )
    density = graph_objs.Histogram2dContour(
        x=x,
        y=y,
        name="density",
        ncontours=ncontours,
        colorscale=colorscale,
        reversescale=True,
        showscale=False,
    )
    hist_x = graph_objs.Histogram(
        x=x, name="x density", marker=dict(color=hist_color[0]), yaxis="y2"
    )
    hist_y = graph_objs.Histogram(
        y=y, name="y density", marker=dict(color=hist_color[0]), xaxis="x2"
    )

    # Main axes occupy [0, 0.85]; the marginal histograms get [0.85, 1].
    layout = graph_objs.Layout(
        showlegend=False,
        autosize=False,
        title=title,
        height=height,
        width=width,
        xaxis=dict(domain=[0, 0.85], showgrid=False, zeroline=False),
        yaxis=dict(domain=[0, 0.85], showgrid=False, zeroline=False),
        margin=dict(t=50),
        hovermode="closest",
        bargap=0,
        xaxis2=dict(domain=[0.85, 1], showgrid=False, zeroline=False),
        yaxis2=dict(domain=[0.85, 1], showgrid=False, zeroline=False),
    )

    return graph_objs.Figure(
        data=[points, density, hist_x, hist_y], layout=layout
    )
.venv\Lib\site-packages\plotly\figure_factory\_2d_density.py
_2d_density.py
Python
4,796
0.95
0.064516
0.031496
vue-tools
926
2025-01-16T06:39:09.582229
BSD-3-Clause
false
ccba1edd6f15945e209e78b9bc336553
import plotly.colors as clrs
from plotly import exceptions, optional_imports
from plotly.figure_factory import utils
from plotly.graph_objs import graph_objs
from plotly.validator_cache import ValidatorCache

# Optional imports, may be None for users that only use our core functionality.
np = optional_imports.get_module("numpy")


def validate_annotated_heatmap(z, x, y, annotation_text):
    """
    Annotated-heatmap-specific validations

    Check that if a text matrix is supplied, it has the same
    dimensions as the z matrix, and that any x/y label lists match the
    width/height of z.

    See FigureFactory.create_annotated_heatmap() for params

    :raises: (PlotlyError) If z and text matrices do not have the same
        dimensions.
    """
    if annotation_text is not None and isinstance(annotation_text, list):
        # Same number of rows...
        utils.validate_equal_length(z, annotation_text)
        # ...and each row has the same number of columns.
        for lst in range(len(z)):
            if len(z[lst]) != len(annotation_text[lst]):
                raise exceptions.PlotlyError(
                    "z and text should have the same dimensions"
                )

    if x:
        if len(x) != len(z[0]):
            raise exceptions.PlotlyError(
                "oops, the x list that you "
                "provided does not match the "
                "width of your z matrix "
            )

    if y:
        if len(y) != len(z):
            raise exceptions.PlotlyError(
                "oops, the y list that you "
                "provided does not match the "
                "length of your z matrix "
            )


def create_annotated_heatmap(
    z,
    x=None,
    y=None,
    annotation_text=None,
    colorscale="Plasma",
    font_colors=None,
    showscale=False,
    reversescale=False,
    **kwargs,
):
    """
    **deprecated**, use instead
    :func:`plotly.express.imshow`.

    Function that creates annotated heatmaps

    This function adds annotations to each cell of the heatmap.

    :param (list[list]|ndarray) z: z matrix to create heatmap.
    :param (list) x: x axis labels.
    :param (list) y: y axis labels.
    :param (list[list]|ndarray) annotation_text: Text strings for
        annotations. Should have the same dimensions as the z matrix. If no
        text is added, the values of the z matrix are annotated. Default =
        z matrix values.
    :param (list|str) colorscale: heatmap colorscale.
    :param (list) font_colors: List of two color strings: [min_text_color,
        max_text_color] where min_text_color is applied to annotations for
        heatmap values < (max_value - min_value)/2. If font_colors is not
        defined, the colors are defined logically as black or white
        depending on the heatmap's colorscale.
    :param (bool) showscale: Display colorscale. Default = False
    :param (bool) reversescale: Reverse colorscale. Default = False
    :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.
        These kwargs describe other attributes about the annotated Heatmap
        trace such as the colorscale. For more information on valid kwargs
        call help(plotly.graph_objs.Heatmap)

    Example 1: Simple annotated heatmap with default configuration

    >>> import plotly.figure_factory as ff

    >>> z = [[0.300000, 0.00000, 0.65, 0.300000],
    ...      [1, 0.100005, 0.45, 0.4300],
    ...      [0.300000, 0.00000, 0.65, 0.300000],
    ...      [1, 0.100005, 0.45, 0.00000]]

    >>> fig = ff.create_annotated_heatmap(z)
    >>> fig.show()
    """

    # Avoiding mutables in the call signature
    font_colors = font_colors if font_colors is not None else []
    validate_annotated_heatmap(z, x, y, annotation_text)

    # validate colorscale — validate_coerce may rewrite the value (e.g. a
    # named scale can be coerced to list form), so downstream name-based
    # comparisons in _AnnotatedHeatmap.get_text_color may see the coerced
    # value rather than the original string.
    colorscale_validator = ValidatorCache.get_validator("heatmap", "colorscale")
    colorscale = colorscale_validator.validate_coerce(colorscale)

    # One per-cell text annotation, colored for contrast against the cell.
    annotations = _AnnotatedHeatmap(
        z, x, y, annotation_text, colorscale, font_colors, reversescale, **kwargs
    ).make_annotations()

    # With explicit axis labels, tick per label; otherwise hide tick labels.
    if x or y:
        trace = dict(
            type="heatmap",
            z=z,
            x=x,
            y=y,
            colorscale=colorscale,
            showscale=showscale,
            reversescale=reversescale,
            **kwargs,
        )
        layout = dict(
            annotations=annotations,
            xaxis=dict(ticks="", dtick=1, side="top", gridcolor="rgb(0, 0, 0)"),
            yaxis=dict(ticks="", dtick=1, ticksuffix=" "),
        )
    else:
        trace = dict(
            type="heatmap",
            z=z,
            colorscale=colorscale,
            showscale=showscale,
            reversescale=reversescale,
            **kwargs,
        )
        layout = dict(
            annotations=annotations,
            xaxis=dict(
                ticks="", side="top", gridcolor="rgb(0, 0, 0)", showticklabels=False
            ),
            yaxis=dict(ticks="", ticksuffix=" ", showticklabels=False),
        )

    data = [trace]

    return graph_objs.Figure(data=data, layout=layout)


def to_rgb_color_list(color_str, default):
    # Parse 'rgb(...)'/'rgba(...)' or '#hex' into an [r, g, b] int list;
    # fall back to *default* for any other format.
    color_str = color_str.strip()
    if color_str.startswith("rgb"):
        return [int(v) for v in color_str.strip("rgba()").split(",")]
    elif color_str.startswith("#"):
        return clrs.hex_to_rgb(color_str)
    else:
        return default


def should_use_black_text(background_color):
    # Perceived luminance using the 0.299/0.587/0.114 channel weights;
    # above 186 the background is light enough for black text.
    return (
        background_color[0] * 0.299
        + background_color[1] * 0.587
        + background_color[2] * 0.114
    ) > 186


class _AnnotatedHeatmap(object):
    """
    Refer to TraceFactory.create_annotated_heatmap() for docstring
    """

    def __init__(
        self, z, x, y, annotation_text, colorscale, font_colors, reversescale, **kwargs
    ):
        self.z = z
        # Default axis coordinates to 0..n-1 when no labels are supplied.
        if x:
            self.x = x
        else:
            self.x = range(len(z[0]))
        if y:
            self.y = y
        else:
            self.y = range(len(z))
        # With no explicit text, annotate with the z values themselves.
        if annotation_text is not None:
            self.annotation_text = annotation_text
        else:
            self.annotation_text = self.z
        self.colorscale = colorscale
        self.reversescale = reversescale
        self.font_colors = font_colors

        # z extrema: numpy fast path when z is an ndarray and numpy is
        # importable, otherwise a pure-Python scan over the nested lists.
        if np and isinstance(self.z, np.ndarray):
            self.zmin = np.amin(self.z)
            self.zmax = np.amax(self.z)
        else:
            self.zmin = min([v for row in self.z for v in row])
            self.zmax = max([v for row in self.z for v in row])

        # Explicit zmin/zmax/zmid kwargs override the computed values.
        if kwargs.get("zmin", None) is not None:
            self.zmin = kwargs["zmin"]
        if kwargs.get("zmax", None) is not None:
            self.zmax = kwargs["zmax"]

        self.zmid = (self.zmax + self.zmin) / 2

        if kwargs.get("zmid", None) is not None:
            self.zmid = kwargs["zmid"]

    def get_text_color(self):
        """
        Get font color for annotations.

        The annotated heatmap can feature two text colors: min_text_color and
        max_text_color. The min_text_color is applied to annotations for
        heatmap values < (max_value - min_value)/2. The user can define these
        two colors. Otherwise the colors are defined logically as black or
        white depending on the heatmap's colorscale.

        :rtype (string, string) min_text_color, max_text_color: text
            color for annotations for heatmap values <
            (max_value - min_value)/2 and text color for annotations for
            heatmap values >= (max_value - min_value)/2
        """
        # Plotly colorscales ranging from a lighter shade to a darker shade
        # NOTE(review): "YIGnBu" and "YIOrRd" look like misspellings of
        # Plotly's "YlGnBu"/"YlOrRd" (capital i vs lowercase L), so those
        # two entries would never match a real scale name — confirm against
        # plotly's named colorscales before changing.
        colorscales = [
            "Greys",
            "Greens",
            "Blues",
            "YIGnBu",
            "YIOrRd",
            "RdBu",
            "Picnic",
            "Jet",
            "Hot",
            "Blackbody",
            "Earth",
            "Electric",
            "Viridis",
            "Cividis",
        ]
        # Plotly colorscales ranging from a darker shade to a lighter shade
        colorscales_reverse = ["Reds"]

        white = "#FFFFFF"
        black = "#000000"
        # Priority: user-supplied font_colors, then known scale names
        # (direction flipped by reversescale), then a list-form colorscale
        # judged by the luminance of its end colors, then black/black.
        if self.font_colors:
            min_text_color = self.font_colors[0]
            max_text_color = self.font_colors[-1]
        elif self.colorscale in colorscales and self.reversescale:
            min_text_color = black
            max_text_color = white
        elif self.colorscale in colorscales:
            min_text_color = white
            max_text_color = black
        elif self.colorscale in colorscales_reverse and self.reversescale:
            min_text_color = white
            max_text_color = black
        elif self.colorscale in colorscales_reverse:
            min_text_color = black
            max_text_color = white
        elif isinstance(self.colorscale, list):
            min_col = to_rgb_color_list(self.colorscale[0][1], [255, 255, 255])
            max_col = to_rgb_color_list(self.colorscale[-1][1], [255, 255, 255])

            # swap min/max colors if reverse scale
            if self.reversescale:
                min_col, max_col = max_col, min_col

            if should_use_black_text(min_col):
                min_text_color = black
            else:
                min_text_color = white

            if should_use_black_text(max_col):
                max_text_color = black
            else:
                max_text_color = white
        else:
            min_text_color = black
            max_text_color = black
        return min_text_color, max_text_color

    def make_annotations(self):
        """
        Get annotations for each cell of the heatmap with graph_objs.Annotation

        :rtype (list[dict]) annotations: list of annotations for each cell of
            the heatmap
        """
        # Unidiomatic explicit-class call; equivalent to
        # self.get_text_color().
        min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)
        annotations = []
        for n, row in enumerate(self.z):
            for m, val in enumerate(row):
                # Cells below the midpoint use min_text_color, the rest
                # use max_text_color.
                font_color = min_text_color if val < self.zmid else max_text_color
                annotations.append(
                    graph_objs.layout.Annotation(
                        text=str(self.annotation_text[n][m]),
                        x=self.x[m],
                        y=self.y[n],
                        xref="x1",
                        yref="y1",
                        font=dict(color=font_color),
                        showarrow=False,
                    )
                )
        return annotations
.venv\Lib\site-packages\plotly\figure_factory\_annotated_heatmap.py
_annotated_heatmap.py
Python
10,372
0.95
0.172638
0.037879
node-utils
730
2024-10-09T09:24:45.282273
MIT
false
7f81be1d6f00f1b36e93d0a94c5c8fe0
import math\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\n\nimport plotly\nimport plotly.graph_objs as go\n\npd = optional_imports.get_module("pandas")\n\n\ndef _bullet(\n df,\n markers,\n measures,\n ranges,\n subtitles,\n titles,\n orientation,\n range_colors,\n measure_colors,\n horizontal_spacing,\n vertical_spacing,\n scatter_options,\n layout_options,\n):\n num_of_lanes = len(df)\n num_of_rows = num_of_lanes if orientation == "h" else 1\n num_of_cols = 1 if orientation == "h" else num_of_lanes\n if not horizontal_spacing:\n horizontal_spacing = 1.0 / num_of_lanes\n if not vertical_spacing:\n vertical_spacing = 1.0 / num_of_lanes\n fig = plotly.subplots.make_subplots(\n num_of_rows,\n num_of_cols,\n print_grid=False,\n horizontal_spacing=horizontal_spacing,\n vertical_spacing=vertical_spacing,\n )\n\n # layout\n fig["layout"].update(\n dict(shapes=[]),\n title="Bullet Chart",\n height=600,\n width=1000,\n showlegend=False,\n barmode="stack",\n annotations=[],\n margin=dict(l=120 if orientation == "h" else 80),\n )\n\n # update layout\n fig["layout"].update(layout_options)\n\n if orientation == "h":\n width_axis = "yaxis"\n length_axis = "xaxis"\n else:\n width_axis = "xaxis"\n length_axis = "yaxis"\n\n for key in fig["layout"]:\n if "xaxis" in key or "yaxis" in key:\n fig["layout"][key]["showgrid"] = False\n fig["layout"][key]["zeroline"] = False\n if length_axis in key:\n fig["layout"][key]["tickwidth"] = 1\n if width_axis in key:\n fig["layout"][key]["showticklabels"] = False\n fig["layout"][key]["range"] = [0, 1]\n\n # narrow domain if 1 bar\n if num_of_lanes <= 1:\n fig["layout"][width_axis + "1"]["domain"] = [0.4, 0.6]\n\n if not range_colors:\n range_colors = ["rgb(200, 200, 200)", "rgb(245, 245, 245)"]\n if not measure_colors:\n measure_colors = ["rgb(31, 119, 180)", "rgb(176, 196, 221)"]\n\n for row in range(num_of_lanes):\n # ranges bars\n for idx in 
range(len(df.iloc[row]["ranges"])):\n inter_colors = clrs.n_colors(\n range_colors[0], range_colors[1], len(df.iloc[row]["ranges"]), "rgb"\n )\n x = (\n [sorted(df.iloc[row]["ranges"])[-1 - idx]]\n if orientation == "h"\n else [0]\n )\n y = (\n [0]\n if orientation == "h"\n else [sorted(df.iloc[row]["ranges"])[-1 - idx]]\n )\n bar = go.Bar(\n x=x,\n y=y,\n marker=dict(color=inter_colors[-1 - idx]),\n name="ranges",\n hoverinfo="x" if orientation == "h" else "y",\n orientation=orientation,\n width=2,\n base=0,\n xaxis="x{}".format(row + 1),\n yaxis="y{}".format(row + 1),\n )\n fig.add_trace(bar)\n\n # measures bars\n for idx in range(len(df.iloc[row]["measures"])):\n inter_colors = clrs.n_colors(\n measure_colors[0],\n measure_colors[1],\n len(df.iloc[row]["measures"]),\n "rgb",\n )\n x = (\n [sorted(df.iloc[row]["measures"])[-1 - idx]]\n if orientation == "h"\n else [0.5]\n )\n y = (\n [0.5]\n if orientation == "h"\n else [sorted(df.iloc[row]["measures"])[-1 - idx]]\n )\n bar = go.Bar(\n x=x,\n y=y,\n marker=dict(color=inter_colors[-1 - idx]),\n name="measures",\n hoverinfo="x" if orientation == "h" else "y",\n orientation=orientation,\n width=0.4,\n base=0,\n xaxis="x{}".format(row + 1),\n yaxis="y{}".format(row + 1),\n )\n fig.add_trace(bar)\n\n # markers\n x = df.iloc[row]["markers"] if orientation == "h" else [0.5]\n y = [0.5] if orientation == "h" else df.iloc[row]["markers"]\n markers = go.Scatter(\n x=x,\n y=y,\n name="markers",\n hoverinfo="x" if orientation == "h" else "y",\n xaxis="x{}".format(row + 1),\n yaxis="y{}".format(row + 1),\n **scatter_options,\n )\n\n fig.add_trace(markers)\n\n # titles and subtitles\n title = df.iloc[row]["titles"]\n if "subtitles" in df:\n subtitle = "<br>{}".format(df.iloc[row]["subtitles"])\n else:\n subtitle = ""\n label = "<b>{}</b>".format(title) + subtitle\n annot = utils.annotation_dict_for_label(\n label,\n (num_of_lanes - row if orientation == "h" else row + 1),\n num_of_lanes,\n vertical_spacing if orientation == 
"h" else horizontal_spacing,\n "row" if orientation == "h" else "col",\n True if orientation == "h" else False,\n False,\n )\n fig["layout"]["annotations"] += (annot,)\n\n return fig\n\n\ndef create_bullet(\n data,\n markers=None,\n measures=None,\n ranges=None,\n subtitles=None,\n titles=None,\n orientation="h",\n range_colors=("rgb(200, 200, 200)", "rgb(245, 245, 245)"),\n measure_colors=("rgb(31, 119, 180)", "rgb(176, 196, 221)"),\n horizontal_spacing=None,\n vertical_spacing=None,\n scatter_options={},\n **layout_options,\n):\n """\n **deprecated**, use instead the plotly.graph_objects trace\n :class:`plotly.graph_objects.Indicator`.\n\n :param (pd.DataFrame | list | tuple) data: either a list/tuple of\n dictionaries or a pandas DataFrame.\n :param (str) markers: the column name or dictionary key for the markers in\n each subplot.\n :param (str) measures: the column name or dictionary key for the measure\n bars in each subplot. This bar usually represents the quantitative\n measure of performance, usually a list of two values [a, b] and are\n the blue bars in the foreground of each subplot by default.\n :param (str) ranges: the column name or dictionary key for the qualitative\n ranges of performance, usually a 3-item list [bad, okay, good]. They\n correspond to the grey bars in the background of each chart.\n :param (str) subtitles: the column name or dictionary key for the subtitle\n of each subplot chart. The subplots are displayed right underneath\n each title.\n :param (str) titles: the column name or dictionary key for the main label\n of each subplot chart.\n :param (bool) orientation: if 'h', the bars are placed horizontally as\n rows. If 'v' the bars are placed vertically in the chart.\n :param (list) range_colors: a tuple of two colors between which all\n the rectangles for the range are drawn. 
These rectangles are meant to\n be qualitative indicators against which the marker and measure bars\n are compared.\n Default=('rgb(200, 200, 200)', 'rgb(245, 245, 245)')\n :param (list) measure_colors: a tuple of two colors which is used to color\n the thin quantitative bars in the bullet chart.\n Default=('rgb(31, 119, 180)', 'rgb(176, 196, 221)')\n :param (float) horizontal_spacing: see the 'horizontal_spacing' param in\n plotly.tools.make_subplots. Ranges between 0 and 1.\n :param (float) vertical_spacing: see the 'vertical_spacing' param in\n plotly.tools.make_subplots. Ranges between 0 and 1.\n :param (dict) scatter_options: describes attributes for the scatter trace\n in each subplot such as name and marker size. Call\n help(plotly.graph_objs.Scatter) for more information on valid params.\n :param layout_options: describes attributes for the layout of the figure\n such as title, height and width. Call help(plotly.graph_objs.Layout)\n for more information on valid params.\n\n Example 1: Use a Dictionary\n\n >>> import plotly.figure_factory as ff\n\n >>> data = [\n ... {"label": "revenue", "sublabel": "us$, in thousands",\n ... "range": [150, 225, 300], "performance": [220,270], "point": [250]},\n ... {"label": "Profit", "sublabel": "%", "range": [20, 25, 30],\n ... "performance": [21, 23], "point": [26]},\n ... {"label": "Order Size", "sublabel":"US$, average","range": [350, 500, 600],\n ... "performance": [100,320],"point": [550]},\n ... {"label": "New Customers", "sublabel": "count", "range": [1400, 2000, 2500],\n ... "performance": [1000, 1650],"point": [2100]},\n ... {"label": "Satisfaction", "sublabel": "out of 5","range": [3.5, 4.25, 5],\n ... "performance": [3.2, 4.7], "point": [4.4]}\n ... ]\n\n >>> fig = ff.create_bullet(\n ... data, titles='label', subtitles='sublabel', markers='point',\n ... measures='performance', ranges='range', orientation='h',\n ... title='my simple bullet chart'\n ... 
)\n >>> fig.show()\n\n Example 2: Use a DataFrame with Custom Colors\n\n >>> import plotly.figure_factory as ff\n >>> import pandas as pd\n >>> data = pd.read_json('https://cdn.rawgit.com/plotly/datasets/master/BulletData.json')\n\n >>> fig = ff.create_bullet(\n ... data, titles='title', markers='markers', measures='measures',\n ... orientation='v', measure_colors=['rgb(14, 52, 75)', 'rgb(31, 141, 127)'],\n ... scatter_options={'marker': {'symbol': 'circle'}}, width=700)\n >>> fig.show()\n """\n # validate df\n if not pd:\n raise ImportError("'pandas' must be installed for this figure factory.")\n\n if utils.is_sequence(data):\n if not all(isinstance(item, dict) for item in data):\n raise exceptions.PlotlyError(\n "Every entry of the data argument list, tuple, etc must "\n "be a dictionary."\n )\n\n elif not isinstance(data, pd.DataFrame):\n raise exceptions.PlotlyError(\n "You must input a pandas DataFrame, or a list of dictionaries."\n )\n\n # make DataFrame from data with correct column headers\n col_names = ["titles", "subtitle", "markers", "measures", "ranges"]\n if utils.is_sequence(data):\n df = pd.DataFrame(\n [\n [d[titles] for d in data] if titles else [""] * len(data),\n [d[subtitles] for d in data] if subtitles else [""] * len(data),\n [d[markers] for d in data] if markers else [[]] * len(data),\n [d[measures] for d in data] if measures else [[]] * len(data),\n [d[ranges] for d in data] if ranges else [[]] * len(data),\n ],\n index=col_names,\n )\n elif isinstance(data, pd.DataFrame):\n df = pd.DataFrame(\n [\n data[titles].tolist() if titles else [""] * len(data),\n data[subtitles].tolist() if subtitles else [""] * len(data),\n data[markers].tolist() if markers else [[]] * len(data),\n data[measures].tolist() if measures else [[]] * len(data),\n data[ranges].tolist() if ranges else [[]] * len(data),\n ],\n index=col_names,\n )\n df = pd.DataFrame.transpose(df)\n\n # make sure ranges, measures, 'markers' are not NAN or NONE\n for needed_key in 
["ranges", "measures", "markers"]:\n for idx, r in enumerate(df[needed_key]):\n try:\n r_is_nan = math.isnan(r)\n if r_is_nan or r is None:\n df[needed_key][idx] = []\n except TypeError:\n pass\n\n # validate custom colors\n for colors_list in [range_colors, measure_colors]:\n if colors_list:\n if len(colors_list) != 2:\n raise exceptions.PlotlyError(\n "Both 'range_colors' or 'measure_colors' must be a list "\n "of two valid colors."\n )\n clrs.validate_colors(colors_list)\n colors_list = clrs.convert_colors_to_same_type(colors_list, "rgb")[0]\n\n # default scatter options\n default_scatter = {\n "marker": {"size": 12, "symbol": "diamond-tall", "color": "rgb(0, 0, 0)"}\n }\n\n if scatter_options == {}:\n scatter_options.update(default_scatter)\n else:\n # add default options to scatter_options if they are not present\n for k in default_scatter["marker"]:\n if k not in scatter_options["marker"]:\n scatter_options["marker"][k] = default_scatter["marker"][k]\n\n fig = _bullet(\n df,\n markers,\n measures,\n ranges,\n subtitles,\n titles,\n orientation,\n range_colors,\n measure_colors,\n horizontal_spacing,\n vertical_spacing,\n scatter_options,\n layout_options,\n )\n\n return fig\n
.venv\Lib\site-packages\plotly\figure_factory\_bullet.py
_bullet.py
Python
13,093
0.95
0.210383
0.048485
python-kit
231
2024-04-22T04:05:17.103343
MIT
false
b11f37af5fd1469a6c16202e2f7b7157
from plotly.figure_factory import utils\nfrom plotly.figure_factory._ohlc import (\n _DEFAULT_INCREASING_COLOR,\n _DEFAULT_DECREASING_COLOR,\n validate_ohlc,\n)\nfrom plotly.graph_objs import graph_objs\n\n\ndef make_increasing_candle(open, high, low, close, dates, **kwargs):\n """\n Makes boxplot trace for increasing candlesticks\n\n _make_increasing_candle() and _make_decreasing_candle separate the\n increasing traces from the decreasing traces so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_incr_data: list of the box trace for\n increasing candlesticks.\n """\n increase_x, increase_y = _Candlestick(\n open, high, low, close, dates, **kwargs\n ).get_candle_increase()\n\n if "line" in kwargs:\n kwargs.setdefault("fillcolor", kwargs["line"]["color"])\n else:\n kwargs.setdefault("fillcolor", _DEFAULT_INCREASING_COLOR)\n if "name" in kwargs:\n kwargs.setdefault("showlegend", True)\n else:\n kwargs.setdefault("showlegend", False)\n kwargs.setdefault("name", "Increasing")\n kwargs.setdefault("line", dict(color=_DEFAULT_INCREASING_COLOR))\n\n candle_incr_data = dict(\n type="box",\n x=increase_x,\n y=increase_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs,\n )\n\n return [candle_incr_data]\n\n\ndef make_decreasing_candle(open, high, low, close, dates, **kwargs):\n """\n Makes boxplot trace for decreasing candlesticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to decreasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (list) candle_decr_data: list of the box trace for\n decreasing candlesticks.\n """\n\n decrease_x, decrease_y = _Candlestick(\n open, high, low, close, dates, **kwargs\n ).get_candle_decrease()\n\n if "line" in kwargs:\n kwargs.setdefault("fillcolor", kwargs["line"]["color"])\n else:\n kwargs.setdefault("fillcolor", _DEFAULT_DECREASING_COLOR)\n kwargs.setdefault("showlegend", False)\n kwargs.setdefault("line", dict(color=_DEFAULT_DECREASING_COLOR))\n kwargs.setdefault("name", "Decreasing")\n\n candle_decr_data = dict(\n type="box",\n x=decrease_x,\n y=decrease_y,\n whiskerwidth=0,\n boxpoints=False,\n **kwargs,\n )\n\n return [candle_decr_data]\n\n\ndef create_candlestick(open, high, low, close, dates=None, direction="both", **kwargs):\n """\n **deprecated**, use instead the plotly.graph_objects trace\n :class:`plotly.graph_objects.Candlestick`\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all candlesticks where the close value is greater than\n the corresponding open value, and when the direction is\n 'decreasing', the returned figure consists of all candlesticks\n where the close value is less than or equal to the corresponding\n open value. When the direction is 'both', both increasing and\n decreasing candlesticks are returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of candlestick chart figure.\n\n Example 1: Simple candlestick chart from a Pandas DataFrame\n\n >>> from plotly.figure_factory import create_candlestick\n >>> from datetime import datetime\n >>> import pandas as pd\n\n >>> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')\n >>> fig = create_candlestick(df['AAPL.Open'], df['AAPL.High'], df['AAPL.Low'], df['AAPL.Close'],\n ... dates=df.index)\n >>> fig.show()\n\n Example 2: Customize the candlestick colors\n\n >>> from plotly.figure_factory import create_candlestick\n >>> from plotly.graph_objs import Line, Marker\n >>> from datetime import datetime\n\n >>> import pandas as pd\n >>> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')\n\n >>> # Make increasing candlesticks and customize their color and name\n >>> fig_increasing = create_candlestick(df['AAPL.Open'], df['AAPL.High'], df['AAPL.Low'], df['AAPL.Close'],\n ... dates=df.index,\n ... direction='increasing', name='AAPL',\n ... marker=Marker(color='rgb(150, 200, 250)'),\n ... line=Line(color='rgb(150, 200, 250)'))\n\n >>> # Make decreasing candlesticks and customize their color and name\n >>> fig_decreasing = create_candlestick(df['AAPL.Open'], df['AAPL.High'], df['AAPL.Low'], df['AAPL.Close'],\n ... dates=df.index,\n ... direction='decreasing',\n ... marker=Marker(color='rgb(128, 128, 128)'),\n ... 
line=Line(color='rgb(128, 128, 128)'))\n\n >>> # Initialize the figure\n >>> fig = fig_increasing\n\n >>> # Add decreasing data with .extend()\n >>> fig.add_trace(fig_decreasing['data']) # doctest: +SKIP\n >>> fig.show()\n\n Example 3: Candlestick chart with datetime objects\n\n >>> from plotly.figure_factory import create_candlestick\n\n >>> from datetime import datetime\n\n >>> # Add data\n >>> open_data = [33.0, 33.3, 33.5, 33.0, 34.1]\n >>> high_data = [33.1, 33.3, 33.6, 33.2, 34.8]\n >>> low_data = [32.7, 32.7, 32.8, 32.6, 32.8]\n >>> close_data = [33.0, 32.9, 33.3, 33.1, 33.1]\n >>> dates = [datetime(year=2013, month=10, day=10),\n ... datetime(year=2013, month=11, day=10),\n ... datetime(year=2013, month=12, day=10),\n ... datetime(year=2014, month=1, day=10),\n ... datetime(year=2014, month=2, day=10)]\n\n >>> # Create ohlc\n >>> fig = create_candlestick(open_data, high_data,\n ... low_data, close_data, dates=dates)\n >>> fig.show()\n """\n if dates is not None:\n utils.validate_equal_length(open, high, low, close, dates)\n else:\n utils.validate_equal_length(open, high, low, close)\n validate_ohlc(open, high, low, close, direction, **kwargs)\n\n if direction == "increasing":\n candle_incr_data = make_increasing_candle(\n open, high, low, close, dates, **kwargs\n )\n data = candle_incr_data\n elif direction == "decreasing":\n candle_decr_data = make_decreasing_candle(\n open, high, low, close, dates, **kwargs\n )\n data = candle_decr_data\n else:\n candle_incr_data = make_increasing_candle(\n open, high, low, close, dates, **kwargs\n )\n candle_decr_data = make_decreasing_candle(\n open, high, low, close, dates, **kwargs\n )\n data = candle_incr_data + candle_decr_data\n\n layout = graph_objs.Layout()\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Candlestick(object):\n """\n Refer to FigureFactory.create_candlestick() for docstring.\n """\n\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = 
high\n self.low = low\n self.close = close\n if dates is not None:\n self.x = dates\n else:\n self.x = [x for x in range(len(self.open))]\n self.get_candle_increase()\n\n def get_candle_increase(self):\n """\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n """\n increase_y = []\n increase_x = []\n for index in range(len(self.open)):\n if self.close[index] > self.open[index]:\n increase_y.append(self.low[index])\n increase_y.append(self.open[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.close[index])\n increase_y.append(self.high[index])\n increase_x.append(self.x[index])\n\n increase_x = [[x, x, x, x, x, x] for x in increase_x]\n increase_x = utils.flatten(increase_x)\n\n return increase_x, increase_y\n\n def get_candle_decrease(self):\n """\n Separate increasing data from decreasing data.\n\n The data is increasing when close value > open value\n and decreasing when the close value <= open value.\n """\n decrease_y = []\n decrease_x = []\n for index in range(len(self.open)):\n if self.close[index] <= self.open[index]:\n decrease_y.append(self.low[index])\n decrease_y.append(self.open[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.close[index])\n decrease_y.append(self.high[index])\n decrease_x.append(self.x[index])\n\n decrease_x = [[x, x, x, x, x, x] for x in decrease_x]\n decrease_x = utils.flatten(decrease_x)\n\n return decrease_x, decrease_y\n
.venv\Lib\site-packages\plotly\figure_factory\_candlestick.py
_candlestick.py
Python
10,038
0.95
0.093863
0.0131
node-utils
35
2023-12-18T00:20:20.716199
Apache-2.0
false
ca3d7924389a793507904f2882511290
import io\nimport numpy as np\nimport os\nimport pandas as pd\nimport warnings\n\nfrom math import log, floor\nfrom numbers import Number\n\nfrom plotly import optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nfrom plotly.exceptions import PlotlyError\nimport plotly.graph_objs as go\n\npd.options.mode.chained_assignment = None\n\nshapely = optional_imports.get_module("shapely")\nshapefile = optional_imports.get_module("shapefile")\ngp = optional_imports.get_module("geopandas")\n_plotly_geo = optional_imports.get_module("_plotly_geo")\n\n\ndef _create_us_counties_df(st_to_state_name_dict, state_to_st_dict):\n # URLS\n abs_dir_path = os.path.realpath(_plotly_geo.__file__)\n\n abs_plotly_geo_path = os.path.dirname(abs_dir_path)\n\n abs_package_data_dir_path = os.path.join(abs_plotly_geo_path, "package_data")\n\n shape_pre2010 = "gz_2010_us_050_00_500k.shp"\n shape_pre2010 = os.path.join(abs_package_data_dir_path, shape_pre2010)\n\n df_shape_pre2010 = gp.read_file(shape_pre2010)\n df_shape_pre2010["FIPS"] = df_shape_pre2010["STATE"] + df_shape_pre2010["COUNTY"]\n df_shape_pre2010["FIPS"] = pd.to_numeric(df_shape_pre2010["FIPS"])\n\n states_path = "cb_2016_us_state_500k.shp"\n states_path = os.path.join(abs_package_data_dir_path, states_path)\n\n df_state = gp.read_file(states_path)\n df_state = df_state[["STATEFP", "NAME", "geometry"]]\n df_state = df_state.rename(columns={"NAME": "STATE_NAME"})\n\n filenames = [\n "cb_2016_us_county_500k.dbf",\n "cb_2016_us_county_500k.shp",\n "cb_2016_us_county_500k.shx",\n ]\n\n for j in range(len(filenames)):\n filenames[j] = os.path.join(abs_package_data_dir_path, filenames[j])\n\n dbf = io.open(filenames[0], "rb")\n shp = io.open(filenames[1], "rb")\n shx = io.open(filenames[2], "rb")\n\n r = shapefile.Reader(shp=shp, shx=shx, dbf=dbf)\n\n attributes, geometry = [], []\n field_names = [field[0] for field in r.fields[1:]]\n for row in r.shapeRecords():\n 
geometry.append(shapely.geometry.shape(row.shape.__geo_interface__))\n attributes.append(dict(zip(field_names, row.record)))\n\n gdf = gp.GeoDataFrame(data=attributes, geometry=geometry)\n\n gdf["FIPS"] = gdf["STATEFP"] + gdf["COUNTYFP"]\n gdf["FIPS"] = pd.to_numeric(gdf["FIPS"])\n\n # add missing counties\n f = 46113\n singlerow = pd.DataFrame(\n [\n [\n st_to_state_name_dict["SD"],\n "SD",\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["geometry"].iloc[0],\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["FIPS"].iloc[0],\n "46",\n "Shannon",\n ]\n ],\n columns=["State", "ST", "geometry", "FIPS", "STATEFP", "NAME"],\n index=[max(gdf.index) + 1],\n )\n gdf = pd.concat([gdf, singlerow], sort=True)\n\n f = 51515\n singlerow = pd.DataFrame(\n [\n [\n st_to_state_name_dict["VA"],\n "VA",\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["geometry"].iloc[0],\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["FIPS"].iloc[0],\n "51",\n "Bedford City",\n ]\n ],\n columns=["State", "ST", "geometry", "FIPS", "STATEFP", "NAME"],\n index=[max(gdf.index) + 1],\n )\n gdf = pd.concat([gdf, singlerow], sort=True)\n\n f = 2270\n singlerow = pd.DataFrame(\n [\n [\n st_to_state_name_dict["AK"],\n "AK",\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["geometry"].iloc[0],\n df_shape_pre2010[df_shape_pre2010["FIPS"] == f]["FIPS"].iloc[0],\n "02",\n "Wade Hampton",\n ]\n ],\n columns=["State", "ST", "geometry", "FIPS", "STATEFP", "NAME"],\n index=[max(gdf.index) + 1],\n )\n gdf = pd.concat([gdf, singlerow], sort=True)\n\n row_2198 = gdf[gdf["FIPS"] == 2198]\n row_2198.index = [max(gdf.index) + 1]\n row_2198.loc[row_2198.index[0], "FIPS"] = 2201\n row_2198.loc[row_2198.index[0], "STATEFP"] = "02"\n gdf = pd.concat([gdf, row_2198], sort=True)\n\n row_2105 = gdf[gdf["FIPS"] == 2105]\n row_2105.index = [max(gdf.index) + 1]\n row_2105.loc[row_2105.index[0], "FIPS"] = 2232\n row_2105.loc[row_2105.index[0], "STATEFP"] = "02"\n gdf = pd.concat([gdf, row_2105], sort=True)\n gdf = 
gdf.rename(columns={"NAME": "COUNTY_NAME"})\n\n gdf_reduced = gdf[["FIPS", "STATEFP", "COUNTY_NAME", "geometry"]]\n gdf_statefp = gdf_reduced.merge(df_state[["STATEFP", "STATE_NAME"]], on="STATEFP")\n\n ST = []\n for n in gdf_statefp["STATE_NAME"]:\n ST.append(state_to_st_dict[n])\n\n gdf_statefp["ST"] = ST\n return gdf_statefp, df_state\n\n\nst_to_state_name_dict = {\n "AK": "Alaska",\n "AL": "Alabama",\n "AR": "Arkansas",\n "AZ": "Arizona",\n "CA": "California",\n "CO": "Colorado",\n "CT": "Connecticut",\n "DC": "District of Columbia",\n "DE": "Delaware",\n "FL": "Florida",\n "GA": "Georgia",\n "HI": "Hawaii",\n "IA": "Iowa",\n "ID": "Idaho",\n "IL": "Illinois",\n "IN": "Indiana",\n "KS": "Kansas",\n "KY": "Kentucky",\n "LA": "Louisiana",\n "MA": "Massachusetts",\n "MD": "Maryland",\n "ME": "Maine",\n "MI": "Michigan",\n "MN": "Minnesota",\n "MO": "Missouri",\n "MS": "Mississippi",\n "MT": "Montana",\n "NC": "North Carolina",\n "ND": "North Dakota",\n "NE": "Nebraska",\n "NH": "New Hampshire",\n "NJ": "New Jersey",\n "NM": "New Mexico",\n "NV": "Nevada",\n "NY": "New York",\n "OH": "Ohio",\n "OK": "Oklahoma",\n "OR": "Oregon",\n "PA": "Pennsylvania",\n "RI": "Rhode Island",\n "SC": "South Carolina",\n "SD": "South Dakota",\n "TN": "Tennessee",\n "TX": "Texas",\n "UT": "Utah",\n "VA": "Virginia",\n "VT": "Vermont",\n "WA": "Washington",\n "WI": "Wisconsin",\n "WV": "West Virginia",\n "WY": "Wyoming",\n}\n\nstate_to_st_dict = {\n "Alabama": "AL",\n "Alaska": "AK",\n "American Samoa": "AS",\n "Arizona": "AZ",\n "Arkansas": "AR",\n "California": "CA",\n "Colorado": "CO",\n "Commonwealth of the Northern Mariana Islands": "MP",\n "Connecticut": "CT",\n "Delaware": "DE",\n "District of Columbia": "DC",\n "Florida": "FL",\n "Georgia": "GA",\n "Guam": "GU",\n "Hawaii": "HI",\n "Idaho": "ID",\n "Illinois": "IL",\n "Indiana": "IN",\n "Iowa": "IA",\n "Kansas": "KS",\n "Kentucky": "KY",\n "Louisiana": "LA",\n "Maine": "ME",\n "Maryland": "MD",\n "Massachusetts": "MA",\n 
"Michigan": "MI",\n "Minnesota": "MN",\n "Mississippi": "MS",\n "Missouri": "MO",\n "Montana": "MT",\n "Nebraska": "NE",\n "Nevada": "NV",\n "New Hampshire": "NH",\n "New Jersey": "NJ",\n "New Mexico": "NM",\n "New York": "NY",\n "North Carolina": "NC",\n "North Dakota": "ND",\n "Ohio": "OH",\n "Oklahoma": "OK",\n "Oregon": "OR",\n "Pennsylvania": "PA",\n "Puerto Rico": "",\n "Rhode Island": "RI",\n "South Carolina": "SC",\n "South Dakota": "SD",\n "Tennessee": "TN",\n "Texas": "TX",\n "United States Virgin Islands": "VI",\n "Utah": "UT",\n "Vermont": "VT",\n "Virginia": "VA",\n "Washington": "WA",\n "West Virginia": "WV",\n "Wisconsin": "WI",\n "Wyoming": "WY",\n}\n\nUSA_XRANGE = [-125.0, -65.0]\nUSA_YRANGE = [25.0, 49.0]\n\n\ndef _human_format(number):\n units = ["", "K", "M", "G", "T", "P"]\n k = 1000.0\n magnitude = int(floor(log(number, k)))\n return "%.2f%s" % (number / k**magnitude, units[magnitude])\n\n\ndef _intervals_as_labels(array_of_intervals, round_legend_values, exponent_format):\n """\n Transform an number interval to a clean string for legend\n\n Example: [-inf, 30] to '< 30'\n """\n infs = [float("-inf"), float("inf")]\n string_intervals = []\n for interval in array_of_intervals:\n # round to 2nd decimal place\n if round_legend_values:\n rnd_interval = [\n (int(interval[i]) if interval[i] not in infs else interval[i])\n for i in range(2)\n ]\n else:\n rnd_interval = [round(interval[0], 2), round(interval[1], 2)]\n\n num0 = rnd_interval[0]\n num1 = rnd_interval[1]\n if exponent_format:\n if num0 not in infs:\n num0 = _human_format(num0)\n if num1 not in infs:\n num1 = _human_format(num1)\n else:\n if num0 not in infs:\n num0 = "{:,}".format(num0)\n if num1 not in infs:\n num1 = "{:,}".format(num1)\n\n if num0 == float("-inf"):\n as_str = "< {}".format(num1)\n elif num1 == float("inf"):\n as_str = "> {}".format(num0)\n else:\n as_str = "{} - {}".format(num0, num1)\n string_intervals.append(as_str)\n return string_intervals\n\n\ndef _calculations(\n 
df,\n fips,\n values,\n index,\n f,\n simplify_county,\n level,\n x_centroids,\n y_centroids,\n centroid_text,\n x_traces,\n y_traces,\n fips_polygon_map,\n):\n # 0-pad FIPS code to ensure exactly 5 digits\n padded_f = str(f).zfill(5)\n if fips_polygon_map[f].type == "Polygon":\n x = fips_polygon_map[f].simplify(simplify_county).exterior.xy[0].tolist()\n y = fips_polygon_map[f].simplify(simplify_county).exterior.xy[1].tolist()\n\n x_c, y_c = fips_polygon_map[f].centroid.xy\n county_name_str = str(df[df["FIPS"] == f]["COUNTY_NAME"].iloc[0])\n state_name_str = str(df[df["FIPS"] == f]["STATE_NAME"].iloc[0])\n\n t_c = (\n "County: "\n + county_name_str\n + "<br>"\n + "State: "\n + state_name_str\n + "<br>"\n + "FIPS: "\n + padded_f\n + "<br>Value: "\n + str(values[index])\n )\n\n x_centroids.append(x_c[0])\n y_centroids.append(y_c[0])\n centroid_text.append(t_c)\n\n x_traces[level] = x_traces[level] + x + [np.nan]\n y_traces[level] = y_traces[level] + y + [np.nan]\n elif fips_polygon_map[f].type == "MultiPolygon":\n x = [\n poly.simplify(simplify_county).exterior.xy[0].tolist()\n for poly in fips_polygon_map[f].geoms\n ]\n y = [\n poly.simplify(simplify_county).exterior.xy[1].tolist()\n for poly in fips_polygon_map[f].geoms\n ]\n\n x_c = [poly.centroid.xy[0].tolist() for poly in fips_polygon_map[f].geoms]\n y_c = [poly.centroid.xy[1].tolist() for poly in fips_polygon_map[f].geoms]\n\n county_name_str = str(df[df["FIPS"] == f]["COUNTY_NAME"].iloc[0])\n state_name_str = str(df[df["FIPS"] == f]["STATE_NAME"].iloc[0])\n text = (\n "County: "\n + county_name_str\n + "<br>"\n + "State: "\n + state_name_str\n + "<br>"\n + "FIPS: "\n + padded_f\n + "<br>Value: "\n + str(values[index])\n )\n t_c = [text for poly in fips_polygon_map[f].geoms]\n x_centroids = x_c + x_centroids\n y_centroids = y_c + y_centroids\n centroid_text = t_c + centroid_text\n for x_y_idx in range(len(x)):\n x_traces[level] = x_traces[level] + x[x_y_idx] + [np.nan]\n y_traces[level] = y_traces[level] + 
y[x_y_idx] + [np.nan]\n\n return x_traces, y_traces, x_centroids, y_centroids, centroid_text\n\n\ndef create_choropleth(\n fips,\n values,\n scope=["usa"],\n binning_endpoints=None,\n colorscale=None,\n order=None,\n simplify_county=0.02,\n simplify_state=0.02,\n asp=None,\n show_hover=True,\n show_state_data=True,\n state_outline=None,\n county_outline=None,\n centroid_marker=None,\n round_legend_values=False,\n exponent_format=False,\n legend_title="",\n **layout_options,\n):\n """\n **deprecated**, use instead\n :func:`plotly.express.choropleth` with custom GeoJSON.\n\n This function also requires `shapely`, `geopandas` and `plotly-geo` to be installed.\n\n Returns figure for county choropleth. Uses data from package_data.\n\n :param (list) fips: list of FIPS values which correspond to the con\n catination of state and county ids. An example is '01001'.\n :param (list) values: list of numbers/strings which correspond to the\n fips list. These are the values that will determine how the counties\n are colored.\n :param (list) scope: list of states and/or states abbreviations. Fits\n all states in the camera tightly. Selecting ['usa'] is the equivalent\n of appending all 50 states into your scope list. Selecting only 'usa'\n does not include 'Alaska', 'Puerto Rico', 'American Samoa',\n 'Commonwealth of the Northern Mariana Islands', 'Guam',\n 'United States Virgin Islands'. These must be added manually to the\n list.\n Default = ['usa']\n :param (list) binning_endpoints: ascending numbers which implicitly define\n real number intervals which are used as bins. The colorscale used must\n have the same number of colors as the number of bins and this will\n result in a categorical colormap.\n :param (list) colorscale: a list of colors with length equal to the\n number of categories of colors. 
The length must match either all\n unique numbers in the 'values' list or if endpoints is being used, the\n number of categories created by the endpoints.\n\n For example, if binning_endpoints = [4, 6, 8], then there are 4 bins:\n [-inf, 4), [4, 6), [6, 8), [8, inf)\n :param (list) order: a list of the unique categories (numbers/bins) in any\n desired order. This is helpful if you want to order string values to\n a chosen colorscale.\n :param (float) simplify_county: determines the simplification factor\n for the counties. The larger the number, the fewer vertices and edges\n each polygon has. See\n http://toblerity.org/shapely/manual.html#object.simplify for more\n information.\n Default = 0.02\n :param (float) simplify_state: simplifies the state outline polygon.\n See http://toblerity.org/shapely/manual.html#object.simplify for more\n information.\n Default = 0.02\n :param (float) asp: the width-to-height aspect ratio for the camera.\n Default = 2.5\n :param (bool) show_hover: show county hover and centroid info\n :param (bool) show_state_data: reveals state boundary lines\n :param (dict) state_outline: dict of attributes of the state outline\n including width and color. See\n https://plot.ly/python/reference/#scatter-marker-line for all valid\n params\n :param (dict) county_outline: dict of attributes of the county outline\n including width and color. See\n https://plot.ly/python/reference/#scatter-marker-line for all valid\n params\n :param (dict) centroid_marker: dict of attributes of the centroid marker.\n The centroid markers are invisible by default and appear visible on\n selection. See https://plot.ly/python/reference/#scatter-marker for\n all valid params\n :param (bool) round_legend_values: automatically round the numbers that\n appear in the legend to the nearest integer.\n Default = False\n :param (bool) exponent_format: if set to True, puts numbers in the K, M,\n B number format. 
For example 4000.0 becomes 4.0K\n Default = False\n :param (str) legend_title: title that appears above the legend\n :param **layout_options: a **kwargs argument for all layout parameters\n\n\n Example 1: Florida::\n\n import plotly.plotly as py\n import plotly.figure_factory as ff\n\n import numpy as np\n import pandas as pd\n\n df_sample = pd.read_csv(\n 'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'\n )\n df_sample_r = df_sample[df_sample['STNAME'] == 'Florida']\n\n values = df_sample_r['TOT_POP'].tolist()\n fips = df_sample_r['FIPS'].tolist()\n\n binning_endpoints = list(np.mgrid[min(values):max(values):4j])\n colorscale = ["#030512","#1d1d3b","#323268","#3d4b94","#3e6ab0",\n "#4989bc","#60a7c7","#85c5d3","#b7e0e4","#eafcfd"]\n fig = ff.create_choropleth(\n fips=fips, values=values, scope=['Florida'], show_state_data=True,\n colorscale=colorscale, binning_endpoints=binning_endpoints,\n round_legend_values=True, plot_bgcolor='rgb(229,229,229)',\n paper_bgcolor='rgb(229,229,229)', legend_title='Florida Population',\n county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},\n exponent_format=True,\n )\n\n Example 2: New England::\n\n import plotly.figure_factory as ff\n\n import pandas as pd\n\n NE_states = ['Connecticut', 'Maine', 'Massachusetts',\n 'New Hampshire', 'Rhode Island']\n df_sample = pd.read_csv(\n 'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'\n )\n df_sample_r = df_sample[df_sample['STNAME'].isin(NE_states)]\n colorscale = ['rgb(68.0, 1.0, 84.0)',\n 'rgb(66.0, 64.0, 134.0)',\n 'rgb(38.0, 130.0, 142.0)',\n 'rgb(63.0, 188.0, 115.0)',\n 'rgb(216.0, 226.0, 25.0)']\n\n values = df_sample_r['TOT_POP'].tolist()\n fips = df_sample_r['FIPS'].tolist()\n fig = ff.create_choropleth(\n fips=fips, values=values, scope=NE_states, show_state_data=True\n )\n fig.show()\n\n Example 3: California and Surrounding States::\n\n import plotly.figure_factory as ff\n\n import pandas as pd\n\n df_sample = 
pd.read_csv(\n 'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'\n )\n df_sample_r = df_sample[df_sample['STNAME'] == 'California']\n\n values = df_sample_r['TOT_POP'].tolist()\n fips = df_sample_r['FIPS'].tolist()\n\n colorscale = [\n 'rgb(193, 193, 193)',\n 'rgb(239,239,239)',\n 'rgb(195, 196, 222)',\n 'rgb(144,148,194)',\n 'rgb(101,104,168)',\n 'rgb(65, 53, 132)'\n ]\n\n fig = ff.create_choropleth(\n fips=fips, values=values, colorscale=colorscale,\n scope=['CA', 'AZ', 'Nevada', 'Oregon', ' Idaho'],\n binning_endpoints=[14348, 63983, 134827, 426762, 2081313],\n county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},\n legend_title='California Counties',\n title='California and Nearby States'\n )\n fig.show()\n\n Example 4: USA::\n\n import plotly.figure_factory as ff\n\n import numpy as np\n import pandas as pd\n\n df_sample = pd.read_csv(\n 'https://raw.githubusercontent.com/plotly/datasets/master/laucnty16.csv'\n )\n df_sample['State FIPS Code'] = df_sample['State FIPS Code'].apply(\n lambda x: str(x).zfill(2)\n )\n df_sample['County FIPS Code'] = df_sample['County FIPS Code'].apply(\n lambda x: str(x).zfill(3)\n )\n df_sample['FIPS'] = (\n df_sample['State FIPS Code'] + df_sample['County FIPS Code']\n )\n\n binning_endpoints = list(np.linspace(1, 12, len(colorscale) - 1))\n colorscale = ["#f7fbff", "#ebf3fb", "#deebf7", "#d2e3f3", "#c6dbef",\n "#b3d2e9", "#9ecae1", "#85bcdb", "#6baed6", "#57a0ce",\n "#4292c6", "#3082be", "#2171b5", "#1361a9", "#08519c",\n "#0b4083","#08306b"]\n fips = df_sample['FIPS']\n values = df_sample['Unemployment Rate (%)']\n fig = ff.create_choropleth(\n fips=fips, values=values, scope=['usa'],\n binning_endpoints=binning_endpoints, colorscale=colorscale,\n show_hover=True, centroid_marker={'opacity': 0},\n asp=2.9, title='USA by Unemployment %',\n legend_title='Unemployment %'\n )\n fig.show()\n """\n # ensure optional modules imported\n if not _plotly_geo:\n raise ValueError(\n """\nThe 
create_choropleth figure factory requires the plotly-geo package.\nInstall using pip with:\n\n$ pip install plotly-geo\n\nOr, install using conda with\n\n$ conda install -c plotly plotly-geo\n"""\n )\n\n if not gp or not shapefile or not shapely:\n raise ImportError(\n "geopandas, pyshp and shapely must be installed for this figure "\n "factory.\n\nRun the following commands to install the correct "\n "versions of the following modules:\n\n"\n "```\n"\n "$ pip install geopandas==0.3.0\n"\n "$ pip install pyshp==1.2.10\n"\n "$ pip install shapely==1.6.3\n"\n "```\n"\n "If you are using Windows, follow this post to properly "\n "install geopandas and dependencies:"\n "http://geoffboeing.com/2014/09/using-geopandas-windows/\n\n"\n "If you are using Anaconda, do not use PIP to install the "\n "packages above. Instead use conda to install them:\n\n"\n "```\n"\n "$ conda install plotly\n"\n "$ conda install geopandas\n"\n "```"\n )\n\n df, df_state = _create_us_counties_df(st_to_state_name_dict, state_to_st_dict)\n\n fips_polygon_map = dict(zip(df["FIPS"].tolist(), df["geometry"].tolist()))\n\n if not state_outline:\n state_outline = {"color": "rgb(240, 240, 240)", "width": 1}\n if not county_outline:\n county_outline = {"color": "rgb(0, 0, 0)", "width": 0}\n if not centroid_marker:\n centroid_marker = {"size": 3, "color": "white", "opacity": 1}\n\n # ensure centroid markers appear on selection\n if "opacity" not in centroid_marker:\n centroid_marker.update({"opacity": 1})\n\n if len(fips) != len(values):\n raise PlotlyError("fips and values must be the same length")\n\n # make fips, values into lists\n if isinstance(fips, pd.core.series.Series):\n fips = fips.tolist()\n if isinstance(values, pd.core.series.Series):\n values = values.tolist()\n\n # make fips numeric\n fips = map(lambda x: int(x), fips)\n\n if binning_endpoints:\n intervals = utils.endpts_to_intervals(binning_endpoints)\n LEVELS = _intervals_as_labels(intervals, round_legend_values, exponent_format)\n 
else:\n if not order:\n LEVELS = sorted(list(set(values)))\n else:\n # check if order is permutation\n # of unique color col values\n same_sets = sorted(list(set(values))) == set(order)\n no_duplicates = not any(order.count(x) > 1 for x in order)\n if same_sets and no_duplicates:\n LEVELS = order\n else:\n raise PlotlyError(\n "if you are using a custom order of unique values from "\n "your color column, you must: have all the unique values "\n "in your order and have no duplicate items"\n )\n\n if not colorscale:\n colorscale = []\n viridis_colors = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES["Viridis"])\n viridis_colors = clrs.color_parser(viridis_colors, clrs.hex_to_rgb)\n viridis_colors = clrs.color_parser(viridis_colors, clrs.label_rgb)\n viri_len = len(viridis_colors) + 1\n viri_intervals = utils.endpts_to_intervals(list(np.linspace(0, 1, viri_len)))[\n 1:-1\n ]\n\n for L in np.linspace(0, 1, len(LEVELS)):\n for idx, inter in enumerate(viri_intervals):\n if L == 0:\n break\n elif inter[0] < L <= inter[1]:\n break\n\n intermed = (L - viri_intervals[idx][0]) / (\n viri_intervals[idx][1] - viri_intervals[idx][0]\n )\n\n float_color = clrs.find_intermediate_color(\n viridis_colors[idx], viridis_colors[idx], intermed, colortype="rgb"\n )\n\n # make R,G,B into int values\n float_color = clrs.unlabel_rgb(float_color)\n float_color = clrs.unconvert_from_RGB_255(float_color)\n int_rgb = clrs.convert_to_RGB_255(float_color)\n int_rgb = clrs.label_rgb(int_rgb)\n\n colorscale.append(int_rgb)\n\n if len(colorscale) < len(LEVELS):\n raise PlotlyError(\n "You have {} LEVELS. Your number of colors in 'colorscale' must "\n "be at least the number of LEVELS: {}. 
If you are "\n "using 'binning_endpoints' then 'colorscale' must have at "\n "least len(binning_endpoints) + 2 colors".format(\n len(LEVELS), min(LEVELS, LEVELS[:20])\n )\n )\n\n color_lookup = dict(zip(LEVELS, colorscale))\n x_traces = dict(zip(LEVELS, [[] for i in range(len(LEVELS))]))\n y_traces = dict(zip(LEVELS, [[] for i in range(len(LEVELS))]))\n\n # scope\n if isinstance(scope, str):\n raise PlotlyError("'scope' must be a list/tuple/sequence")\n\n scope_names = []\n extra_states = [\n "Alaska",\n "Commonwealth of the Northern Mariana Islands",\n "Puerto Rico",\n "Guam",\n "United States Virgin Islands",\n "American Samoa",\n ]\n for state in scope:\n if state.lower() == "usa":\n scope_names = df["STATE_NAME"].unique()\n scope_names = list(scope_names)\n for ex_st in extra_states:\n try:\n scope_names.remove(ex_st)\n except ValueError:\n pass\n else:\n if state in st_to_state_name_dict.keys():\n state = st_to_state_name_dict[state]\n scope_names.append(state)\n df_state = df_state[df_state["STATE_NAME"].isin(scope_names)]\n\n plot_data = []\n x_centroids = []\n y_centroids = []\n centroid_text = []\n fips_not_in_shapefile = []\n if not binning_endpoints:\n for index, f in enumerate(fips):\n level = values[index]\n try:\n fips_polygon_map[f].type\n\n (\n x_traces,\n y_traces,\n x_centroids,\n y_centroids,\n centroid_text,\n ) = _calculations(\n df,\n fips,\n values,\n index,\n f,\n simplify_county,\n level,\n x_centroids,\n y_centroids,\n centroid_text,\n x_traces,\n y_traces,\n fips_polygon_map,\n )\n except KeyError:\n fips_not_in_shapefile.append(f)\n\n else:\n for index, f in enumerate(fips):\n for j, inter in enumerate(intervals):\n if inter[0] < values[index] <= inter[1]:\n break\n level = LEVELS[j]\n\n try:\n fips_polygon_map[f].type\n\n (\n x_traces,\n y_traces,\n x_centroids,\n y_centroids,\n centroid_text,\n ) = _calculations(\n df,\n fips,\n values,\n index,\n f,\n simplify_county,\n level,\n x_centroids,\n y_centroids,\n centroid_text,\n 
x_traces,\n y_traces,\n fips_polygon_map,\n )\n except KeyError:\n fips_not_in_shapefile.append(f)\n\n if len(fips_not_in_shapefile) > 0:\n msg = (\n "Unrecognized FIPS Values\n\nWhoops! It looks like you are "\n "trying to pass at least one FIPS value that is not in "\n "our shapefile of FIPS and data for the counties. Your "\n "choropleth will still show up but these counties cannot "\n "be shown.\nUnrecognized FIPS are: {}".format(fips_not_in_shapefile)\n )\n warnings.warn(msg)\n\n x_states = []\n y_states = []\n for index, row in df_state.iterrows():\n if df_state["geometry"][index].type == "Polygon":\n x = row.geometry.simplify(simplify_state).exterior.xy[0].tolist()\n y = row.geometry.simplify(simplify_state).exterior.xy[1].tolist()\n x_states = x_states + x\n y_states = y_states + y\n elif df_state["geometry"][index].type == "MultiPolygon":\n x = [\n poly.simplify(simplify_state).exterior.xy[0].tolist()\n for poly in df_state["geometry"][index].geoms\n ]\n y = [\n poly.simplify(simplify_state).exterior.xy[1].tolist()\n for poly in df_state["geometry"][index].geoms\n ]\n for segment in range(len(x)):\n x_states = x_states + x[segment]\n y_states = y_states + y[segment]\n x_states.append(np.nan)\n y_states.append(np.nan)\n x_states.append(np.nan)\n y_states.append(np.nan)\n\n for lev in LEVELS:\n county_data = dict(\n type="scatter",\n mode="lines",\n x=x_traces[lev],\n y=y_traces[lev],\n line=county_outline,\n fill="toself",\n fillcolor=color_lookup[lev],\n name=lev,\n hoverinfo="none",\n )\n plot_data.append(county_data)\n\n if show_hover:\n hover_points = dict(\n type="scatter",\n showlegend=False,\n legendgroup="centroids",\n x=x_centroids,\n y=y_centroids,\n text=centroid_text,\n name="US Counties",\n mode="markers",\n marker={"color": "white", "opacity": 0},\n hoverinfo="text",\n )\n centroids_on_select = dict(\n selected=dict(marker=centroid_marker),\n unselected=dict(marker=dict(opacity=0)),\n )\n hover_points.update(centroids_on_select)\n 
plot_data.append(hover_points)\n\n if show_state_data:\n state_data = dict(\n type="scatter",\n legendgroup="States",\n line=state_outline,\n x=x_states,\n y=y_states,\n hoverinfo="text",\n showlegend=False,\n mode="lines",\n )\n plot_data.append(state_data)\n\n DEFAULT_LAYOUT = dict(\n hovermode="closest",\n xaxis=dict(\n autorange=False,\n range=USA_XRANGE,\n showgrid=False,\n zeroline=False,\n fixedrange=True,\n showticklabels=False,\n ),\n yaxis=dict(\n autorange=False,\n range=USA_YRANGE,\n showgrid=False,\n zeroline=False,\n fixedrange=True,\n showticklabels=False,\n ),\n margin=dict(t=40, b=20, r=20, l=20),\n width=900,\n height=450,\n dragmode="select",\n legend=dict(traceorder="reversed", xanchor="right", yanchor="top", x=1, y=1),\n annotations=[],\n )\n fig = dict(data=plot_data, layout=DEFAULT_LAYOUT)\n fig["layout"].update(layout_options)\n fig["layout"]["annotations"].append(\n dict(\n x=1,\n y=1.05,\n xref="paper",\n yref="paper",\n xanchor="right",\n showarrow=False,\n text="<b>" + legend_title + "</b>",\n )\n )\n\n if len(scope) == 1 and scope[0].lower() == "usa":\n xaxis_range_low = -125.0\n xaxis_range_high = -55.0\n yaxis_range_low = 25.0\n yaxis_range_high = 49.0\n else:\n xaxis_range_low = float("inf")\n xaxis_range_high = float("-inf")\n yaxis_range_low = float("inf")\n yaxis_range_high = float("-inf")\n for trace in fig["data"]:\n if all(isinstance(n, Number) for n in trace["x"]):\n calc_x_min = min(trace["x"] or [float("inf")])\n calc_x_max = max(trace["x"] or [float("-inf")])\n if calc_x_min < xaxis_range_low:\n xaxis_range_low = calc_x_min\n if calc_x_max > xaxis_range_high:\n xaxis_range_high = calc_x_max\n if all(isinstance(n, Number) for n in trace["y"]):\n calc_y_min = min(trace["y"] or [float("inf")])\n calc_y_max = max(trace["y"] or [float("-inf")])\n if calc_y_min < yaxis_range_low:\n yaxis_range_low = calc_y_min\n if calc_y_max > yaxis_range_high:\n yaxis_range_high = calc_y_max\n\n # camera zoom\n fig["layout"]["xaxis"]["range"] = 
[xaxis_range_low, xaxis_range_high]\n fig["layout"]["yaxis"]["range"] = [yaxis_range_low, yaxis_range_high]\n\n # aspect ratio\n if asp is None:\n usa_x_range = USA_XRANGE[1] - USA_XRANGE[0]\n usa_y_range = USA_YRANGE[1] - USA_YRANGE[0]\n asp = usa_x_range / usa_y_range\n\n # based on your figure\n width = float(\n fig["layout"]["xaxis"]["range"][1] - fig["layout"]["xaxis"]["range"][0]\n )\n height = float(\n fig["layout"]["yaxis"]["range"][1] - fig["layout"]["yaxis"]["range"][0]\n )\n\n center = (\n sum(fig["layout"]["xaxis"]["range"]) / 2.0,\n sum(fig["layout"]["yaxis"]["range"]) / 2.0,\n )\n\n if height / width > (1 / asp):\n new_width = asp * height\n fig["layout"]["xaxis"]["range"][0] = center[0] - new_width * 0.5\n fig["layout"]["xaxis"]["range"][1] = center[0] + new_width * 0.5\n else:\n new_height = (1 / asp) * width\n fig["layout"]["yaxis"]["range"][0] = center[1] - new_height * 0.5\n fig["layout"]["yaxis"]["range"][1] = center[1] + new_height * 0.5\n\n return go.Figure(fig)\n
.venv\Lib\site-packages\plotly\figure_factory\_county_choropleth.py
_county_choropleth.py
Python
34,007
0.95
0.09773
0.018952
react-lib
441
2024-09-10T17:30:09.127539
MIT
false
5e0ff4b75e482bb9762e3bcbf9431102
from collections import OrderedDict\n\nfrom plotly import exceptions, optional_imports\nfrom plotly.graph_objs import graph_objs\n\n# Optional imports, may be None for users that only use our core functionality.\nnp = optional_imports.get_module("numpy")\nscp = optional_imports.get_module("scipy")\nsch = optional_imports.get_module("scipy.cluster.hierarchy")\nscs = optional_imports.get_module("scipy.spatial")\n\n\ndef create_dendrogram(\n X,\n orientation="bottom",\n labels=None,\n colorscale=None,\n distfun=None,\n linkagefun=lambda x: sch.linkage(x, "complete"),\n hovertext=None,\n color_threshold=None,\n):\n """\n Function that returns a dendrogram Plotly figure object. This is a thin\n wrapper around scipy.cluster.hierarchy.dendrogram.\n\n See also https://dash.plot.ly/dash-bio/clustergram.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (str) orientation: 'top', 'right', 'bottom', or 'left'\n :param (list) labels: List of axis category labels(observation labels)\n :param (list) colorscale: Optional colorscale for the dendrogram tree.\n Requires 8 colors to be specified, the 7th of\n which is ignored. 
With scipy>=1.5.0, the 2nd, 3rd\n and 6th are used twice as often as the others.\n Given a shorter list, the missing values are\n replaced with defaults and with a longer list the\n extra values are ignored.\n :param (function) distfun: Function to compute the pairwise distance from\n the observations\n :param (function) linkagefun: Function to compute the linkage matrix from\n the pairwise distances\n :param (list[list]) hovertext: List of hovertext for constituent traces of dendrogram\n clusters\n :param (double) color_threshold: Value at which the separation of clusters will be made\n\n Example 1: Simple bottom oriented dendrogram\n\n >>> from plotly.figure_factory import create_dendrogram\n\n >>> import numpy as np\n\n >>> X = np.random.rand(10,10)\n >>> fig = create_dendrogram(X)\n >>> fig.show()\n\n Example 2: Dendrogram to put on the left of the heatmap\n\n >>> from plotly.figure_factory import create_dendrogram\n\n >>> import numpy as np\n\n >>> X = np.random.rand(5,5)\n >>> names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']\n >>> dendro = create_dendrogram(X, orientation='right', labels=names)\n >>> dendro.update_layout({'width':700, 'height':500}) # doctest: +SKIP\n >>> dendro.show()\n\n Example 3: Dendrogram with Pandas\n\n >>> from plotly.figure_factory import create_dendrogram\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> Index= ['A','B','C','D','E','F','G','H','I','J']\n >>> df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)\n >>> fig = create_dendrogram(df, labels=Index)\n >>> fig.show()\n """\n if not scp or not scs or not sch:\n raise ImportError(\n "FigureFactory.create_dendrogram requires scipy, \\n scipy.spatial and scipy.hierarchy"\n )\n\n s = X.shape\n if len(s) != 2:\n exceptions.PlotlyError("X should be 2-dimensional array.")\n\n if distfun is None:\n distfun = scs.distance.pdist\n\n dendrogram = _Dendrogram(\n X,\n orientation,\n labels,\n colorscale,\n distfun=distfun,\n linkagefun=linkagefun,\n 
hovertext=hovertext,\n color_threshold=color_threshold,\n )\n\n return graph_objs.Figure(data=dendrogram.data, layout=dendrogram.layout)\n\n\nclass _Dendrogram(object):\n """Refer to FigureFactory.create_dendrogram() for docstring."""\n\n def __init__(\n self,\n X,\n orientation="bottom",\n labels=None,\n colorscale=None,\n width=np.inf,\n height=np.inf,\n xaxis="xaxis",\n yaxis="yaxis",\n distfun=None,\n linkagefun=lambda x: sch.linkage(x, "complete"),\n hovertext=None,\n color_threshold=None,\n ):\n self.orientation = orientation\n self.labels = labels\n self.xaxis = xaxis\n self.yaxis = yaxis\n self.data = []\n self.leaves = []\n self.sign = {self.xaxis: 1, self.yaxis: 1}\n self.layout = {self.xaxis: {}, self.yaxis: {}}\n\n if self.orientation in ["left", "bottom"]:\n self.sign[self.xaxis] = 1\n else:\n self.sign[self.xaxis] = -1\n\n if self.orientation in ["right", "bottom"]:\n self.sign[self.yaxis] = 1\n else:\n self.sign[self.yaxis] = -1\n\n if distfun is None:\n distfun = scs.distance.pdist\n\n (dd_traces, xvals, yvals, ordered_labels, leaves) = self.get_dendrogram_traces(\n X, colorscale, distfun, linkagefun, hovertext, color_threshold\n )\n\n self.labels = ordered_labels\n self.leaves = leaves\n yvals_flat = yvals.flatten()\n xvals_flat = xvals.flatten()\n\n self.zero_vals = []\n\n for i in range(len(yvals_flat)):\n if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:\n self.zero_vals.append(xvals_flat[i])\n\n if len(self.zero_vals) > len(yvals) + 1:\n # If the length of zero_vals is larger than the length of yvals,\n # it means that there are wrong vals because of the identicial samples.\n # Three and more identicial samples will make the yvals of spliting\n # center into 0 and it will accidentally take it as leaves.\n l_border = int(min(self.zero_vals))\n r_border = int(max(self.zero_vals))\n correct_leaves_pos = range(\n l_border, r_border + 1, int((r_border - l_border) / len(yvals))\n )\n # Regenerating the leaves pos from the 
self.zero_vals with equally intervals.\n self.zero_vals = [v for v in correct_leaves_pos]\n\n self.zero_vals.sort()\n self.layout = self.set_figure_layout(width, height)\n self.data = dd_traces\n\n def get_color_dict(self, colorscale):\n """\n Returns colorscale used for dendrogram tree clusters.\n\n :param (list) colorscale: Colors to use for the plot in rgb format.\n :rtype (dict): A dict of default colors mapped to the user colorscale.\n\n """\n\n # These are the color codes returned for dendrograms\n # We're replacing them with nicer colors\n # This list is the colors that can be used by dendrogram, which were\n # determined as the combination of the default above_threshold_color and\n # the default color palette (see scipy/cluster/hierarchy.py)\n d = {\n "r": "red",\n "g": "green",\n "b": "blue",\n "c": "cyan",\n "m": "magenta",\n "y": "yellow",\n "k": "black",\n # TODO: 'w' doesn't seem to be in the default color\n # palette in scipy/cluster/hierarchy.py\n "w": "white",\n }\n default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))\n\n if colorscale is None:\n rgb_colorscale = [\n "rgb(0,116,217)", # blue\n "rgb(35,205,205)", # cyan\n "rgb(61,153,112)", # green\n "rgb(40,35,35)", # black\n "rgb(133,20,75)", # magenta\n "rgb(255,65,54)", # red\n "rgb(255,255,255)", # white\n "rgb(255,220,0)", # yellow\n ]\n else:\n rgb_colorscale = colorscale\n\n for i in range(len(default_colors.keys())):\n k = list(default_colors.keys())[i] # PY3 won't index keys\n if i < len(rgb_colorscale):\n default_colors[k] = rgb_colorscale[i]\n\n # add support for cyclic format colors as introduced in scipy===1.5.0\n # before this, the colors were named 'r', 'b', 'y' etc., now they are\n # named 'C0', 'C1', etc. 
To keep the colors consistent regardless of the\n # scipy version, we try as much as possible to map the new colors to the\n # old colors\n # this mapping was found by inpecting scipy/cluster/hierarchy.py (see\n # comment above).\n new_old_color_map = [\n ("C0", "b"),\n ("C1", "g"),\n ("C2", "r"),\n ("C3", "c"),\n ("C4", "m"),\n ("C5", "y"),\n ("C6", "k"),\n ("C7", "g"),\n ("C8", "r"),\n ("C9", "c"),\n ]\n for nc, oc in new_old_color_map:\n try:\n default_colors[nc] = default_colors[oc]\n except KeyError:\n # it could happen that the old color isn't found (if a custom\n # colorscale was specified), in this case we set it to an\n # arbitrary default.\n default_colors[nc] = "rgb(0,116,217)"\n\n return default_colors\n\n def set_axis_layout(self, axis_key):\n """\n Sets and returns default axis object for dendrogram figure.\n\n :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.\n :rtype (dict): An axis_key dictionary with set parameters.\n\n """\n axis_defaults = {\n "type": "linear",\n "ticks": "outside",\n "mirror": "allticks",\n "rangemode": "tozero",\n "showticklabels": True,\n "zeroline": False,\n "showgrid": False,\n "showline": True,\n }\n\n if len(self.labels) != 0:\n axis_key_labels = self.xaxis\n if self.orientation in ["left", "right"]:\n axis_key_labels = self.yaxis\n if axis_key_labels not in self.layout:\n self.layout[axis_key_labels] = {}\n self.layout[axis_key_labels]["tickvals"] = [\n zv * self.sign[axis_key] for zv in self.zero_vals\n ]\n self.layout[axis_key_labels]["ticktext"] = self.labels\n self.layout[axis_key_labels]["tickmode"] = "array"\n\n self.layout[axis_key].update(axis_defaults)\n\n return self.layout[axis_key]\n\n def set_figure_layout(self, width, height):\n """\n Sets and returns default layout object for dendrogram figure.\n\n """\n self.layout.update(\n {\n "showlegend": False,\n "autosize": False,\n "hovermode": "closest",\n "width": width,\n "height": height,\n }\n )\n\n self.set_axis_layout(self.xaxis)\n 
self.set_axis_layout(self.yaxis)\n\n return self.layout\n\n def get_dendrogram_traces(\n self, X, colorscale, distfun, linkagefun, hovertext, color_threshold\n ):\n """\n Calculates all the elements needed for plotting a dendrogram.\n\n :param (ndarray) X: Matrix of observations as array of arrays\n :param (list) colorscale: Color scale for dendrogram tree clusters\n :param (function) distfun: Function to compute the pairwise distance\n from the observations\n :param (function) linkagefun: Function to compute the linkage matrix\n from the pairwise distances\n :param (list) hovertext: List of hovertext for constituent traces of dendrogram\n :rtype (tuple): Contains all the traces in the following order:\n (a) trace_list: List of Plotly trace objects for dendrogram tree\n (b) icoord: All X points of the dendrogram tree as array of arrays\n with length 4\n (c) dcoord: All Y points of the dendrogram tree as array of arrays\n with length 4\n (d) ordered_labels: leaf labels in the order they are going to\n appear on the plot\n (e) P['leaves']: left-to-right traversal of the leaves\n\n """\n d = distfun(X)\n Z = linkagefun(d)\n P = sch.dendrogram(\n Z,\n orientation=self.orientation,\n labels=self.labels,\n no_plot=True,\n color_threshold=color_threshold,\n )\n\n icoord = np.array(P["icoord"])\n dcoord = np.array(P["dcoord"])\n ordered_labels = np.array(P["ivl"])\n color_list = np.array(P["color_list"])\n colors = self.get_color_dict(colorscale)\n\n trace_list = []\n\n for i in range(len(icoord)):\n # xs and ys are arrays of 4 points that make up the '∩' shapes\n # of the dendrogram tree\n if self.orientation in ["top", "bottom"]:\n xs = icoord[i]\n else:\n xs = dcoord[i]\n\n if self.orientation in ["top", "bottom"]:\n ys = dcoord[i]\n else:\n ys = icoord[i]\n color_key = color_list[i]\n hovertext_label = None\n if hovertext:\n hovertext_label = hovertext[i]\n trace = dict(\n type="scatter",\n x=np.multiply(self.sign[self.xaxis], xs),\n 
y=np.multiply(self.sign[self.yaxis], ys),\n mode="lines",\n marker=dict(color=colors[color_key]),\n text=hovertext_label,\n hoverinfo="text",\n )\n\n try:\n x_index = int(self.xaxis[-1])\n except ValueError:\n x_index = ""\n\n try:\n y_index = int(self.yaxis[-1])\n except ValueError:\n y_index = ""\n\n trace["xaxis"] = f"x{x_index}"\n trace["yaxis"] = f"y{y_index}"\n\n trace_list.append(trace)\n\n return trace_list, icoord, dcoord, ordered_labels, P["leaves"]\n
.venv\Lib\site-packages\plotly\figure_factory\_dendrogram.py
_dendrogram.py
Python
13,880
0.95
0.131646
0.075529
vue-tools
812
2023-12-01T18:22:50.025974
BSD-3-Clause
false
63e84846205a8abb5874042877ba3400
from plotly import exceptions, optional_imports\nfrom plotly.figure_factory import utils\nfrom plotly.graph_objs import graph_objs\n\n# Optional imports, may be None for users that only use our core functionality.\nnp = optional_imports.get_module("numpy")\npd = optional_imports.get_module("pandas")\nscipy = optional_imports.get_module("scipy")\nscipy_stats = optional_imports.get_module("scipy.stats")\n\n\nDEFAULT_HISTNORM = "probability density"\nALTERNATIVE_HISTNORM = "probability"\n\n\ndef validate_distplot(hist_data, curve_type):\n """\n Distplot-specific validations\n\n :raises: (PlotlyError) If hist_data is not a list of lists\n :raises: (PlotlyError) If curve_type is not valid (i.e. not 'kde' or\n 'normal').\n """\n hist_data_types = (list,)\n if np:\n hist_data_types += (np.ndarray,)\n if pd:\n hist_data_types += (pd.core.series.Series,)\n\n if not isinstance(hist_data[0], hist_data_types):\n raise exceptions.PlotlyError(\n "Oops, this function was written "\n "to handle multiple datasets, if "\n "you want to plot just one, make "\n "sure your hist_data variable is "\n "still a list of lists, i.e. x = "\n "[1, 2, 3] -> x = [[1, 2, 3]]"\n )\n\n curve_opts = ("kde", "normal")\n if curve_type not in curve_opts:\n raise exceptions.PlotlyError("curve_type must be defined as 'kde' or 'normal'")\n\n if not scipy:\n raise ImportError("FigureFactory.create_distplot requires scipy")\n\n\ndef create_distplot(\n hist_data,\n group_labels,\n bin_size=1.0,\n curve_type="kde",\n colors=None,\n rug_text=None,\n histnorm=DEFAULT_HISTNORM,\n show_hist=True,\n show_curve=True,\n show_rug=True,\n):\n """\n Function that creates a distplot similar to seaborn.distplot;\n **this function is deprecated**, use instead :mod:`plotly.express`\n functions, for example\n\n >>> import plotly.express as px\n >>> tips = px.data.tips()\n >>> fig = px.histogram(tips, x="total_bill", y="tip", color="sex", marginal="rug",\n ... 
hover_data=tips.columns)\n >>> fig.show()\n\n\n The distplot can be composed of all or any combination of the following\n 3 components: (1) histogram, (2) curve: (a) kernel density estimation\n or (b) normal curve, and (3) rug plot. Additionally, multiple distplots\n (from multiple datasets) can be created in the same plot.\n\n :param (list[list]) hist_data: Use list of lists to plot multiple data\n sets on the same plot.\n :param (list[str]) group_labels: Names for each data set.\n :param (list[float]|float) bin_size: Size of histogram bins.\n Default = 1.\n :param (str) curve_type: 'kde' or 'normal'. Default = 'kde'\n :param (str) histnorm: 'probability density' or 'probability'\n Default = 'probability density'\n :param (bool) show_hist: Add histogram to distplot? Default = True\n :param (bool) show_curve: Add curve to distplot? Default = True\n :param (bool) show_rug: Add rug to distplot? Default = True\n :param (list[str]) colors: Colors for traces.\n :param (list[list]) rug_text: Hovertext values for rug_plot,\n :return (dict): Representation of a distplot figure.\n\n Example 1: Simple distplot of 1 data set\n\n >>> from plotly.figure_factory import create_distplot\n\n >>> hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,\n ... 3.5, 4.1, 4.4, 4.5, 4.5,\n ... 5.0, 5.0, 5.2, 5.5, 5.5,\n ... 5.5, 5.5, 5.5, 6.1, 7.0]]\n >>> group_labels = ['distplot example']\n >>> fig = create_distplot(hist_data, group_labels)\n >>> fig.show()\n\n\n Example 2: Two data sets and added rug text\n\n >>> from plotly.figure_factory import create_distplot\n >>> # Add histogram data\n >>> hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,\n ... -0.9, -0.07, 1.95, 0.9, -0.2,\n ... -0.5, 0.3, 0.4, -0.37, 0.6]\n >>> hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,\n ... 1.0, 0.8, 1.7, 0.5, 0.8,\n ... -0.3, 1.2, 0.56, 0.3, 2.2]\n\n >>> # Group data together\n >>> hist_data = [hist1_x, hist2_x]\n\n >>> group_labels = ['2012', '2013']\n\n >>> # Add text\n >>> rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',\n ... 
'f1', 'g1', 'h1', 'i1', 'j1',\n ... 'k1', 'l1', 'm1', 'n1', 'o1']\n\n >>> rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',\n ... 'f2', 'g2', 'h2', 'i2', 'j2',\n ... 'k2', 'l2', 'm2', 'n2', 'o2']\n\n >>> # Group text together\n >>> rug_text_all = [rug_text_1, rug_text_2]\n\n >>> # Create distplot\n >>> fig = create_distplot(\n ... hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)\n\n >>> # Add title\n >>> fig.update_layout(title='Dist Plot') # doctest: +SKIP\n >>> fig.show()\n\n\n Example 3: Plot with normal curve and hide rug plot\n\n >>> from plotly.figure_factory import create_distplot\n >>> import numpy as np\n\n >>> x1 = np.random.randn(190)\n >>> x2 = np.random.randn(200)+1\n >>> x3 = np.random.randn(200)-1\n >>> x4 = np.random.randn(210)+2\n\n >>> hist_data = [x1, x2, x3, x4]\n >>> group_labels = ['2012', '2013', '2014', '2015']\n\n >>> fig = create_distplot(\n ... hist_data, group_labels, curve_type='normal',\n ... show_rug=False, bin_size=.4)\n\n\n Example 4: Distplot with Pandas\n\n >>> from plotly.figure_factory import create_distplot\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> df = pd.DataFrame({'2012': np.random.randn(200),\n ... 
'2013': np.random.randn(200)+1})\n >>> fig = create_distplot([df[c] for c in df.columns], df.columns)\n >>> fig.show()\n """\n if colors is None:\n colors = []\n if rug_text is None:\n rug_text = []\n\n validate_distplot(hist_data, curve_type)\n utils.validate_equal_length(hist_data, group_labels)\n\n if isinstance(bin_size, (float, int)):\n bin_size = [bin_size] * len(hist_data)\n\n data = []\n if show_hist:\n hist = _Distplot(\n hist_data,\n histnorm,\n group_labels,\n bin_size,\n curve_type,\n colors,\n rug_text,\n show_hist,\n show_curve,\n ).make_hist()\n\n data.append(hist)\n\n if show_curve:\n if curve_type == "normal":\n curve = _Distplot(\n hist_data,\n histnorm,\n group_labels,\n bin_size,\n curve_type,\n colors,\n rug_text,\n show_hist,\n show_curve,\n ).make_normal()\n else:\n curve = _Distplot(\n hist_data,\n histnorm,\n group_labels,\n bin_size,\n curve_type,\n colors,\n rug_text,\n show_hist,\n show_curve,\n ).make_kde()\n\n data.append(curve)\n\n if show_rug:\n rug = _Distplot(\n hist_data,\n histnorm,\n group_labels,\n bin_size,\n curve_type,\n colors,\n rug_text,\n show_hist,\n show_curve,\n ).make_rug()\n\n data.append(rug)\n layout = graph_objs.Layout(\n barmode="overlay",\n hovermode="closest",\n legend=dict(traceorder="reversed"),\n xaxis1=dict(domain=[0.0, 1.0], anchor="y2", zeroline=False),\n yaxis1=dict(domain=[0.35, 1], anchor="free", position=0.0),\n yaxis2=dict(domain=[0, 0.25], anchor="x1", dtick=1, showticklabels=False),\n )\n else:\n layout = graph_objs.Layout(\n barmode="overlay",\n hovermode="closest",\n legend=dict(traceorder="reversed"),\n xaxis1=dict(domain=[0.0, 1.0], anchor="y2", zeroline=False),\n yaxis1=dict(domain=[0.0, 1], anchor="free", position=0.0),\n )\n\n data = sum(data, [])\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Distplot(object):\n """\n Refer to TraceFactory.create_distplot() for docstring\n """\n\n def __init__(\n self,\n hist_data,\n histnorm,\n group_labels,\n bin_size,\n curve_type,\n 
colors,\n rug_text,\n show_hist,\n show_curve,\n ):\n self.hist_data = hist_data\n self.histnorm = histnorm\n self.group_labels = group_labels\n self.bin_size = bin_size\n self.show_hist = show_hist\n self.show_curve = show_curve\n self.trace_number = len(hist_data)\n if rug_text:\n self.rug_text = rug_text\n else:\n self.rug_text = [None] * self.trace_number\n\n self.start = []\n self.end = []\n if colors:\n self.colors = colors\n else:\n self.colors = [\n "rgb(31, 119, 180)",\n "rgb(255, 127, 14)",\n "rgb(44, 160, 44)",\n "rgb(214, 39, 40)",\n "rgb(148, 103, 189)",\n "rgb(140, 86, 75)",\n "rgb(227, 119, 194)",\n "rgb(127, 127, 127)",\n "rgb(188, 189, 34)",\n "rgb(23, 190, 207)",\n ]\n self.curve_x = [None] * self.trace_number\n self.curve_y = [None] * self.trace_number\n\n for trace in self.hist_data:\n self.start.append(min(trace) * 1.0)\n self.end.append(max(trace) * 1.0)\n\n def make_hist(self):\n """\n Makes the histogram(s) for FigureFactory.create_distplot().\n\n :rtype (list) hist: list of histogram representations\n """\n hist = [None] * self.trace_number\n\n for index in range(self.trace_number):\n hist[index] = dict(\n type="histogram",\n x=self.hist_data[index],\n xaxis="x1",\n yaxis="y1",\n histnorm=self.histnorm,\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n marker=dict(color=self.colors[index % len(self.colors)]),\n autobinx=False,\n xbins=dict(\n start=self.start[index],\n end=self.end[index],\n size=self.bin_size[index],\n ),\n opacity=0.7,\n )\n return hist\n\n def make_kde(self):\n """\n Makes the kernel density estimation(s) for create_distplot().\n\n This is called when curve_type = 'kde' in create_distplot().\n\n :rtype (list) curve: list of kde representations\n """\n curve = [None] * self.trace_number\n for index in range(self.trace_number):\n self.curve_x[index] = [\n self.start[index] + x * (self.end[index] - self.start[index]) / 500\n for x in range(500)\n ]\n self.curve_y[index] = 
scipy_stats.gaussian_kde(self.hist_data[index])(\n self.curve_x[index]\n )\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(\n type="scatter",\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis="x1",\n yaxis="y1",\n mode="lines",\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index % len(self.colors)]),\n )\n return curve\n\n def make_normal(self):\n """\n Makes the normal curve(s) for create_distplot().\n\n This is called when curve_type = 'normal' in create_distplot().\n\n :rtype (list) curve: list of normal curve representations\n """\n curve = [None] * self.trace_number\n mean = [None] * self.trace_number\n sd = [None] * self.trace_number\n\n for index in range(self.trace_number):\n mean[index], sd[index] = scipy_stats.norm.fit(self.hist_data[index])\n self.curve_x[index] = [\n self.start[index] + x * (self.end[index] - self.start[index]) / 500\n for x in range(500)\n ]\n self.curve_y[index] = scipy_stats.norm.pdf(\n self.curve_x[index], loc=mean[index], scale=sd[index]\n )\n\n if self.histnorm == ALTERNATIVE_HISTNORM:\n self.curve_y[index] *= self.bin_size[index]\n\n for index in range(self.trace_number):\n curve[index] = dict(\n type="scatter",\n x=self.curve_x[index],\n y=self.curve_y[index],\n xaxis="x1",\n yaxis="y1",\n mode="lines",\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=False if self.show_hist else True,\n marker=dict(color=self.colors[index % len(self.colors)]),\n )\n return curve\n\n def make_rug(self):\n """\n Makes the rug plot(s) for create_distplot().\n\n :rtype (list) rug: list of rug plot representations\n """\n rug = [None] * self.trace_number\n for index in range(self.trace_number):\n rug[index] = dict(\n type="scatter",\n x=self.hist_data[index],\n y=([self.group_labels[index]] * 
len(self.hist_data[index])),\n xaxis="x1",\n yaxis="y2",\n mode="markers",\n name=self.group_labels[index],\n legendgroup=self.group_labels[index],\n showlegend=(False if self.show_hist or self.show_curve else True),\n text=self.rug_text[index],\n marker=dict(\n color=self.colors[index % len(self.colors)], symbol="line-ns-open"\n ),\n )\n return rug\n
.venv\Lib\site-packages\plotly\figure_factory\_distplot.py
_distplot.py
Python
14,099
0.95
0.113379
0.005362
vue-tools
854
2025-05-15T09:37:58.679534
MIT
false
d5cd7c57bd1dff58ee1ebb27bad919b2
from plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nfrom plotly.subplots import make_subplots\n\nimport math\nfrom numbers import Number\n\npd = optional_imports.get_module("pandas")\n\nTICK_COLOR = "#969696"\nAXIS_TITLE_COLOR = "#0f0f0f"\nAXIS_TITLE_SIZE = 12\nGRID_COLOR = "#ffffff"\nLEGEND_COLOR = "#efefef"\nPLOT_BGCOLOR = "#ededed"\nANNOT_RECT_COLOR = "#d0d0d0"\nLEGEND_BORDER_WIDTH = 1\nLEGEND_ANNOT_X = 1.05\nLEGEND_ANNOT_Y = 0.5\nMAX_TICKS_PER_AXIS = 5\nTHRES_FOR_FLIPPED_FACET_TITLES = 10\nGRID_WIDTH = 1\n\nVALID_TRACE_TYPES = ["scatter", "scattergl", "histogram", "bar", "box"]\n\nCUSTOM_LABEL_ERROR = (\n "If you are using a dictionary for custom labels for the facet row/col, "\n "make sure each key in that column of the dataframe is in your facet "\n "labels. The keys you need are {}"\n)\n\n\ndef _is_flipped(num):\n if num >= THRES_FOR_FLIPPED_FACET_TITLES:\n flipped = True\n else:\n flipped = False\n return flipped\n\n\ndef _return_label(original_label, facet_labels, facet_var):\n if isinstance(facet_labels, dict):\n label = facet_labels[original_label]\n elif isinstance(facet_labels, str):\n label = "{}: {}".format(facet_var, original_label)\n else:\n label = original_label\n return label\n\n\ndef _legend_annotation(color_name):\n legend_title = dict(\n textangle=0,\n xanchor="left",\n yanchor="middle",\n x=LEGEND_ANNOT_X,\n y=1.03,\n showarrow=False,\n xref="paper",\n yref="paper",\n text="factor({})".format(color_name),\n font=dict(size=13, color="#000000"),\n )\n return legend_title\n\n\ndef _annotation_dict(\n text, lane, num_of_lanes, SUBPLOT_SPACING, row_col="col", flipped=True\n):\n temp = (1 - (num_of_lanes - 1) * SUBPLOT_SPACING) / (num_of_lanes)\n if not flipped:\n xanchor = "center"\n yanchor = "middle"\n if row_col == "col":\n x = (lane - 1) * (temp + SUBPLOT_SPACING) + 0.5 * temp\n y = 1.03\n textangle = 0\n elif row_col == "row":\n y = (lane - 1) * (temp + SUBPLOT_SPACING) + 
0.5 * temp\n x = 1.03\n textangle = 90\n else:\n if row_col == "col":\n xanchor = "center"\n yanchor = "bottom"\n x = (lane - 1) * (temp + SUBPLOT_SPACING) + 0.5 * temp\n y = 1.0\n textangle = 270\n elif row_col == "row":\n xanchor = "left"\n yanchor = "middle"\n y = (lane - 1) * (temp + SUBPLOT_SPACING) + 0.5 * temp\n x = 1.0\n textangle = 0\n\n annotation_dict = dict(\n textangle=textangle,\n xanchor=xanchor,\n yanchor=yanchor,\n x=x,\n y=y,\n showarrow=False,\n xref="paper",\n yref="paper",\n text=str(text),\n font=dict(size=13, color=AXIS_TITLE_COLOR),\n )\n return annotation_dict\n\n\ndef _axis_title_annotation(text, x_or_y_axis):\n if x_or_y_axis == "x":\n x_pos = 0.5\n y_pos = -0.1\n textangle = 0\n elif x_or_y_axis == "y":\n x_pos = -0.1\n y_pos = 0.5\n textangle = 270\n\n if not text:\n text = ""\n\n annot = {\n "font": {"color": "#000000", "size": AXIS_TITLE_SIZE},\n "showarrow": False,\n "text": text,\n "textangle": textangle,\n "x": x_pos,\n "xanchor": "center",\n "xref": "paper",\n "y": y_pos,\n "yanchor": "middle",\n "yref": "paper",\n }\n return annot\n\n\ndef _add_shapes_to_fig(fig, annot_rect_color, flipped_rows=False, flipped_cols=False):\n shapes_list = []\n for key in fig["layout"].to_plotly_json().keys():\n if "axis" in key and fig["layout"][key]["domain"] != [0.0, 1.0]:\n shape = {\n "fillcolor": annot_rect_color,\n "layer": "below",\n "line": {"color": annot_rect_color, "width": 1},\n "type": "rect",\n "xref": "paper",\n "yref": "paper",\n }\n\n if "xaxis" in key:\n shape["x0"] = fig["layout"][key]["domain"][0]\n shape["x1"] = fig["layout"][key]["domain"][1]\n shape["y0"] = 1.005\n shape["y1"] = 1.05\n\n if flipped_cols:\n shape["y1"] += 0.5\n shapes_list.append(shape)\n\n elif "yaxis" in key:\n shape["x0"] = 1.005\n shape["x1"] = 1.05\n shape["y0"] = fig["layout"][key]["domain"][0]\n shape["y1"] = fig["layout"][key]["domain"][1]\n\n if flipped_rows:\n shape["x1"] += 1\n shapes_list.append(shape)\n\n fig["layout"]["shapes"] = 
shapes_list\n\n\ndef _make_trace_for_scatter(trace, trace_type, color, **kwargs_marker):\n if trace_type in ["scatter", "scattergl"]:\n trace["mode"] = "markers"\n trace["marker"] = dict(color=color, **kwargs_marker)\n return trace\n\n\ndef _facet_grid_color_categorical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colormap,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n):\n fig = make_subplots(\n rows=num_of_rows,\n cols=num_of_cols,\n shared_xaxes=True,\n shared_yaxes=True,\n horizontal_spacing=SUBPLOT_SPACING,\n vertical_spacing=SUBPLOT_SPACING,\n print_grid=False,\n )\n\n annotations = []\n if not facet_row and not facet_col:\n color_groups = list(df.groupby(color_name))\n for group in color_groups:\n trace = dict(\n type=trace_type,\n name=group[0],\n marker=dict(color=colormap[group[0]]),\n **kwargs_trace,\n )\n if x:\n trace["x"] = group[1][x]\n if y:\n trace["y"] = group[1][y]\n trace = _make_trace_for_scatter(\n trace, trace_type, colormap[group[0]], **kwargs_marker\n )\n\n fig.append_trace(trace, 1, 1)\n\n elif (facet_row and not facet_col) or (not facet_row and facet_col):\n groups_by_facet = list(df.groupby(facet_row if facet_row else facet_col))\n for j, group in enumerate(groups_by_facet):\n for color_val in df[color_name].unique():\n data_by_color = group[1][group[1][color_name] == color_val]\n trace = dict(\n type=trace_type,\n name=color_val,\n marker=dict(color=colormap[color_val]),\n **kwargs_trace,\n )\n if x:\n trace["x"] = data_by_color[x]\n if y:\n trace["y"] = data_by_color[y]\n trace = _make_trace_for_scatter(\n trace, trace_type, colormap[color_val], **kwargs_marker\n )\n\n fig.append_trace(\n trace, j + 1 if facet_row else 1, 1 if facet_row else j + 1\n )\n\n label = _return_label(\n group[0],\n facet_row_labels if facet_row else facet_col_labels,\n facet_row if facet_row 
else facet_col,\n )\n\n annotations.append(\n _annotation_dict(\n label,\n num_of_rows - j if facet_row else j + 1,\n num_of_rows if facet_row else num_of_cols,\n SUBPLOT_SPACING,\n "row" if facet_row else "col",\n flipped_rows,\n )\n )\n\n elif facet_row and facet_col:\n groups_by_facets = list(df.groupby([facet_row, facet_col]))\n tuple_to_facet_group = {item[0]: item[1] for item in groups_by_facets}\n\n row_values = df[facet_row].unique()\n col_values = df[facet_col].unique()\n color_vals = df[color_name].unique()\n for row_count, x_val in enumerate(row_values):\n for col_count, y_val in enumerate(col_values):\n try:\n group = tuple_to_facet_group[(x_val, y_val)]\n except KeyError:\n group = pd.DataFrame(\n [[None, None, None]], columns=[x, y, color_name]\n )\n\n for color_val in color_vals:\n if group.values.tolist() != [[None, None, None]]:\n group_filtered = group[group[color_name] == color_val]\n\n trace = dict(\n type=trace_type,\n name=color_val,\n marker=dict(color=colormap[color_val]),\n **kwargs_trace,\n )\n new_x = group_filtered[x]\n new_y = group_filtered[y]\n else:\n trace = dict(\n type=trace_type,\n name=color_val,\n marker=dict(color=colormap[color_val]),\n showlegend=False,\n **kwargs_trace,\n )\n new_x = group[x]\n new_y = group[y]\n\n if x:\n trace["x"] = new_x\n if y:\n trace["y"] = new_y\n trace = _make_trace_for_scatter(\n trace, trace_type, colormap[color_val], **kwargs_marker\n )\n\n fig.append_trace(trace, row_count + 1, col_count + 1)\n if row_count == 0:\n label = _return_label(\n col_values[col_count], facet_col_labels, facet_col\n )\n annotations.append(\n _annotation_dict(\n label,\n col_count + 1,\n num_of_cols,\n SUBPLOT_SPACING,\n row_col="col",\n flipped=flipped_cols,\n )\n )\n label = _return_label(row_values[row_count], facet_row_labels, facet_row)\n annotations.append(\n _annotation_dict(\n label,\n num_of_rows - row_count,\n num_of_rows,\n SUBPLOT_SPACING,\n row_col="row",\n flipped=flipped_rows,\n )\n )\n\n return fig, 
annotations\n\n\ndef _facet_grid_color_numerical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colormap,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n):\n fig = make_subplots(\n rows=num_of_rows,\n cols=num_of_cols,\n shared_xaxes=True,\n shared_yaxes=True,\n horizontal_spacing=SUBPLOT_SPACING,\n vertical_spacing=SUBPLOT_SPACING,\n print_grid=False,\n )\n\n annotations = []\n if not facet_row and not facet_col:\n trace = dict(\n type=trace_type,\n marker=dict(color=df[color_name], colorscale=colormap, showscale=True),\n **kwargs_trace,\n )\n if x:\n trace["x"] = df[x]\n if y:\n trace["y"] = df[y]\n trace = _make_trace_for_scatter(\n trace, trace_type, df[color_name], **kwargs_marker\n )\n\n fig.append_trace(trace, 1, 1)\n\n if (facet_row and not facet_col) or (not facet_row and facet_col):\n groups_by_facet = list(df.groupby(facet_row if facet_row else facet_col))\n for j, group in enumerate(groups_by_facet):\n trace = dict(\n type=trace_type,\n marker=dict(\n color=df[color_name],\n colorscale=colormap,\n showscale=True,\n colorbar=dict(x=1.15),\n ),\n **kwargs_trace,\n )\n if x:\n trace["x"] = group[1][x]\n if y:\n trace["y"] = group[1][y]\n trace = _make_trace_for_scatter(\n trace, trace_type, df[color_name], **kwargs_marker\n )\n\n fig.append_trace(\n trace, j + 1 if facet_row else 1, 1 if facet_row else j + 1\n )\n\n labels = facet_row_labels if facet_row else facet_col_labels\n label = _return_label(\n group[0], labels, facet_row if facet_row else facet_col\n )\n\n annotations.append(\n _annotation_dict(\n label,\n num_of_rows - j if facet_row else j + 1,\n num_of_rows if facet_row else num_of_cols,\n SUBPLOT_SPACING,\n "row" if facet_row else "col",\n flipped=flipped_rows,\n )\n )\n\n elif facet_row and facet_col:\n groups_by_facets = list(df.groupby([facet_row, facet_col]))\n 
tuple_to_facet_group = {item[0]: item[1] for item in groups_by_facets}\n\n row_values = df[facet_row].unique()\n col_values = df[facet_col].unique()\n for row_count, x_val in enumerate(row_values):\n for col_count, y_val in enumerate(col_values):\n try:\n group = tuple_to_facet_group[(x_val, y_val)]\n except KeyError:\n group = pd.DataFrame(\n [[None, None, None]], columns=[x, y, color_name]\n )\n\n if group.values.tolist() != [[None, None, None]]:\n trace = dict(\n type=trace_type,\n marker=dict(\n color=df[color_name],\n colorscale=colormap,\n showscale=(row_count == 0),\n colorbar=dict(x=1.15),\n ),\n **kwargs_trace,\n )\n\n else:\n trace = dict(type=trace_type, showlegend=False, **kwargs_trace)\n\n if x:\n trace["x"] = group[x]\n if y:\n trace["y"] = group[y]\n trace = _make_trace_for_scatter(\n trace, trace_type, df[color_name], **kwargs_marker\n )\n\n fig.append_trace(trace, row_count + 1, col_count + 1)\n if row_count == 0:\n label = _return_label(\n col_values[col_count], facet_col_labels, facet_col\n )\n annotations.append(\n _annotation_dict(\n label,\n col_count + 1,\n num_of_cols,\n SUBPLOT_SPACING,\n row_col="col",\n flipped=flipped_cols,\n )\n )\n label = _return_label(row_values[row_count], facet_row_labels, facet_row)\n annotations.append(\n _annotation_dict(\n row_values[row_count],\n num_of_rows - row_count,\n num_of_rows,\n SUBPLOT_SPACING,\n row_col="row",\n flipped=flipped_rows,\n )\n )\n\n return fig, annotations\n\n\ndef _facet_grid(\n df,\n x,\n y,\n facet_row,\n facet_col,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n):\n fig = make_subplots(\n rows=num_of_rows,\n cols=num_of_cols,\n shared_xaxes=True,\n shared_yaxes=True,\n horizontal_spacing=SUBPLOT_SPACING,\n vertical_spacing=SUBPLOT_SPACING,\n print_grid=False,\n )\n annotations = []\n if not facet_row and not facet_col:\n trace = 
dict(\n type=trace_type,\n marker=dict(color=marker_color, line=kwargs_marker["line"]),\n **kwargs_trace,\n )\n\n if x:\n trace["x"] = df[x]\n if y:\n trace["y"] = df[y]\n trace = _make_trace_for_scatter(\n trace, trace_type, marker_color, **kwargs_marker\n )\n\n fig.append_trace(trace, 1, 1)\n\n elif (facet_row and not facet_col) or (not facet_row and facet_col):\n groups_by_facet = list(df.groupby(facet_row if facet_row else facet_col))\n for j, group in enumerate(groups_by_facet):\n trace = dict(\n type=trace_type,\n marker=dict(color=marker_color, line=kwargs_marker["line"]),\n **kwargs_trace,\n )\n\n if x:\n trace["x"] = group[1][x]\n if y:\n trace["y"] = group[1][y]\n trace = _make_trace_for_scatter(\n trace, trace_type, marker_color, **kwargs_marker\n )\n\n fig.append_trace(\n trace, j + 1 if facet_row else 1, 1 if facet_row else j + 1\n )\n\n label = _return_label(\n group[0],\n facet_row_labels if facet_row else facet_col_labels,\n facet_row if facet_row else facet_col,\n )\n\n annotations.append(\n _annotation_dict(\n label,\n num_of_rows - j if facet_row else j + 1,\n num_of_rows if facet_row else num_of_cols,\n SUBPLOT_SPACING,\n "row" if facet_row else "col",\n flipped_rows,\n )\n )\n\n elif facet_row and facet_col:\n groups_by_facets = list(df.groupby([facet_row, facet_col]))\n tuple_to_facet_group = {item[0]: item[1] for item in groups_by_facets}\n\n row_values = df[facet_row].unique()\n col_values = df[facet_col].unique()\n for row_count, x_val in enumerate(row_values):\n for col_count, y_val in enumerate(col_values):\n try:\n group = tuple_to_facet_group[(x_val, y_val)]\n except KeyError:\n group = pd.DataFrame([[None, None]], columns=[x, y])\n trace = dict(\n type=trace_type,\n marker=dict(color=marker_color, line=kwargs_marker["line"]),\n **kwargs_trace,\n )\n if x:\n trace["x"] = group[x]\n if y:\n trace["y"] = group[y]\n trace = _make_trace_for_scatter(\n trace, trace_type, marker_color, **kwargs_marker\n )\n\n fig.append_trace(trace, row_count 
+ 1, col_count + 1)\n if row_count == 0:\n label = _return_label(\n col_values[col_count], facet_col_labels, facet_col\n )\n annotations.append(\n _annotation_dict(\n label,\n col_count + 1,\n num_of_cols,\n SUBPLOT_SPACING,\n row_col="col",\n flipped=flipped_cols,\n )\n )\n\n label = _return_label(row_values[row_count], facet_row_labels, facet_row)\n annotations.append(\n _annotation_dict(\n label,\n num_of_rows - row_count,\n num_of_rows,\n SUBPLOT_SPACING,\n row_col="row",\n flipped=flipped_rows,\n )\n )\n\n return fig, annotations\n\n\ndef create_facet_grid(\n df,\n x=None,\n y=None,\n facet_row=None,\n facet_col=None,\n color_name=None,\n colormap=None,\n color_is_cat=False,\n facet_row_labels=None,\n facet_col_labels=None,\n height=None,\n width=None,\n trace_type="scatter",\n scales="fixed",\n dtick_x=None,\n dtick_y=None,\n show_boxes=True,\n ggplot2=False,\n binsize=1,\n **kwargs,\n):\n """\n Returns figure for facet grid; **this function is deprecated**, since\n plotly.express functions should be used instead, for example\n\n >>> import plotly.express as px\n >>> tips = px.data.tips()\n >>> fig = px.scatter(tips,\n ... x='total_bill',\n ... y='tip',\n ... facet_row='sex',\n ... facet_col='smoker',\n ... color='size')\n\n\n :param (pd.DataFrame) df: the dataframe of columns for the facet grid.\n :param (str) x: the name of the dataframe column for the x axis data.\n :param (str) y: the name of the dataframe column for the y axis data.\n :param (str) facet_row: the name of the dataframe column that is used to\n facet the grid into row panels.\n :param (str) facet_col: the name of the dataframe column that is used to\n facet the grid into column panels.\n :param (str) color_name: the name of your dataframe column that will\n function as the colormap variable.\n :param (str|list|dict) colormap: the param that determines how the\n color_name column colors the data. 
If the dataframe contains numeric\n data, then a dictionary of colors will group the data categorically\n while a Plotly Colorscale name or a custom colorscale will treat it\n numerically. To learn more about colors and types of colormap, run\n `help(plotly.colors)`.\n :param (bool) color_is_cat: determines whether a numerical column for the\n colormap will be treated as categorical (True) or sequential (False).\n Default = False.\n :param (str|dict) facet_row_labels: set to either 'name' or a dictionary\n of all the unique values in the faceting row mapped to some text to\n show up in the label annotations. If None, labeling works like usual.\n :param (str|dict) facet_col_labels: set to either 'name' or a dictionary\n of all the values in the faceting row mapped to some text to show up\n in the label annotations. If None, labeling works like usual.\n :param (int) height: the height of the facet grid figure.\n :param (int) width: the width of the facet grid figure.\n :param (str) trace_type: decides the type of plot to appear in the\n facet grid. The options are 'scatter', 'scattergl', 'histogram',\n 'bar', and 'box'.\n Default = 'scatter'.\n :param (str) scales: determines if axes have fixed ranges or not. Valid\n settings are 'fixed' (all axes fixed), 'free_x' (x axis free only),\n 'free_y' (y axis free only) or 'free' (both axes free).\n :param (float) dtick_x: determines the distance between each tick on the\n x-axis. Default is None which means dtick_x is set automatically.\n :param (float) dtick_y: determines the distance between each tick on the\n y-axis. Default is None which means dtick_y is set automatically.\n :param (bool) show_boxes: draws grey boxes behind the facet titles.\n :param (bool) ggplot2: draws the facet grid in the style of `ggplot2`. 
See\n http://ggplot2.tidyverse.org/reference/facet_grid.html for reference.\n Default = False\n :param (int) binsize: groups all data into bins of a given length.\n :param (dict) kwargs: a dictionary of scatterplot arguments.\n\n Examples 1: One Way Faceting\n\n >>> import plotly.figure_factory as ff\n >>> import pandas as pd\n >>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')\n\n >>> fig = ff.create_facet_grid(\n ... mpg,\n ... x='displ',\n ... y='cty',\n ... facet_col='cyl',\n ... )\n >>> fig.show()\n\n Example 2: Two Way Faceting\n\n >>> import plotly.figure_factory as ff\n\n >>> import pandas as pd\n\n >>> mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')\n\n >>> fig = ff.create_facet_grid(\n ... mpg,\n ... x='displ',\n ... y='cty',\n ... facet_row='drv',\n ... facet_col='cyl',\n ... )\n >>> fig.show()\n\n Example 3: Categorical Coloring\n\n >>> import plotly.figure_factory as ff\n >>> import pandas as pd\n >>> mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')\n >>> mtcars.cyl = mtcars.cyl.astype(str)\n >>> fig = ff.create_facet_grid(\n ... mtcars,\n ... x='mpg',\n ... y='wt',\n ... facet_col='cyl',\n ... color_name='cyl',\n ... color_is_cat=True,\n ... 
)\n >>> fig.show()\n\n\n """\n if not pd:\n raise ImportError("'pandas' must be installed for this figure_factory.")\n\n if not isinstance(df, pd.DataFrame):\n raise exceptions.PlotlyError("You must input a pandas DataFrame.")\n\n # make sure all columns are of homogenous datatype\n utils.validate_dataframe(df)\n\n if trace_type in ["scatter", "scattergl"]:\n if not x or not y:\n raise exceptions.PlotlyError(\n "You need to input 'x' and 'y' if you are you are using a "\n "trace_type of 'scatter' or 'scattergl'."\n )\n\n for key in [x, y, facet_row, facet_col, color_name]:\n if key is not None:\n try:\n df[key]\n except KeyError:\n raise exceptions.PlotlyError(\n "x, y, facet_row, facet_col and color_name must be keys "\n "in your dataframe."\n )\n # autoscale histogram bars\n if trace_type not in ["scatter", "scattergl"]:\n scales = "free"\n\n # validate scales\n if scales not in ["fixed", "free_x", "free_y", "free"]:\n raise exceptions.PlotlyError(\n "'scales' must be set to 'fixed', 'free_x', 'free_y' and 'free'."\n )\n\n if trace_type not in VALID_TRACE_TYPES:\n raise exceptions.PlotlyError(\n "'trace_type' must be in {}".format(VALID_TRACE_TYPES)\n )\n\n if trace_type == "histogram":\n SUBPLOT_SPACING = 0.06\n else:\n SUBPLOT_SPACING = 0.015\n\n # seperate kwargs for marker and else\n if "marker" in kwargs:\n kwargs_marker = kwargs["marker"]\n else:\n kwargs_marker = {}\n marker_color = kwargs_marker.pop("color", None)\n kwargs.pop("marker", None)\n kwargs_trace = kwargs\n\n if "size" not in kwargs_marker:\n if ggplot2:\n kwargs_marker["size"] = 5\n else:\n kwargs_marker["size"] = 8\n\n if "opacity" not in kwargs_marker:\n if not ggplot2:\n kwargs_trace["opacity"] = 0.6\n\n if "line" not in kwargs_marker:\n if not ggplot2:\n kwargs_marker["line"] = {"color": "darkgrey", "width": 1}\n else:\n kwargs_marker["line"] = {}\n\n # default marker size\n if not ggplot2:\n if not marker_color:\n marker_color = "rgb(31, 119, 180)"\n else:\n marker_color = "rgb(0, 0, 
0)"\n\n num_of_rows = 1\n num_of_cols = 1\n flipped_rows = False\n flipped_cols = False\n if facet_row:\n num_of_rows = len(df[facet_row].unique())\n flipped_rows = _is_flipped(num_of_rows)\n if isinstance(facet_row_labels, dict):\n for key in df[facet_row].unique():\n if key not in facet_row_labels.keys():\n unique_keys = df[facet_row].unique().tolist()\n raise exceptions.PlotlyError(CUSTOM_LABEL_ERROR.format(unique_keys))\n if facet_col:\n num_of_cols = len(df[facet_col].unique())\n flipped_cols = _is_flipped(num_of_cols)\n if isinstance(facet_col_labels, dict):\n for key in df[facet_col].unique():\n if key not in facet_col_labels.keys():\n unique_keys = df[facet_col].unique().tolist()\n raise exceptions.PlotlyError(CUSTOM_LABEL_ERROR.format(unique_keys))\n show_legend = False\n if color_name:\n if isinstance(df[color_name].iloc[0], str) or color_is_cat:\n show_legend = True\n if isinstance(colormap, dict):\n clrs.validate_colors_dict(colormap, "rgb")\n\n for val in df[color_name].unique():\n if val not in colormap.keys():\n raise exceptions.PlotlyError(\n "If using 'colormap' as a dictionary, make sure "\n "all the values of the colormap column are in "\n "the keys of your dictionary."\n )\n else:\n # use default plotly colors for dictionary\n default_colors = clrs.DEFAULT_PLOTLY_COLORS\n colormap = {}\n j = 0\n for val in df[color_name].unique():\n if j >= len(default_colors):\n j = 0\n colormap[val] = default_colors[j]\n j += 1\n fig, annotations = _facet_grid_color_categorical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colormap,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n\n elif isinstance(df[color_name].iloc[0], Number):\n if isinstance(colormap, dict):\n show_legend = True\n clrs.validate_colors_dict(colormap, "rgb")\n\n for val in df[color_name].unique():\n if val not in 
colormap.keys():\n raise exceptions.PlotlyError(\n "If using 'colormap' as a dictionary, make sure "\n "all the values of the colormap column are in "\n "the keys of your dictionary."\n )\n fig, annotations = _facet_grid_color_categorical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colormap,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n\n elif isinstance(colormap, list):\n colorscale_list = colormap\n clrs.validate_colorscale(colorscale_list)\n\n fig, annotations = _facet_grid_color_numerical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colorscale_list,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n elif isinstance(colormap, str):\n if colormap in clrs.PLOTLY_SCALES.keys():\n colorscale_list = clrs.PLOTLY_SCALES[colormap]\n else:\n raise exceptions.PlotlyError(\n "If 'colormap' is a string, it must be the name "\n "of a Plotly Colorscale. 
The available colorscale "\n "names are {}".format(clrs.PLOTLY_SCALES.keys())\n )\n fig, annotations = _facet_grid_color_numerical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colorscale_list,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n else:\n colorscale_list = clrs.PLOTLY_SCALES["Reds"]\n fig, annotations = _facet_grid_color_numerical(\n df,\n x,\n y,\n facet_row,\n facet_col,\n color_name,\n colorscale_list,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n\n else:\n fig, annotations = _facet_grid(\n df,\n x,\n y,\n facet_row,\n facet_col,\n num_of_rows,\n num_of_cols,\n facet_row_labels,\n facet_col_labels,\n trace_type,\n flipped_rows,\n flipped_cols,\n show_boxes,\n SUBPLOT_SPACING,\n marker_color,\n kwargs_trace,\n kwargs_marker,\n )\n\n if not height:\n height = max(600, 100 * num_of_rows)\n if not width:\n width = max(600, 100 * num_of_cols)\n\n fig["layout"].update(\n height=height, width=width, title="", paper_bgcolor="rgb(251, 251, 251)"\n )\n if ggplot2:\n fig["layout"].update(\n plot_bgcolor=PLOT_BGCOLOR,\n paper_bgcolor="rgb(255, 255, 255)",\n hovermode="closest",\n )\n\n # axis titles\n x_title_annot = _axis_title_annotation(x, "x")\n y_title_annot = _axis_title_annotation(y, "y")\n\n # annotations\n annotations.append(x_title_annot)\n annotations.append(y_title_annot)\n\n # legend\n fig["layout"]["showlegend"] = show_legend\n fig["layout"]["legend"]["bgcolor"] = LEGEND_COLOR\n fig["layout"]["legend"]["borderwidth"] = LEGEND_BORDER_WIDTH\n fig["layout"]["legend"]["x"] = 1.05\n fig["layout"]["legend"]["y"] = 1\n fig["layout"]["legend"]["yanchor"] = "top"\n\n if show_legend:\n fig["layout"]["showlegend"] = show_legend\n if 
ggplot2:\n if color_name:\n legend_annot = _legend_annotation(color_name)\n annotations.append(legend_annot)\n fig["layout"]["margin"]["r"] = 150\n\n # assign annotations to figure\n fig["layout"]["annotations"] = annotations\n\n # add shaded boxes behind axis titles\n if show_boxes and ggplot2:\n _add_shapes_to_fig(fig, ANNOT_RECT_COLOR, flipped_rows, flipped_cols)\n\n # all xaxis and yaxis labels\n axis_labels = {"x": [], "y": []}\n for key in fig["layout"]:\n if "xaxis" in key:\n axis_labels["x"].append(key)\n elif "yaxis" in key:\n axis_labels["y"].append(key)\n\n string_number_in_data = False\n for var in [v for v in [x, y] if v]:\n if isinstance(df[var].tolist()[0], str):\n for item in df[var]:\n try:\n int(item)\n string_number_in_data = True\n except ValueError:\n pass\n\n if string_number_in_data:\n for x_y in axis_labels.keys():\n for axis_name in axis_labels[x_y]:\n fig["layout"][axis_name]["type"] = "category"\n\n if scales == "fixed":\n fixed_axes = ["x", "y"]\n elif scales == "free_x":\n fixed_axes = ["y"]\n elif scales == "free_y":\n fixed_axes = ["x"]\n elif scales == "free":\n fixed_axes = []\n\n # fixed ranges\n for x_y in fixed_axes:\n min_ranges = []\n max_ranges = []\n for trace in fig["data"]:\n if trace[x_y] is not None and len(trace[x_y]) > 0:\n min_ranges.append(min(trace[x_y]))\n max_ranges.append(max(trace[x_y]))\n while None in min_ranges:\n min_ranges.remove(None)\n while None in max_ranges:\n max_ranges.remove(None)\n\n min_range = min(min_ranges)\n max_range = max(max_ranges)\n\n range_are_numbers = isinstance(min_range, Number) and isinstance(\n max_range, Number\n )\n\n if range_are_numbers:\n min_range = math.floor(min_range)\n max_range = math.ceil(max_range)\n\n # extend widen frame by 5% on each side\n min_range -= 0.05 * (max_range - min_range)\n max_range += 0.05 * (max_range - min_range)\n\n if x_y == "x":\n if dtick_x:\n dtick = dtick_x\n else:\n dtick = math.floor((max_range - min_range) / MAX_TICKS_PER_AXIS)\n elif x_y == 
"y":\n if dtick_y:\n dtick = dtick_y\n else:\n dtick = math.floor((max_range - min_range) / MAX_TICKS_PER_AXIS)\n else:\n dtick = 1\n\n for axis_title in axis_labels[x_y]:\n fig["layout"][axis_title]["dtick"] = dtick\n fig["layout"][axis_title]["ticklen"] = 0\n fig["layout"][axis_title]["zeroline"] = False\n if ggplot2:\n fig["layout"][axis_title]["tickwidth"] = 1\n fig["layout"][axis_title]["ticklen"] = 4\n fig["layout"][axis_title]["gridwidth"] = GRID_WIDTH\n\n fig["layout"][axis_title]["gridcolor"] = GRID_COLOR\n fig["layout"][axis_title]["gridwidth"] = 2\n fig["layout"][axis_title]["tickfont"] = {\n "color": TICK_COLOR,\n "size": 10,\n }\n\n # insert ranges into fig\n if x_y in fixed_axes:\n for key in fig["layout"]:\n if "{}axis".format(x_y) in key and range_are_numbers:\n fig["layout"][key]["range"] = [min_range, max_range]\n\n return fig\n
.venv\Lib\site-packages\plotly\figure_factory\_facet_grid.py
_facet_grid.py
Python
39,426
0.95
0.152301
0.02439
python-kit
485
2025-04-03T17:50:10.716923
MIT
false
0016eb18da612ffe5723eedccaad5e30
from numbers import Number\n\nimport copy\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nimport plotly.graph_objects as go\n\npd = optional_imports.get_module("pandas")\n\nREQUIRED_GANTT_KEYS = ["Task", "Start", "Finish"]\n\n\ndef _get_corner_points(x0, y0, x1, y1):\n """\n Returns the corner points of a scatter rectangle\n\n :param x0: x-start\n :param y0: y-lower\n :param x1: x-end\n :param y1: y-upper\n :return: ([x], [y]), tuple of lists containing the x and y values\n """\n\n return ([x0, x1, x1, x0], [y0, y0, y1, y1])\n\n\ndef validate_gantt(df):\n """\n Validates the inputted dataframe or list\n """\n if pd and isinstance(df, pd.core.frame.DataFrame):\n # validate that df has all the required keys\n for key in REQUIRED_GANTT_KEYS:\n if key not in df:\n raise exceptions.PlotlyError(\n "The columns in your dataframe must include the "\n "following keys: {0}".format(", ".join(REQUIRED_GANTT_KEYS))\n )\n\n num_of_rows = len(df.index)\n chart = []\n for index in range(num_of_rows):\n task_dict = {}\n for key in df:\n task_dict[key] = df.iloc[index][key]\n chart.append(task_dict)\n\n return chart\n\n # validate if df is a list\n if not isinstance(df, list):\n raise exceptions.PlotlyError(\n "You must input either a dataframe or a list of dictionaries."\n )\n\n # validate if df is empty\n if len(df) <= 0:\n raise exceptions.PlotlyError(\n "Your list is empty. 
It must contain at least one dictionary."\n )\n if not isinstance(df[0], dict):\n raise exceptions.PlotlyError("Your list must only include dictionaries.")\n return df\n\n\ndef gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n show_colorbar=True,\n):\n """\n Refer to create_gantt() for docstring\n """\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index]["Start"],\n x1=chart[index]["Finish"],\n name=chart[index]["Task"],\n )\n if "Description" in chart[index]:\n task["description"] = chart[index]["Description"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = "name"\n else:\n hoverinfo = "skip"\n\n scatter_data_template = {\n "x": [],\n "y": [],\n "mode": "none",\n "fill": "toself",\n "hoverinfo": hoverinfo,\n }\n\n marker_data_template = {\n "x": [],\n "y": [],\n "mode": "markers",\n "text": [],\n "marker": dict(color="", size=1, opacity=0),\n "name": "",\n "showlegend": False,\n }\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n color_index = 0\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n del tasks[index]["name"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = 
task_names.index(tn)\n tasks[index]["y0"] = groupID - bar_width\n tasks[index]["y1"] = groupID + bar_width\n\n # check if colors need to be looped\n if color_index >= len(colors):\n color_index = 0\n tasks[index]["fillcolor"] = colors[color_index]\n color_id = tasks[index]["fillcolor"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id]["fillcolor"] = color_id\n scatter_data_dict[color_id]["name"] = str(tn)\n scatter_data_dict[color_id]["legendgroup"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id]["x"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id]["x"].append(\n scatter_data_dict[color_id]["x"][-1]\n )\n scatter_data_dict[color_id]["y"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index]["x0"],\n tasks[index]["y0"],\n tasks[index]["x1"],\n tasks[index]["y1"],\n )\n\n scatter_data_dict[color_id]["x"] += xs\n scatter_data_dict[color_id]["y"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id]["marker"]["color"] = color_id\n marker_data_dict[color_id]["legendgroup"] = color_id\n\n marker_data_dict[color_id]["x"].append(tasks[index]["x0"])\n marker_data_dict[color_id]["x"].append(tasks[index]["x1"])\n marker_data_dict[color_id]["y"].append(groupID)\n marker_data_dict[color_id]["y"].append(groupID)\n\n if "description" in tasks[index]:\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n del tasks[index]["description"]\n else:\n marker_data_dict[color_id]["text"].append(None)\n marker_data_dict[color_id]["text"].append(None)\n\n color_index += 1\n\n showlegend = show_colorbar\n\n layout = dict(\n title=title,\n 
showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode="closest",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label="1w", step="day", stepmode="backward"),\n dict(count=1, label="1m", step="month", stepmode="backward"),\n dict(count=6, label="6m", step="month", stepmode="backward"),\n dict(count=1, label="YTD", step="year", stepmode="todate"),\n dict(count=1, label="1y", step="year", stepmode="backward"),\n dict(step="all"),\n ]\n )\n ),\n type="date",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n """\n Refer to FigureFactory.create_gantt() for docstring\n """\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index]["Start"],\n x1=chart[index]["Finish"],\n name=chart[index]["Task"],\n )\n if "Description" in chart[index]:\n task["description"] = chart[index]["Description"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = "name"\n else:\n hoverinfo = "skip"\n\n scatter_data_template = {\n "x": [],\n "y": [],\n "mode": "none",\n "fill": "toself",\n 
"showlegend": False,\n "hoverinfo": hoverinfo,\n "legendgroup": "",\n }\n\n marker_data_template = {\n "x": [],\n "y": [],\n "mode": "markers",\n "text": [],\n "marker": dict(color="", size=1, opacity=0),\n "name": "",\n "showlegend": False,\n "legendgroup": "",\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # compute the color for task based on indexing column\n if isinstance(chart[0][index_col], Number):\n # check that colors has at least 2 colors\n if len(colors) < 2:\n raise exceptions.PlotlyError(\n "You must use at least 2 colors in 'colors' if you "\n "are using a colorscale. However only the first two "\n "colors given will be used for the lower and upper "\n "bounds on the colormap."\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n del tasks[index]["name"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index]["y0"] = groupID - bar_width\n tasks[index]["y1"] = groupID + bar_width\n\n # unlabel color\n colors = clrs.color_parser(colors, clrs.unlabel_rgb)\n lowcolor = colors[0]\n highcolor = colors[1]\n\n intermed = (chart[index][index_col]) / 100.0\n intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)\n intermed_color = clrs.color_parser(intermed_color, clrs.label_rgb)\n tasks[index]["fillcolor"] = intermed_color\n color_id 
= tasks[index]["fillcolor"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id]["fillcolor"] = color_id\n scatter_data_dict[color_id]["name"] = str(chart[index][index_col])\n scatter_data_dict[color_id]["legendgroup"] = color_id\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id]["x"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id]["x"].append(\n scatter_data_dict[color_id]["x"][-1]\n )\n scatter_data_dict[color_id]["y"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index]["x0"],\n tasks[index]["y0"],\n tasks[index]["x1"],\n tasks[index]["y1"],\n )\n\n scatter_data_dict[color_id]["x"] += xs\n scatter_data_dict[color_id]["y"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id]["marker"]["color"] = color_id\n marker_data_dict[color_id]["legendgroup"] = color_id\n\n marker_data_dict[color_id]["x"].append(tasks[index]["x0"])\n marker_data_dict[color_id]["x"].append(tasks[index]["x1"])\n marker_data_dict[color_id]["y"].append(groupID)\n marker_data_dict[color_id]["y"].append(groupID)\n\n if "description" in tasks[index]:\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n del tasks[index]["description"]\n else:\n marker_data_dict[color_id]["text"].append(None)\n marker_data_dict[color_id]["text"].append(None)\n\n # add colorbar to one of the traces randomly just for display\n if show_colorbar is True:\n k = list(marker_data_dict.keys())[0]\n marker_data_dict[k]["marker"].update(\n dict(\n colorscale=[[0, colors[0]], [1, colors[1]]],\n showscale=True,\n 
cmax=100,\n cmin=0,\n )\n )\n\n if isinstance(chart[0][index_col], str):\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n if len(colors) < len(index_vals):\n raise exceptions.PlotlyError(\n "Error. The number of colors in 'colors' must be no less "\n "than the number of unique index values in your group "\n "column."\n )\n\n # make a dictionary assignment to each index value\n index_vals_dict = {}\n # define color index\n c_index = 0\n for key in index_vals:\n if c_index > len(colors) - 1:\n c_index = 0\n index_vals_dict[key] = colors[c_index]\n c_index += 1\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n del tasks[index]["name"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index]["y0"] = groupID - bar_width\n tasks[index]["y1"] = groupID + bar_width\n\n tasks[index]["fillcolor"] = index_vals_dict[chart[index][index_col]]\n color_id = tasks[index]["fillcolor"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id]["fillcolor"] = color_id\n scatter_data_dict[color_id]["legendgroup"] = color_id\n scatter_data_dict[color_id]["name"] = str(chart[index][index_col])\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values 
append the gap\n if len(scatter_data_dict[color_id]["x"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id]["x"].append(\n scatter_data_dict[color_id]["x"][-1]\n )\n scatter_data_dict[color_id]["y"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index]["x0"],\n tasks[index]["y0"],\n tasks[index]["x1"],\n tasks[index]["y1"],\n )\n\n scatter_data_dict[color_id]["x"] += xs\n scatter_data_dict[color_id]["y"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id]["marker"]["color"] = color_id\n marker_data_dict[color_id]["legendgroup"] = color_id\n\n marker_data_dict[color_id]["x"].append(tasks[index]["x0"])\n marker_data_dict[color_id]["x"].append(tasks[index]["x1"])\n marker_data_dict[color_id]["y"].append(groupID)\n marker_data_dict[color_id]["y"].append(groupID)\n\n if "description" in tasks[index]:\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n del tasks[index]["description"]\n else:\n marker_data_dict[color_id]["text"].append(None)\n marker_data_dict[color_id]["text"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n for k in scatter_data_dict:\n scatter_data_dict[k]["showlegend"] = showlegend\n # add colorbar to one of the traces randomly just for display\n # if show_colorbar is True:\n # k = list(marker_data_dict.keys())[0]\n # marker_data_dict[k]["marker"].update(\n # dict(\n # colorscale=[[0, colors[0]], [1, colors[1]]],\n # showscale=True,\n # cmax=100,\n # cmin=0,\n # )\n # )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode="closest",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n 
autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label="1w", step="day", stepmode="backward"),\n dict(count=1, label="1m", step="month", stepmode="backward"),\n dict(count=6, label="6m", step="month", stepmode="backward"),\n dict(count=1, label="YTD", step="year", stepmode="todate"),\n dict(count=1, label="1y", step="year", stepmode="backward"),\n dict(step="all"),\n ]\n )\n ),\n type="date",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n """\n Refer to FigureFactory.create_gantt() for docstring\n """\n\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index]["Start"],\n x1=chart[index]["Finish"],\n name=chart[index]["Task"],\n )\n if "Description" in chart[index]:\n task["description"] = chart[index]["Description"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = "name"\n else:\n hoverinfo = "skip"\n\n scatter_data_template = {\n "x": [],\n "y": [],\n "mode": "none",\n "fill": "toself",\n "hoverinfo": hoverinfo,\n "legendgroup": "",\n }\n\n marker_data_template = {\n "x": [],\n "y": [],\n "mode": "markers",\n "text": [],\n "marker": dict(color="", size=1, opacity=0),\n "name": "",\n "showlegend": False,\n }\n\n index_vals 
= []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # verify each value in index column appears in colors dictionary\n for key in index_vals:\n if key not in colors:\n raise exceptions.PlotlyError(\n "If you are using colors as a dictionary, all of its "\n "keys must be all the values in the index column."\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index]["name"]\n del tasks[index]["name"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index]["y0"] = groupID - bar_width\n tasks[index]["y1"] = groupID + bar_width\n\n tasks[index]["fillcolor"] = colors[chart[index][index_col]]\n color_id = tasks[index]["fillcolor"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id]["legendgroup"] = color_id\n scatter_data_dict[color_id]["fillcolor"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id]["x"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id]["x"].append(\n scatter_data_dict[color_id]["x"][-1]\n )\n scatter_data_dict[color_id]["y"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index]["x0"],\n tasks[index]["y0"],\n tasks[index]["x1"],\n tasks[index]["y1"],\n )\n\n scatter_data_dict[color_id]["x"] += 
xs\n scatter_data_dict[color_id]["y"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id]["marker"]["color"] = color_id\n marker_data_dict[color_id]["legendgroup"] = color_id\n\n marker_data_dict[color_id]["x"].append(tasks[index]["x0"])\n marker_data_dict[color_id]["x"].append(tasks[index]["x1"])\n marker_data_dict[color_id]["y"].append(groupID)\n marker_data_dict[color_id]["y"].append(groupID)\n\n if "description" in tasks[index]:\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n marker_data_dict[color_id]["text"].append(tasks[index]["description"])\n del tasks[index]["description"]\n else:\n marker_data_dict[color_id]["text"].append(None)\n marker_data_dict[color_id]["text"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n\n for index_value in index_vals:\n scatter_data_dict[colors[index_value]]["name"] = str(index_value)\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode="closest",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label="1w", step="day", stepmode="backward"),\n dict(count=1, label="1m", step="month", stepmode="backward"),\n dict(count=6, label="6m", step="month", stepmode="backward"),\n dict(count=1, label="YTD", step="year", stepmode="todate"),\n dict(count=1, label="1y", step="year", stepmode="backward"),\n dict(step="all"),\n ]\n )\n ),\n type="date",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig 
= go.Figure(data=data, layout=layout)\n return fig\n\n\ndef create_gantt(\n df,\n colors=None,\n index_col=None,\n show_colorbar=False,\n reverse_colors=False,\n title="Gantt Chart",\n bar_width=0.2,\n showgrid_x=False,\n showgrid_y=False,\n height=600,\n width=None,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n """\n **deprecated**, use instead\n :func:`plotly.express.timeline`.\n\n Returns figure for a gantt chart\n\n :param (array|list) df: input data for gantt chart. Must be either a\n a dataframe or a list. If dataframe, the columns must include\n 'Task', 'Start' and 'Finish'. Other columns can be included and\n used for indexing. If a list, its elements must be dictionaries\n with the same required column headers: 'Task', 'Start' and\n 'Finish'.\n :param (str|list|dict|tuple) colors: either a plotly scale name, an\n rgb or hex color, a color tuple or a list of colors. An rgb color\n is of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colors is a list, it must\n contain the valid color types aforementioned as its members.\n If a dictionary, all values of the indexing column must be keys in\n colors.\n :param (str|float) index_col: the column header (if df is a data\n frame) that will function as the indexing column. 
If df is a list,\n index_col must be one of the keys in all the items of df.\n :param (bool) show_colorbar: determines if colorbar will be visible.\n Only applies if values in the index column are numeric.\n :param (bool) show_hover_fill: enables/disables the hovertext for the\n filled area of the chart.\n :param (bool) reverse_colors: reverses the order of selected colors\n :param (str) title: the title of the chart\n :param (float) bar_width: the width of the horizontal bars in the plot\n :param (bool) showgrid_x: show/hide the x-axis grid\n :param (bool) showgrid_y: show/hide the y-axis grid\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple Gantt Chart\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task="Job A", Start='2009-01-01', Finish='2009-02-30'),\n ... dict(Task="Job B", Start='2009-03-05', Finish='2009-04-15'),\n ... dict(Task="Job C", Start='2009-02-20', Finish='2009-05-30')]\n\n >>> # Create a figure\n >>> fig = create_gantt(df)\n >>> fig.show()\n\n\n Example 2: Index by Column with Numerical Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task="Job A", Start='2009-01-01',\n ... Finish='2009-02-30', Complete=10),\n ... dict(Task="Job B", Start='2009-03-05',\n ... Finish='2009-04-15', Complete=60),\n ... dict(Task="Job C", Start='2009-02-20',\n ... Finish='2009-05-30', Complete=95)]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n\n\n Example 3: Index by Column with String Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task="Job A", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... 
dict(Task="Job B", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task="Job C", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],\n ... index_col='Resource', reverse_colors=True,\n ... show_colorbar=True)\n >>> fig.show()\n\n\n Example 4: Use a dictionary for colors\n\n >>> from plotly.figure_factory import create_gantt\n >>> # Make data for chart\n >>> df = [dict(Task="Job A", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... dict(Task="Job B", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task="Job C", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Make a dictionary of colors\n >>> colors = {'Apple': 'rgb(255, 0, 0)',\n ... 'Grape': 'rgb(170, 14, 200)',\n ... 'Banana': (1, 1, 0.2)}\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=colors, index_col='Resource',\n ... show_colorbar=True)\n\n >>> fig.show()\n\n Example 5: Use a pandas dataframe\n\n >>> from plotly.figure_factory import create_gantt\n >>> import pandas as pd\n\n >>> # Make data as a dataframe\n >>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],\n ... ['Fast', '2011-01-01', '2012-06-05', 55],\n ... ['Eat', '2012-01-05', '2013-07-05', 94]],\n ... columns=['Task', 'Start', 'Finish', 'Complete'])\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... 
showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n """\n # validate gantt input data\n chart = validate_gantt(df)\n\n if index_col:\n if index_col not in chart[0]:\n raise exceptions.PlotlyError(\n "In order to use an indexing column and assign colors to "\n "the values of the index, you must choose an actual "\n "column name in the dataframe or key if a list of "\n "dictionaries is being used."\n )\n\n # validate gantt index column\n index_list = []\n for dictionary in chart:\n index_list.append(dictionary[index_col])\n utils.validate_index(index_list)\n\n # Validate colors\n if isinstance(colors, dict):\n colors = clrs.validate_colors_dict(colors, "rgb")\n else:\n colors = clrs.validate_colors(colors, "rgb")\n\n if reverse_colors is True:\n colors.reverse()\n\n if not index_col:\n if isinstance(colors, dict):\n raise exceptions.PlotlyError(\n "Error. You have set colors to a dictionary but have not "\n "picked an index. An index is required if you are "\n "assigning colors to particular values in a dictionary."\n )\n fig = gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n show_colorbar=show_colorbar,\n )\n return fig\n else:\n if not isinstance(colors, dict):\n fig = gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n else:\n fig = gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n
.venv\Lib\site-packages\plotly\figure_factory\_gantt.py
_gantt.py
Python
34,760
0.95
0.152805
0.096738
python-kit
471
2024-05-26T07:10:43.645476
BSD-3-Clause
false
ecdabdd6fe1eea1d9c635ba7faa1e33b
from plotly.express._core import build_dataframe\nfrom plotly.express._doc import make_docstring\nfrom plotly.express._chart_types import choropleth_mapbox, scatter_mapbox\nimport narwhals.stable.v1 as nw\nimport numpy as np\n\n\ndef _project_latlon_to_wgs84(lat, lon):\n """\n Projects lat and lon to WGS84, used to get regular hexagons on a mapbox map\n """\n x = lon * np.pi / 180\n y = np.arctanh(np.sin(lat * np.pi / 180))\n return x, y\n\n\ndef _project_wgs84_to_latlon(x, y):\n """\n Projects WGS84 to lat and lon, used to get regular hexagons on a mapbox map\n """\n lon = x * 180 / np.pi\n lat = (2 * np.arctan(np.exp(y)) - np.pi / 2) * 180 / np.pi\n return lat, lon\n\n\ndef _getBoundsZoomLevel(lon_min, lon_max, lat_min, lat_max, mapDim):\n """\n Get the mapbox zoom level given bounds and a figure dimension\n Source: https://stackoverflow.com/questions/6048975/google-maps-v3-how-to-calculate-the-zoom-level-for-a-given-bounds\n """\n\n scale = (\n 2 # adjustment to reflect MapBox base tiles are 512x512 vs. 
Google's 256x256\n )\n WORLD_DIM = {"height": 256 * scale, "width": 256 * scale}\n ZOOM_MAX = 18\n\n def latRad(lat):\n sin = np.sin(lat * np.pi / 180)\n radX2 = np.log((1 + sin) / (1 - sin)) / 2\n return max(min(radX2, np.pi), -np.pi) / 2\n\n def zoom(mapPx, worldPx, fraction):\n return 0.95 * np.log(mapPx / worldPx / fraction) / np.log(2)\n\n latFraction = (latRad(lat_max) - latRad(lat_min)) / np.pi\n\n lngDiff = lon_max - lon_min\n lngFraction = ((lngDiff + 360) if lngDiff < 0 else lngDiff) / 360\n\n latZoom = zoom(mapDim["height"], WORLD_DIM["height"], latFraction)\n lngZoom = zoom(mapDim["width"], WORLD_DIM["width"], lngFraction)\n\n return min(latZoom, lngZoom, ZOOM_MAX)\n\n\ndef _compute_hexbin(x, y, x_range, y_range, color, nx, agg_func, min_count):\n """\n Computes the aggregation at hexagonal bin level.\n Also defines the coordinates of the hexagons for plotting.\n The binning is inspired by matplotlib's implementation.\n\n Parameters\n ----------\n x : np.ndarray\n Array of x values (shape N)\n y : np.ndarray\n Array of y values (shape N)\n x_range : np.ndarray\n Min and max x (shape 2)\n y_range : np.ndarray\n Min and max y (shape 2)\n color : np.ndarray\n Metric to aggregate at hexagon level (shape N)\n nx : int\n Number of hexagons horizontally\n agg_func : function\n Numpy compatible aggregator, this function must take a one-dimensional\n np.ndarray as input and output a scalar\n min_count : int\n Minimum number of points in the hexagon for the hexagon to be displayed\n\n Returns\n -------\n np.ndarray\n X coordinates of each hexagon (shape M x 6)\n np.ndarray\n Y coordinates of each hexagon (shape M x 6)\n np.ndarray\n Centers of the hexagons (shape M x 2)\n np.ndarray\n Aggregated value in each hexagon (shape M)\n\n """\n xmin = x_range.min()\n xmax = x_range.max()\n ymin = y_range.min()\n ymax = y_range.max()\n\n # In the x-direction, the hexagons exactly cover the region from\n # xmin to xmax. 
Need some padding to avoid roundoff errors.\n padding = 1.0e-9 * (xmax - xmin)\n xmin -= padding\n xmax += padding\n\n Dx = xmax - xmin\n Dy = ymax - ymin\n if Dx == 0 and Dy > 0:\n dx = Dy / nx\n elif Dx == 0 and Dy == 0:\n dx, _ = _project_latlon_to_wgs84(1, 1)\n else:\n dx = Dx / nx\n dy = dx * np.sqrt(3)\n ny = np.ceil(Dy / dy).astype(int)\n\n # Center the hexagons vertically since we only want regular hexagons\n ymin -= (ymin + dy * ny - ymax) / 2\n\n x = (x - xmin) / dx\n y = (y - ymin) / dy\n ix1 = np.round(x).astype(int)\n iy1 = np.round(y).astype(int)\n ix2 = np.floor(x).astype(int)\n iy2 = np.floor(y).astype(int)\n\n nx1 = nx + 1\n ny1 = ny + 1\n nx2 = nx\n ny2 = ny\n n = nx1 * ny1 + nx2 * ny2\n\n d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2\n d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2\n bdist = d1 < d2\n\n if color is None:\n lattice1 = np.zeros((nx1, ny1))\n lattice2 = np.zeros((nx2, ny2))\n c1 = (0 <= ix1) & (ix1 < nx1) & (0 <= iy1) & (iy1 < ny1) & bdist\n c2 = (0 <= ix2) & (ix2 < nx2) & (0 <= iy2) & (iy2 < ny2) & ~bdist\n np.add.at(lattice1, (ix1[c1], iy1[c1]), 1)\n np.add.at(lattice2, (ix2[c2], iy2[c2]), 1)\n if min_count is not None:\n lattice1[lattice1 < min_count] = np.nan\n lattice2[lattice2 < min_count] = np.nan\n accum = np.concatenate([lattice1.ravel(), lattice2.ravel()])\n good_idxs = ~np.isnan(accum)\n else:\n if min_count is None:\n min_count = 1\n\n # create accumulation arrays\n lattice1 = np.empty((nx1, ny1), dtype=object)\n for i in range(nx1):\n for j in range(ny1):\n lattice1[i, j] = []\n lattice2 = np.empty((nx2, ny2), dtype=object)\n for i in range(nx2):\n for j in range(ny2):\n lattice2[i, j] = []\n\n for i in range(len(x)):\n if bdist[i]:\n if 0 <= ix1[i] < nx1 and 0 <= iy1[i] < ny1:\n lattice1[ix1[i], iy1[i]].append(color[i])\n else:\n if 0 <= ix2[i] < nx2 and 0 <= iy2[i] < ny2:\n lattice2[ix2[i], iy2[i]].append(color[i])\n\n for i in range(nx1):\n for j in range(ny1):\n vals = lattice1[i, j]\n if len(vals) >= 
min_count:\n lattice1[i, j] = agg_func(vals)\n else:\n lattice1[i, j] = np.nan\n for i in range(nx2):\n for j in range(ny2):\n vals = lattice2[i, j]\n if len(vals) >= min_count:\n lattice2[i, j] = agg_func(vals)\n else:\n lattice2[i, j] = np.nan\n\n accum = np.hstack(\n (lattice1.astype(float).ravel(), lattice2.astype(float).ravel())\n )\n good_idxs = ~np.isnan(accum)\n\n agreggated_value = accum[good_idxs]\n\n centers = np.zeros((n, 2), float)\n centers[: nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)\n centers[: nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)\n centers[nx1 * ny1 :, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)\n centers[nx1 * ny1 :, 1] = np.tile(np.arange(ny2), nx2) + 0.5\n centers[:, 0] *= dx\n centers[:, 1] *= dy\n centers[:, 0] += xmin\n centers[:, 1] += ymin\n centers = centers[good_idxs]\n\n # Define normalised regular hexagon coordinates\n hx = [0, 0.5, 0.5, 0, -0.5, -0.5]\n hy = [\n -0.5 / np.cos(np.pi / 6),\n -0.5 * np.tan(np.pi / 6),\n 0.5 * np.tan(np.pi / 6),\n 0.5 / np.cos(np.pi / 6),\n 0.5 * np.tan(np.pi / 6),\n -0.5 * np.tan(np.pi / 6),\n ]\n\n # Number of hexagons needed\n m = len(centers)\n\n # Coordinates for all hexagonal patches\n hxs = np.array([hx] * m) * dx + np.vstack(centers[:, 0])\n hys = np.array([hy] * m) * dy / np.sqrt(3) + np.vstack(centers[:, 1])\n\n return hxs, hys, centers, agreggated_value\n\n\ndef _compute_wgs84_hexbin(\n lat=None,\n lon=None,\n lat_range=None,\n lon_range=None,\n color=None,\n nx=None,\n agg_func=None,\n min_count=None,\n native_namespace=None,\n):\n """\n Computes the lat-lon aggregation at hexagonal bin level.\n Latitude and longitude need to be projected to WGS84 before aggregating\n in order to display regular hexagons on the map.\n\n Parameters\n ----------\n lat : np.ndarray\n Array of latitudes (shape N)\n lon : np.ndarray\n Array of longitudes (shape N)\n lat_range : np.ndarray\n Min and max latitudes (shape 2)\n lon_range : np.ndarray\n Min and max longitudes (shape 2)\n color : np.ndarray\n 
Metric to aggregate at hexagon level (shape N)\n nx : int\n Number of hexagons horizontally\n agg_func : function\n Numpy compatible aggregator, this function must take a one-dimensional\n np.ndarray as input and output a scalar\n min_count : int\n Minimum number of points in the hexagon for the hexagon to be displayed\n\n Returns\n -------\n np.ndarray\n Lat coordinates of each hexagon (shape M x 6)\n np.ndarray\n Lon coordinates of each hexagon (shape M x 6)\n nw.Series\n Unique id for each hexagon, to be used in the geojson data (shape M)\n np.ndarray\n Aggregated value in each hexagon (shape M)\n\n """\n # Project to WGS 84\n x, y = _project_latlon_to_wgs84(lat, lon)\n\n if lat_range is None:\n lat_range = np.array([lat.min(), lat.max()])\n if lon_range is None:\n lon_range = np.array([lon.min(), lon.max()])\n\n x_range, y_range = _project_latlon_to_wgs84(lat_range, lon_range)\n\n hxs, hys, centers, agreggated_value = _compute_hexbin(\n x, y, x_range, y_range, color, nx, agg_func, min_count\n )\n\n # Convert back to lat-lon\n hexagons_lats, hexagons_lons = _project_wgs84_to_latlon(hxs, hys)\n\n # Create unique feature id based on hexagon center\n centers = centers.astype(str)\n hexagons_ids = (\n nw.from_dict(\n {"x1": centers[:, 0], "x2": centers[:, 1]},\n native_namespace=native_namespace,\n )\n .select(hexagons_ids=nw.concat_str([nw.col("x1"), nw.col("x2")], separator=","))\n .get_column("hexagons_ids")\n )\n\n return hexagons_lats, hexagons_lons, hexagons_ids, agreggated_value\n\n\ndef _hexagons_to_geojson(hexagons_lats, hexagons_lons, ids=None):\n """\n Creates a geojson of hexagonal features based on the outputs of\n _compute_wgs84_hexbin\n """\n features = []\n if ids is None:\n ids = np.arange(len(hexagons_lats))\n for lat, lon, idx in zip(hexagons_lats, hexagons_lons, ids):\n points = np.array([lon, lat]).T.tolist()\n points.append(points[0])\n features.append(\n dict(\n type="Feature",\n id=idx,\n geometry=dict(type="Polygon", coordinates=[points]),\n 
)\n )\n return dict(type="FeatureCollection", features=features)\n\n\ndef create_hexbin_mapbox(\n data_frame=None,\n lat=None,\n lon=None,\n color=None,\n nx_hexagon=5,\n agg_func=None,\n animation_frame=None,\n color_discrete_sequence=None,\n color_discrete_map={},\n labels={},\n color_continuous_scale=None,\n range_color=None,\n color_continuous_midpoint=None,\n opacity=None,\n zoom=None,\n center=None,\n mapbox_style=None,\n title=None,\n template=None,\n width=None,\n height=None,\n min_count=None,\n show_original_data=False,\n original_data_marker=None,\n):\n """\n Returns a figure aggregating scattered points into connected hexagons\n """\n args = build_dataframe(args=locals(), constructor=None)\n native_namespace = nw.get_native_namespace(args["data_frame"])\n if agg_func is None:\n agg_func = np.mean\n\n lat_range = (\n args["data_frame"]\n .select(\n nw.min(args["lat"]).name.suffix("_min"),\n nw.max(args["lat"]).name.suffix("_max"),\n )\n .to_numpy()\n .squeeze()\n )\n\n lon_range = (\n args["data_frame"]\n .select(\n nw.min(args["lon"]).name.suffix("_min"),\n nw.max(args["lon"]).name.suffix("_max"),\n )\n .to_numpy()\n .squeeze()\n )\n\n hexagons_lats, hexagons_lons, hexagons_ids, count = _compute_wgs84_hexbin(\n lat=args["data_frame"].get_column(args["lat"]).to_numpy(),\n lon=args["data_frame"].get_column(args["lon"]).to_numpy(),\n lat_range=lat_range,\n lon_range=lon_range,\n color=None,\n nx=nx_hexagon,\n agg_func=agg_func,\n min_count=min_count,\n native_namespace=native_namespace,\n )\n\n geojson = _hexagons_to_geojson(hexagons_lats, hexagons_lons, hexagons_ids)\n\n if zoom is None:\n if height is None and width is None:\n mapDim = dict(height=450, width=450)\n elif height is None and width is not None:\n mapDim = dict(height=450, width=width)\n elif height is not None and width is None:\n mapDim = dict(height=height, width=height)\n else:\n mapDim = dict(height=height, width=width)\n zoom = _getBoundsZoomLevel(\n lon_range[0], lon_range[1], 
lat_range[0], lat_range[1], mapDim\n )\n\n if center is None:\n center = dict(lat=lat_range.mean(), lon=lon_range.mean())\n\n if args["animation_frame"] is not None:\n groups = dict(\n args["data_frame"]\n .group_by(args["animation_frame"], drop_null_keys=True)\n .__iter__()\n )\n else:\n groups = {(0,): args["data_frame"]}\n\n agg_data_frame_list = []\n for key, df in groups.items():\n _, _, hexagons_ids, aggregated_value = _compute_wgs84_hexbin(\n lat=df.get_column(args["lat"]).to_numpy(),\n lon=df.get_column(args["lon"]).to_numpy(),\n lat_range=lat_range,\n lon_range=lon_range,\n color=df.get_column(args["color"]).to_numpy() if args["color"] else None,\n nx=nx_hexagon,\n agg_func=agg_func,\n min_count=min_count,\n native_namespace=native_namespace,\n )\n agg_data_frame_list.append(\n nw.from_dict(\n {\n "frame": [key[0]] * len(hexagons_ids),\n "locations": hexagons_ids,\n "color": aggregated_value,\n },\n native_namespace=native_namespace,\n )\n )\n\n agg_data_frame = nw.concat(agg_data_frame_list, how="vertical").with_columns(\n color=nw.col("color").cast(nw.Int64)\n )\n\n if range_color is None:\n range_color = [agg_data_frame["color"].min(), agg_data_frame["color"].max()]\n\n fig = choropleth_mapbox(\n data_frame=agg_data_frame.to_native(),\n geojson=geojson,\n locations="locations",\n color="color",\n hover_data={"color": True, "locations": False, "frame": False},\n animation_frame=("frame" if args["animation_frame"] is not None else None),\n color_discrete_sequence=color_discrete_sequence,\n color_discrete_map=color_discrete_map,\n labels=labels,\n color_continuous_scale=color_continuous_scale,\n range_color=range_color,\n color_continuous_midpoint=color_continuous_midpoint,\n opacity=opacity,\n zoom=zoom,\n center=center,\n mapbox_style=mapbox_style,\n title=title,\n template=template,\n width=width,\n height=height,\n )\n\n if show_original_data:\n original_fig = scatter_mapbox(\n data_frame=(\n args["data_frame"].sort(\n by=args["animation_frame"], 
descending=False, nulls_last=True\n )\n if args["animation_frame"] is not None\n else args["data_frame"]\n ).to_native(),\n lat=args["lat"],\n lon=args["lon"],\n animation_frame=args["animation_frame"],\n )\n original_fig.data[0].hoverinfo = "skip"\n original_fig.data[0].hovertemplate = None\n original_fig.data[0].marker = original_data_marker\n\n fig.add_trace(original_fig.data[0])\n\n if args["animation_frame"] is not None:\n for i in range(len(original_fig.frames)):\n original_fig.frames[i].data[0].hoverinfo = "skip"\n original_fig.frames[i].data[0].hovertemplate = None\n original_fig.frames[i].data[0].marker = original_data_marker\n\n fig.frames[i].data = [\n fig.frames[i].data[0],\n original_fig.frames[i].data[0],\n ]\n\n return fig\n\n\ncreate_hexbin_mapbox.__doc__ = make_docstring(\n create_hexbin_mapbox,\n override_dict=dict(\n nx_hexagon=["int", "Number of hexagons (horizontally) to be created"],\n agg_func=[\n "function",\n "Numpy array aggregator, it must take as input a 1D array",\n "and output a scalar value.",\n ],\n min_count=[\n "int",\n "Minimum number of points in a hexagon for it to be displayed.",\n "If None and color is not set, display all hexagons.",\n "If None and color is set, only display hexagons that contain points.",\n ],\n show_original_data=[\n "bool",\n "Whether to show the original data on top of the hexbin aggregation.",\n ],\n original_data_marker=["dict", "Scattermapbox marker options."],\n ),\n)\n
.venv\Lib\site-packages\plotly\figure_factory\_hexbin_mapbox.py
_hexbin_mapbox.py
Python
16,682
0.95
0.108365
0.021834
awesome-app
197
2025-06-01T14:36:25.083706
BSD-3-Clause
false
f8a4cde7fbc100d56ce0d15b4e9ef528
from plotly import exceptions\nfrom plotly.graph_objs import graph_objs\nfrom plotly.figure_factory import utils\n\n\n# Default colours for finance charts\n_DEFAULT_INCREASING_COLOR = "#3D9970" # http://clrs.cc\n_DEFAULT_DECREASING_COLOR = "#FF4136"\n\n\ndef validate_ohlc(open, high, low, close, direction, **kwargs):\n """\n ohlc and candlestick specific validations\n\n Specifically, this checks that the high value is the greatest value and\n the low value is the lowest value in each unit.\n\n See FigureFactory.create_ohlc() or FigureFactory.create_candlestick()\n for params\n\n :raises: (PlotlyError) If the high value is not the greatest value in\n each unit.\n :raises: (PlotlyError) If the low value is not the lowest value in each\n unit.\n :raises: (PlotlyError) If direction is not 'increasing' or 'decreasing'\n """\n for lst in [open, low, close]:\n for index in range(len(high)):\n if high[index] < lst[index]:\n raise exceptions.PlotlyError(\n "Oops! Looks like some of "\n "your high values are less "\n "the corresponding open, "\n "low, or close values. "\n "Double check that your data "\n "is entered in O-H-L-C order"\n )\n\n for lst in [open, high, close]:\n for index in range(len(low)):\n if low[index] > lst[index]:\n raise exceptions.PlotlyError(\n "Oops! Looks like some of "\n "your low values are greater "\n "than the corresponding high"\n ", open, or close values. 
"\n "Double check that your data "\n "is entered in O-H-L-C order"\n )\n\n direction_opts = ("increasing", "decreasing", "both")\n if direction not in direction_opts:\n raise exceptions.PlotlyError(\n "direction must be defined as 'increasing', 'decreasing', or 'both'"\n )\n\n\ndef make_increasing_ohlc(open, high, low, close, dates, **kwargs):\n """\n Makes increasing ohlc sticks\n\n _make_increasing_ohlc() and _make_decreasing_ohlc separate the\n increasing trace from the decreasing trace so kwargs (such as\n color) can be passed separately to increasing or decreasing traces\n when direction is set to 'increasing' or 'decreasing' in\n FigureFactory.create_candlestick()\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_incr_data: Scatter trace of all increasing ohlc\n sticks.\n """\n (flat_increase_x, flat_increase_y, text_increase) = _OHLC(\n open, high, low, close, dates\n ).get_increase()\n\n if "name" in kwargs:\n showlegend = True\n else:\n kwargs.setdefault("name", "Increasing")\n showlegend = False\n\n kwargs.setdefault("line", dict(color=_DEFAULT_INCREASING_COLOR, width=1))\n kwargs.setdefault("text", text_increase)\n\n ohlc_incr = dict(\n type="scatter",\n x=flat_increase_x,\n y=flat_increase_y,\n mode="lines",\n showlegend=showlegend,\n **kwargs,\n )\n return ohlc_incr\n\n\ndef make_decreasing_ohlc(open, high, low, close, dates, **kwargs):\n """\n Makes decreasing ohlc sticks\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing values\n :param (list) dates: list of datetime objects. 
Default: None\n :param kwargs: kwargs to be passed to increasing trace via\n plotly.graph_objs.Scatter.\n\n :rtype (trace) ohlc_decr_data: Scatter trace of all decreasing ohlc\n sticks.\n """\n (flat_decrease_x, flat_decrease_y, text_decrease) = _OHLC(\n open, high, low, close, dates\n ).get_decrease()\n\n kwargs.setdefault("line", dict(color=_DEFAULT_DECREASING_COLOR, width=1))\n kwargs.setdefault("text", text_decrease)\n kwargs.setdefault("showlegend", False)\n kwargs.setdefault("name", "Decreasing")\n\n ohlc_decr = dict(\n type="scatter", x=flat_decrease_x, y=flat_decrease_y, mode="lines", **kwargs\n )\n return ohlc_decr\n\n\ndef create_ohlc(open, high, low, close, dates=None, direction="both", **kwargs):\n """\n **deprecated**, use instead the plotly.graph_objects trace\n :class:`plotly.graph_objects.Ohlc`\n\n :param (list) open: opening values\n :param (list) high: high values\n :param (list) low: low values\n :param (list) close: closing\n :param (list) dates: list of datetime objects. Default: None\n :param (string) direction: direction can be 'increasing', 'decreasing',\n or 'both'. When the direction is 'increasing', the returned figure\n consists of all units where the close value is greater than the\n corresponding open value, and when the direction is 'decreasing',\n the returned figure consists of all units where the close value is\n less than or equal to the corresponding open value. When the\n direction is 'both', both increasing and decreasing units are\n returned. Default: 'both'\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter.\n These kwargs describe other attributes about the ohlc Scatter trace\n such as the color or the legend name. 
For more information on valid\n kwargs call help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of an ohlc chart figure.\n\n Example 1: Simple OHLC chart from a Pandas DataFrame\n\n >>> from plotly.figure_factory import create_ohlc\n >>> from datetime import datetime\n\n >>> import pandas as pd\n >>> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv')\n >>> fig = create_ohlc(df['AAPL.Open'], df['AAPL.High'], df['AAPL.Low'], df['AAPL.Close'], dates=df.index)\n >>> fig.show()\n """\n if dates is not None:\n utils.validate_equal_length(open, high, low, close, dates)\n else:\n utils.validate_equal_length(open, high, low, close)\n validate_ohlc(open, high, low, close, direction, **kwargs)\n\n if direction == "increasing":\n ohlc_incr = make_increasing_ohlc(open, high, low, close, dates, **kwargs)\n data = [ohlc_incr]\n elif direction == "decreasing":\n ohlc_decr = make_decreasing_ohlc(open, high, low, close, dates, **kwargs)\n data = [ohlc_decr]\n else:\n ohlc_incr = make_increasing_ohlc(open, high, low, close, dates, **kwargs)\n ohlc_decr = make_decreasing_ohlc(open, high, low, close, dates, **kwargs)\n data = [ohlc_incr, ohlc_decr]\n\n layout = graph_objs.Layout(xaxis=dict(zeroline=False), hovermode="closest")\n\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _OHLC(object):\n """\n Refer to FigureFactory.create_ohlc_increase() for docstring.\n """\n\n def __init__(self, open, high, low, close, dates, **kwargs):\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.empty = [None] * len(open)\n self.dates = dates\n\n self.all_x = []\n self.all_y = []\n self.increase_x = []\n self.increase_y = []\n self.decrease_x = []\n self.decrease_y = []\n self.get_all_xy()\n self.separate_increase_decrease()\n\n def get_all_xy(self):\n """\n Zip data to create OHLC shape\n\n OHLC shape: low to high vertical bar with\n horizontal branches for open and close values.\n 
If dates were added, the smallest date difference is calculated and\n multiplied by .2 to get the length of the open and close branches.\n If no date data was provided, the x-axis is a list of integers and the\n length of the open and close branches is .2.\n """\n self.all_y = list(\n zip(\n self.open,\n self.open,\n self.high,\n self.low,\n self.close,\n self.close,\n self.empty,\n )\n )\n if self.dates is not None:\n date_dif = []\n for i in range(len(self.dates) - 1):\n date_dif.append(self.dates[i + 1] - self.dates[i])\n date_dif_min = (min(date_dif)) / 5\n self.all_x = [\n [x - date_dif_min, x, x, x, x, x + date_dif_min, None]\n for x in self.dates\n ]\n else:\n self.all_x = [\n [x - 0.2, x, x, x, x, x + 0.2, None] for x in range(len(self.open))\n ]\n\n def separate_increase_decrease(self):\n """\n Separate data into two groups: increase and decrease\n\n (1) Increase, where close > open and\n (2) Decrease, where close <= open\n """\n for index in range(len(self.open)):\n if self.close[index] is None:\n pass\n elif self.close[index] > self.open[index]:\n self.increase_x.append(self.all_x[index])\n self.increase_y.append(self.all_y[index])\n else:\n self.decrease_x.append(self.all_x[index])\n self.decrease_y.append(self.all_y[index])\n\n def get_increase(self):\n """\n Flatten increase data and get increase text\n\n :rtype (list, list, list): flat_increase_x: x-values for the increasing\n trace, flat_increase_y: y=values for the increasing trace and\n text_increase: hovertext for the increasing trace\n """\n flat_increase_x = utils.flatten(self.increase_x)\n flat_increase_y = utils.flatten(self.increase_y)\n text_increase = ("Open", "Open", "High", "Low", "Close", "Close", "") * (\n len(self.increase_x)\n )\n\n return flat_increase_x, flat_increase_y, text_increase\n\n def get_decrease(self):\n """\n Flatten decrease data and get decrease text\n\n :rtype (list, list, list): flat_decrease_x: x-values for the decreasing\n trace, flat_decrease_y: y=values for the 
decreasing trace and\n text_decrease: hovertext for the decreasing trace\n """\n flat_decrease_x = utils.flatten(self.decrease_x)\n flat_decrease_y = utils.flatten(self.decrease_y)\n text_decrease = ("Open", "Open", "High", "Low", "Close", "Close", "") * (\n len(self.decrease_x)\n )\n\n return flat_decrease_x, flat_decrease_y, text_decrease\n
.venv\Lib\site-packages\plotly\figure_factory\_ohlc.py
_ohlc.py
Python
10,695
0.95
0.125424
0.012097
node-utils
379
2025-01-27T13:09:26.019065
GPL-3.0
false
7b6b8603a45f8e8d65747c5558b09b48
import math\n\nfrom plotly import exceptions\nfrom plotly.graph_objs import graph_objs\nfrom plotly.figure_factory import utils\n\n\ndef create_quiver(\n x, y, u, v, scale=0.1, arrow_scale=0.3, angle=math.pi / 9, scaleratio=None, **kwargs\n):\n """\n Returns data for a quiver plot.\n\n :param (list|ndarray) x: x coordinates of the arrow locations\n :param (list|ndarray) y: y coordinates of the arrow locations\n :param (list|ndarray) u: x components of the arrow vectors\n :param (list|ndarray) v: y components of the arrow vectors\n :param (float in [0,1]) scale: scales size of the arrows(ideally to\n avoid overlap). Default = .1\n :param (float in [0,1]) arrow_scale: value multiplied to length of barb\n to get length of arrowhead. Default = .3\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param (positive float) scaleratio: the ratio between the scale of the y-axis\n and the scale of the x-axis (scale_y / scale_x). Default = None, the\n scale ratio is not fixed.\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of quiver figure.\n\n Example 1: Trivial Quiver\n\n >>> from plotly.figure_factory import create_quiver\n >>> import math\n\n >>> # 1 Arrow from (0,0) to (1,1)\n >>> fig = create_quiver(x=[0], y=[0], u=[1], v=[1], scale=1)\n >>> fig.show()\n\n\n Example 2: Quiver plot using meshgrid\n\n >>> from plotly.figure_factory import create_quiver\n\n >>> import numpy as np\n >>> import math\n\n >>> # Add data\n >>> x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))\n >>> u = np.cos(x)*y\n >>> v = np.sin(x)*y\n\n >>> #Create quiver\n >>> fig = create_quiver(x, y, u, v)\n >>> fig.show()\n\n\n Example 3: Styling the quiver plot\n\n >>> from plotly.figure_factory import create_quiver\n >>> import numpy as np\n >>> import math\n\n >>> # Add data\n >>> x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),\n 
... np.arange(-math.pi, math.pi, .5))\n >>> u = np.cos(x)*y\n >>> v = np.sin(x)*y\n\n >>> # Create quiver\n >>> fig = create_quiver(x, y, u, v, scale=.2, arrow_scale=.3, angle=math.pi/6,\n ... name='Wind Velocity', line=dict(width=1))\n\n >>> # Add title to layout\n >>> fig.update_layout(title='Quiver Plot') # doctest: +SKIP\n >>> fig.show()\n\n\n Example 4: Forcing a fix scale ratio to maintain the arrow length\n\n >>> from plotly.figure_factory import create_quiver\n >>> import numpy as np\n\n >>> # Add data\n >>> x,y = np.meshgrid(np.arange(0.5, 3.5, .5), np.arange(0.5, 4.5, .5))\n >>> u = x\n >>> v = y\n >>> angle = np.arctan(v / u)\n >>> norm = 0.25\n >>> u = norm * np.cos(angle)\n >>> v = norm * np.sin(angle)\n\n >>> # Create quiver with a fix scale ratio\n >>> fig = create_quiver(x, y, u, v, scale = 1, scaleratio = 0.5)\n >>> fig.show()\n """\n utils.validate_equal_length(x, y, u, v)\n utils.validate_positive_scalars(arrow_scale=arrow_scale, scale=scale)\n\n if scaleratio is None:\n quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle)\n else:\n quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle, scaleratio)\n\n barb_x, barb_y = quiver_obj.get_barbs()\n arrow_x, arrow_y = quiver_obj.get_quiver_arrows()\n\n quiver_plot = graph_objs.Scatter(\n x=barb_x + arrow_x, y=barb_y + arrow_y, mode="lines", **kwargs\n )\n\n data = [quiver_plot]\n\n if scaleratio is None:\n layout = graph_objs.Layout(hovermode="closest")\n else:\n layout = graph_objs.Layout(\n hovermode="closest", yaxis=dict(scaleratio=scaleratio, scaleanchor="x")\n )\n\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Quiver(object):\n """\n Refer to FigureFactory.create_quiver() for docstring\n """\n\n def __init__(self, x, y, u, v, scale, arrow_scale, angle, scaleratio=1, **kwargs):\n try:\n x = utils.flatten(x)\n except exceptions.PlotlyError:\n pass\n\n try:\n y = utils.flatten(y)\n except exceptions.PlotlyError:\n pass\n\n try:\n u = utils.flatten(u)\n except 
exceptions.PlotlyError:\n pass\n\n try:\n v = utils.flatten(v)\n except exceptions.PlotlyError:\n pass\n\n self.x = x\n self.y = y\n self.u = u\n self.v = v\n self.scale = scale\n self.scaleratio = scaleratio\n self.arrow_scale = arrow_scale\n self.angle = angle\n self.end_x = []\n self.end_y = []\n self.scale_uv()\n barb_x, barb_y = self.get_barbs()\n arrow_x, arrow_y = self.get_quiver_arrows()\n\n def scale_uv(self):\n """\n Scales u and v to avoid overlap of the arrows.\n\n u and v are added to x and y to get the\n endpoints of the arrows so a smaller scale value will\n result in less overlap of arrows.\n """\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]\n\n def get_barbs(self):\n """\n Creates x and y startpoint and endpoint pairs\n\n After finding the endpoint of each barb this zips startpoint and\n endpoint pairs to create 2 lists: x_values for barbs and y values\n for barbs\n\n :rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint\n x_value pairs separated by a None to create the barb of the arrow,\n and list of startpoint and endpoint y_value pairs separated by a\n None to create the barb of the arrow.\n """\n self.end_x = [i + j for i, j in zip(self.x, self.u)]\n self.end_y = [i + j for i, j in zip(self.y, self.v)]\n empty = [None] * len(self.x)\n barb_x = utils.flatten(zip(self.x, self.end_x, empty))\n barb_y = utils.flatten(zip(self.y, self.end_y, empty))\n return barb_x, barb_y\n\n def get_quiver_arrows(self):\n """\n Creates lists of x and y values to plot the arrows\n\n Gets length of each barb then calculates the length of each side of\n the arrow. Gets angle of barb and applies angle to each side of the\n arrowhead. Next uses arrow_scale to scale the length of arrowhead and\n creates x and y values for arrowhead point1 and point2. 
Finally x and y\n values for point1, endpoint and point2s for each arrowhead are\n separated by a None and zipped to create lists of x and y values for\n the arrows.\n\n :rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint, point2\n x_values separated by a None to create the arrowhead and list of\n point1, endpoint, point2 y_values separated by a None to create\n the barb of the arrow.\n """\n dif_x = [i - j for i, j in zip(self.end_x, self.x)]\n dif_y = [i - j for i, j in zip(self.end_y, self.y)]\n\n # Get barb lengths(default arrow length = 30% barb length)\n barb_len = [None] * len(self.x)\n for index in range(len(barb_len)):\n barb_len[index] = math.hypot(dif_x[index] / self.scaleratio, dif_y[index])\n\n # Make arrow lengths\n arrow_len = [None] * len(self.x)\n arrow_len = [i * self.arrow_scale for i in barb_len]\n\n # Get barb angles\n barb_ang = [None] * len(self.x)\n for index in range(len(barb_ang)):\n barb_ang[index] = math.atan2(dif_y[index], dif_x[index] / self.scaleratio)\n\n # Set angles to create arrow\n ang1 = [i + self.angle for i in barb_ang]\n ang2 = [i - self.angle for i in barb_ang]\n\n cos_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n cos_ang1[index] = math.cos(ang1[index])\n seg1_x = [i * j for i, j in zip(arrow_len, cos_ang1)]\n\n sin_ang1 = [None] * len(ang1)\n for index in range(len(ang1)):\n sin_ang1[index] = math.sin(ang1[index])\n seg1_y = [i * j for i, j in zip(arrow_len, sin_ang1)]\n\n cos_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n cos_ang2[index] = math.cos(ang2[index])\n seg2_x = [i * j for i, j in zip(arrow_len, cos_ang2)]\n\n sin_ang2 = [None] * len(ang2)\n for index in range(len(ang2)):\n sin_ang2[index] = math.sin(ang2[index])\n seg2_y = [i * j for i, j in zip(arrow_len, sin_ang2)]\n\n # Set coordinates to create arrow\n for index in range(len(self.end_x)):\n point1_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg1_x)]\n point1_y = [i - j for i, j in zip(self.end_y, 
seg1_y)]\n point2_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg2_x)]\n point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]\n\n # Combine lists to create arrow\n empty = [None] * len(self.end_x)\n arrow_x = utils.flatten(zip(point1_x, self.end_x, point2_x, empty))\n arrow_y = utils.flatten(zip(point1_y, self.end_y, point2_y, empty))\n return arrow_x, arrow_y\n
.venv\Lib\site-packages\plotly\figure_factory\_quiver.py
_quiver.py
Python
9,181
0.95
0.169811
0.028708
vue-tools
847
2024-09-16T19:32:54.711052
BSD-3-Clause
false
29e8bf8c288655775edda70801e5307f
from plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nfrom plotly.graph_objs import graph_objs\nfrom plotly.subplots import make_subplots\n\npd = optional_imports.get_module("pandas")\n\nDIAG_CHOICES = ["scatter", "histogram", "box"]\nVALID_COLORMAP_TYPES = ["cat", "seq"]\n\n\ndef endpts_to_intervals(endpts):\n """\n Returns a list of intervals for categorical colormaps\n\n Accepts a list or tuple of sequentially increasing numbers and returns\n a list representation of the mathematical intervals with these numbers\n as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]\n\n :raises: (PlotlyError) If input is not a list or tuple\n :raises: (PlotlyError) If the input contains a string\n :raises: (PlotlyError) If any number does not increase after the\n previous one in the sequence\n """\n length = len(endpts)\n # Check if endpts is a list or tuple\n if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):\n raise exceptions.PlotlyError(\n "The intervals_endpts argument must "\n "be a list or tuple of a sequence "\n "of increasing numbers."\n )\n # Check if endpts contains only numbers\n for item in endpts:\n if isinstance(item, str):\n raise exceptions.PlotlyError(\n "The intervals_endpts argument "\n "must be a list or tuple of a "\n "sequence of increasing "\n "numbers."\n )\n # Check if numbers in endpts are increasing\n for k in range(length - 1):\n if endpts[k] >= endpts[k + 1]:\n raise exceptions.PlotlyError(\n "The intervals_endpts argument "\n "must be a list or tuple of a "\n "sequence of increasing "\n "numbers."\n )\n else:\n intervals = []\n # add -inf to intervals\n intervals.append([float("-inf"), endpts[0]])\n for k in range(length - 1):\n interval = []\n interval.append(endpts[k])\n interval.append(endpts[k + 1])\n intervals.append(interval)\n # add +inf to intervals\n intervals.append([endpts[length - 1], float("inf")])\n return intervals\n\n\ndef 
hide_tick_labels_from_box_subplots(fig):\n """\n Hides tick labels for box plots in scatterplotmatrix subplots.\n """\n boxplot_xaxes = []\n for trace in fig["data"]:\n if trace["type"] == "box":\n # stores the xaxes which correspond to boxplot subplots\n # since we use xaxis1, xaxis2, etc, in plotly.py\n boxplot_xaxes.append("xaxis{}".format(trace["xaxis"][1:]))\n for xaxis in boxplot_xaxes:\n fig["layout"][xaxis]["showticklabels"] = False\n\n\ndef validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):\n """\n Validates basic inputs for FigureFactory.create_scatterplotmatrix()\n\n :raises: (PlotlyError) If pandas is not imported\n :raises: (PlotlyError) If pandas dataframe is not inputted\n :raises: (PlotlyError) If pandas dataframe has <= 1 columns\n :raises: (PlotlyError) If diagonal plot choice (diag) is not one of\n the viable options\n :raises: (PlotlyError) If colormap_type is not a valid choice\n :raises: (PlotlyError) If kwargs contains 'size', 'color' or\n 'colorscale'\n """\n if not pd:\n raise ImportError(\n "FigureFactory.scatterplotmatrix requires a pandas DataFrame."\n )\n\n # Check if pandas dataframe\n if not isinstance(df, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\n "Dataframe not inputed. Please "\n "use a pandas dataframe to pro"\n "duce a scatterplot matrix."\n )\n\n # Check if dataframe is 1 column or less\n if len(df.columns) <= 1:\n raise exceptions.PlotlyError(\n "Dataframe has only one column. To "\n "use the scatterplot matrix, use at "\n "least 2 columns."\n )\n\n # Check that diag parameter is a valid selection\n if diag not in DIAG_CHOICES:\n raise exceptions.PlotlyError(\n "Make sure diag is set to one of {}".format(DIAG_CHOICES)\n )\n\n # Check that colormap_types is a valid selection\n if colormap_type not in VALID_COLORMAP_TYPES:\n raise exceptions.PlotlyError(\n "Must choose a valid colormap type. 
"\n "Either 'cat' or 'seq' for a cate"\n "gorical and sequential colormap "\n "respectively."\n )\n\n # Check for not 'size' or 'color' in 'marker' of **kwargs\n if "marker" in kwargs:\n FORBIDDEN_PARAMS = ["size", "color", "colorscale"]\n if any(param in kwargs["marker"] for param in FORBIDDEN_PARAMS):\n raise exceptions.PlotlyError(\n "Your kwargs dictionary cannot "\n "include the 'size', 'color' or "\n "'colorscale' key words inside "\n "the marker dict since 'size' is "\n "already an argument of the "\n "scatterplot matrix function and "\n "both 'color' and 'colorscale "\n "are set internally."\n )\n\n\ndef scatterplot(dataframe, headers, diag, size, height, width, title, **kwargs):\n """\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix without index\n\n """\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim, print_grid=False)\n trace_list = []\n # Insert traces into trace_list\n for listy in dataframe:\n for listx in dataframe:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(x=listx, showlegend=False)\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(y=listx, name=None, showlegend=False)\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n trace = graph_objs.Scatter(\n x=listx, y=listy, mode="markers", showlegend=False, **kwargs\n )\n trace_list.append(trace)\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode="markers",\n marker=dict(size=size),\n showlegend=False,\n **kwargs,\n )\n trace_list.append(trace)\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index], y_index, x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)\n fig["layout"][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = "yaxis{}".format(1 + 
(dim * j))\n fig["layout"][yaxis_key].update(title=headers[j])\n\n fig["layout"].update(height=height, width=width, title=title, showlegend=True)\n\n hide_tick_labels_from_box_subplots(fig)\n\n return fig\n\n\ndef scatterplot_dict(\n dataframe,\n headers,\n diag,\n size,\n height,\n width,\n title,\n index,\n index_vals,\n endpts,\n colormap,\n colormap_type,\n **kwargs,\n):\n """\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked.\n Used if colormap is a dictionary with index values as keys pointing to\n colors. Forces colormap_type to behave categorically because it would\n not make sense colors are assigned to each index value and thus\n implies that a categorical approach should be taken\n\n """\n\n theme = colormap\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim, print_grid=False)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx, marker=dict(color=theme[name]), showlegend=True\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[name]),\n showlegend=True,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n kwargs["marker"]["color"] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n 
mode="markers",\n name=name,\n showlegend=True,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n marker=dict(size=size, color=theme[name]),\n showlegend=True,\n **kwargs,\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(color=theme[name]),\n showlegend=False,\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[name]),\n showlegend=False,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n kwargs["marker"]["color"] = theme[name]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n showlegend=False,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n marker=dict(size=size, color=theme[name]),\n showlegend=False,\n **kwargs,\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(trace_list[trace_index][name], y_index, x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)\n fig["layout"][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = "yaxis{}".format(1 + (dim * j))\n fig["layout"][yaxis_key].update(title=headers[j])\n\n hide_tick_labels_from_box_subplots(fig)\n\n if diag == "histogram":\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True, barmode="stack"\n )\n return fig\n\n else:\n fig["layout"].update(height=height, width=width, title=title, showlegend=True)\n return fig\n\n\ndef 
scatterplot_theme(\n dataframe,\n headers,\n diag,\n size,\n height,\n width,\n title,\n index,\n index_vals,\n endpts,\n colormap,\n colormap_type,\n **kwargs,\n):\n """\n Refer to FigureFactory.create_scatterplotmatrix() for docstring\n\n Returns fig for scatterplotmatrix with both index and colormap picked\n\n """\n\n # Check if index is made of string values\n if isinstance(index_vals[0], str):\n unique_index_vals = []\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals.append(name)\n n_colors_len = len(unique_index_vals)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == "seq":\n foo = clrs.color_parser(colormap, clrs.unlabel_rgb)\n foo = clrs.n_colors(foo[0], foo[1], n_colors_len)\n theme = clrs.color_parser(foo, clrs.label_rgb)\n\n if colormap_type == "cat":\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim, print_grid=False)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # create a dictionary for index_vals\n unique_index_vals = {}\n for name in index_vals:\n if name not in unique_index_vals:\n unique_index_vals[name] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for name in sorted(unique_index_vals.keys()):\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if index_vals[j] == name:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(color=theme[c_indx]),\n showlegend=True,\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[c_indx]),\n showlegend=True,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n 
kwargs["marker"]["color"] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n showlegend=True,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n marker=dict(size=size, color=theme[c_indx]),\n showlegend=True,\n **kwargs,\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(color=theme[c_indx]),\n showlegend=False,\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[c_indx]),\n showlegend=False,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n kwargs["marker"]["color"] = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n showlegend=False,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=name,\n marker=dict(size=size, color=theme[c_indx]),\n showlegend=False,\n **kwargs,\n )\n # Push the trace into dictionary\n unique_index_vals[name] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n c_indx += 1\n trace_list.append(unique_index_vals)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for name in sorted(trace_list[trace_index].keys()):\n fig.append_trace(trace_list[trace_index][name], y_index, x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)\n fig["layout"][xaxis_key].update(title=headers[j])\n\n for j in range(dim):\n yaxis_key = "yaxis{}".format(1 + (dim * j))\n fig["layout"][yaxis_key].update(title=headers[j])\n\n hide_tick_labels_from_box_subplots(fig)\n\n if diag == "histogram":\n fig["layout"].update(\n height=height,\n width=width,\n 
title=title,\n showlegend=True,\n barmode="stack",\n )\n return fig\n\n elif diag == "box":\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n else:\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n else:\n if endpts:\n intervals = utils.endpts_to_intervals(endpts)\n\n # Convert colormap to list of n RGB tuples\n if colormap_type == "seq":\n foo = clrs.color_parser(colormap, clrs.unlabel_rgb)\n foo = clrs.n_colors(foo[0], foo[1], len(intervals))\n theme = clrs.color_parser(foo, clrs.label_rgb)\n\n if colormap_type == "cat":\n # leave list of colors the same way\n theme = colormap\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim, print_grid=False)\n trace_list = []\n legend_param = 0\n # Work over all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n interval_labels = {}\n for interval in intervals:\n interval_labels[str(interval)] = []\n\n c_indx = 0 # color index\n # Fill all the rest of the names into the dictionary\n for interval in intervals:\n new_listx = []\n new_listy = []\n for j in range(len(index_vals)):\n if interval[0] < index_vals[j] <= interval[1]:\n new_listx.append(listx[j])\n new_listy.append(listy[j])\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(color=theme[c_indx]),\n showlegend=True,\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[c_indx]),\n showlegend=True,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n (kwargs["marker"]["color"]) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=str(interval),\n showlegend=True,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n 
mode="markers",\n name=str(interval),\n marker=dict(size=size, color=theme[c_indx]),\n showlegend=True,\n **kwargs,\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=new_listx,\n marker=dict(color=theme[c_indx]),\n showlegend=False,\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=new_listx,\n name=None,\n marker=dict(color=theme[c_indx]),\n showlegend=False,\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n (kwargs["marker"]["color"]) = theme[c_indx]\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=str(interval),\n showlegend=False,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=new_listx,\n y=new_listy,\n mode="markers",\n name=str(interval),\n marker=dict(size=size, color=theme[c_indx]),\n showlegend=False,\n **kwargs,\n )\n # Push the trace into dictionary\n interval_labels[str(interval)] = trace\n if c_indx >= (len(theme) - 1):\n c_indx = -1\n c_indx += 1\n trace_list.append(interval_labels)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n for interval in intervals:\n fig.append_trace(\n trace_list[trace_index][str(interval)], y_index, x_index\n )\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)\n fig["layout"][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = "yaxis{}".format(1 + (dim * j))\n fig["layout"][yaxis_key].update(title=headers[j])\n\n hide_tick_labels_from_box_subplots(fig)\n\n if diag == "histogram":\n fig["layout"].update(\n height=height,\n width=width,\n title=title,\n showlegend=True,\n barmode="stack",\n )\n return fig\n\n elif diag == "box":\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n else:\n fig["layout"].update(\n 
height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n else:\n theme = colormap\n\n # add a copy of rgb color to theme if it contains one color\n if len(theme) <= 1:\n theme.append(theme[0])\n\n color = []\n for incr in range(len(theme)):\n color.append([1.0 / (len(theme) - 1) * incr, theme[incr]])\n\n dim = len(dataframe)\n fig = make_subplots(rows=dim, cols=dim, print_grid=False)\n trace_list = []\n legend_param = 0\n # Run through all permutations of list pairs\n for listy in dataframe:\n for listx in dataframe:\n # Generate trace with VISIBLE icon\n if legend_param == 1:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=listx, marker=dict(color=theme[0]), showlegend=False\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=listx, marker=dict(color=theme[0]), showlegend=False\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n kwargs["marker"]["color"] = index_vals\n kwargs["marker"]["colorscale"] = color\n kwargs["marker"]["showscale"] = True\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode="markers",\n showlegend=False,\n **kwargs,\n )\n else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode="markers",\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=True,\n ),\n showlegend=False,\n **kwargs,\n )\n # Generate trace with INVISIBLE icon\n else:\n if (listx == listy) and (diag == "histogram"):\n trace = graph_objs.Histogram(\n x=listx, marker=dict(color=theme[0]), showlegend=False\n )\n elif (listx == listy) and (diag == "box"):\n trace = graph_objs.Box(\n y=listx, marker=dict(color=theme[0]), showlegend=False\n )\n else:\n if "marker" in kwargs:\n kwargs["marker"]["size"] = size\n kwargs["marker"]["color"] = index_vals\n kwargs["marker"]["colorscale"] = color\n kwargs["marker"]["showscale"] = False\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode="markers",\n showlegend=False,\n **kwargs,\n )\n 
else:\n trace = graph_objs.Scatter(\n x=listx,\n y=listy,\n mode="markers",\n marker=dict(\n size=size,\n color=index_vals,\n colorscale=color,\n showscale=False,\n ),\n showlegend=False,\n **kwargs,\n )\n # Push the trace into list\n trace_list.append(trace)\n legend_param += 1\n\n trace_index = 0\n indices = range(1, dim + 1)\n for y_index in indices:\n for x_index in indices:\n fig.append_trace(trace_list[trace_index], y_index, x_index)\n trace_index += 1\n\n # Insert headers into the figure\n for j in range(dim):\n xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)\n fig["layout"][xaxis_key].update(title=headers[j])\n for j in range(dim):\n yaxis_key = "yaxis{}".format(1 + (dim * j))\n fig["layout"][yaxis_key].update(title=headers[j])\n\n hide_tick_labels_from_box_subplots(fig)\n\n if diag == "histogram":\n fig["layout"].update(\n height=height,\n width=width,\n title=title,\n showlegend=True,\n barmode="stack",\n )\n return fig\n\n elif diag == "box":\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n else:\n fig["layout"].update(\n height=height, width=width, title=title, showlegend=True\n )\n return fig\n\n\ndef create_scatterplotmatrix(\n df,\n index=None,\n endpts=None,\n diag="scatter",\n height=500,\n width=500,\n size=6,\n title="Scatterplot Matrix",\n colormap=None,\n colormap_type="cat",\n dataframe=None,\n headers=None,\n index_vals=None,\n **kwargs,\n):\n """\n Returns data for a scatterplot matrix;\n **deprecated**,\n use instead the plotly.graph_objects trace\n :class:`plotly.graph_objects.Splom`.\n\n :param (array) df: array of the data with column headers\n :param (str) index: name of the index column in data array\n :param (list|tuple) endpts: takes an increasing sequece of numbers\n that defines intervals on the real line. 
They are used to group\n the entries in an index of numbers into their corresponding\n interval and therefore can be treated as categorical data\n :param (str) diag: sets the chart type for the main diagonal plots.\n The options are 'scatter', 'histogram' and 'box'.\n :param (int|float) height: sets the height of the chart\n :param (int|float) width: sets the width of the chart\n :param (float) size: sets the marker size (in px)\n :param (str) title: the title label of the scatterplot matrix\n :param (str|tuple|list|dict) colormap: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colormap is a list, it must contain valid color types as its\n members.\n If colormap is a dictionary, all the string entries in\n the index column must be a key in colormap. In this case, the\n colormap_type is forced to 'cat' or categorical\n :param (str) colormap_type: determines how colormap is interpreted.\n Valid choices are 'seq' (sequential) and 'cat' (categorical). If\n 'seq' is selected, only the first two colors in colormap will be\n considered (when colormap is a list) and the index values will be\n linearly interpolated between those two colors. 
This option is\n forced if all index values are numeric.\n If 'cat' is selected, a color from colormap will be assigned to\n each category from index, including the intervals if endpts is\n being used\n :param (dict) **kwargs: a dictionary of scatterplot arguments\n The only forbidden parameters are 'size', 'color' and\n 'colorscale' in 'marker'\n\n Example 1: Vanilla Scatterplot Matrix\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> # Create dataframe\n >>> df = pd.DataFrame(np.random.randn(10, 2),\n ... columns=['Column 1', 'Column 2'])\n\n >>> # Create scatterplot matrix\n >>> fig = create_scatterplotmatrix(df)\n >>> fig.show()\n\n\n Example 2: Indexing a Column\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> # Create dataframe with index\n >>> df = pd.DataFrame(np.random.randn(10, 2),\n ... columns=['A', 'B'])\n\n >>> # Add another column of strings to the dataframe\n >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n ... 'grape', 'pear', 'pear', 'apple', 'pear'])\n\n >>> # Create scatterplot matrix\n >>> fig = create_scatterplotmatrix(df, index='Fruit', size=10)\n >>> fig.show()\n\n\n Example 3: Styling the Diagonal Subplots\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> # Create dataframe with index\n >>> df = pd.DataFrame(np.random.randn(10, 4),\n ... columns=['A', 'B', 'C', 'D'])\n\n >>> # Add another column of strings to the dataframe\n >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',\n ... 
'grape', 'pear', 'pear', 'apple', 'pear'])\n\n >>> # Create scatterplot matrix\n >>> fig = create_scatterplotmatrix(df, diag='box', index='Fruit', height=1000,\n ... width=1000)\n >>> fig.show()\n\n\n Example 4: Use a Theme to Style the Subplots\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> # Create dataframe with random data\n >>> df = pd.DataFrame(np.random.randn(100, 3),\n ... columns=['A', 'B', 'C'])\n\n >>> # Create scatterplot matrix using a built-in\n >>> # Plotly palette scale and indexing column 'A'\n >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',\n ... colormap='Blues', height=800, width=800)\n >>> fig.show()\n\n\n Example 5: Example 4 with Interval Factoring\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n\n >>> # Create dataframe with random data\n >>> df = pd.DataFrame(np.random.randn(100, 3),\n ... columns=['A', 'B', 'C'])\n\n >>> # Create scatterplot matrix using a list of 2 rgb tuples\n >>> # and endpoints at -1, 0 and 1\n >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',\n ... colormap=['rgb(140, 255, 50)',\n ... 'rgb(170, 60, 115)', '#6c4774',\n ... (0.5, 0.1, 0.8)],\n ... endpts=[-1, 0, 1], height=800, width=800)\n >>> fig.show()\n\n\n Example 6: Using the colormap as a Dictionary\n\n >>> from plotly.graph_objs import graph_objs\n >>> from plotly.figure_factory import create_scatterplotmatrix\n\n >>> import numpy as np\n >>> import pandas as pd\n >>> import random\n\n >>> # Create dataframe with random data\n >>> df = pd.DataFrame(np.random.randn(100, 3),\n ... columns=['Column A',\n ... 'Column B',\n ... 
'Column C'])\n\n >>> # Add new color column to dataframe\n >>> new_column = []\n >>> strange_colors = ['turquoise', 'limegreen', 'goldenrod']\n\n >>> for j in range(100):\n ... new_column.append(random.choice(strange_colors))\n >>> df['Colors'] = pd.Series(new_column, index=df.index)\n\n >>> # Create scatterplot matrix using a dictionary of hex color values\n >>> # which correspond to actual color names in 'Colors' column\n >>> fig = create_scatterplotmatrix(\n ... df, diag='box', index='Colors',\n ... colormap= dict(\n ... turquoise = '#00F5FF',\n ... limegreen = '#32CD32',\n ... goldenrod = '#DAA520'\n ... ),\n ... colormap_type='cat',\n ... height=800, width=800\n ... )\n >>> fig.show()\n """\n # TODO: protected until #282\n if dataframe is None:\n dataframe = []\n if headers is None:\n headers = []\n if index_vals is None:\n index_vals = []\n\n validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs)\n\n # Validate colormap\n if isinstance(colormap, dict):\n colormap = clrs.validate_colors_dict(colormap, "rgb")\n elif isinstance(colormap, str) and "rgb" not in colormap and "#" not in colormap:\n if colormap not in clrs.PLOTLY_SCALES.keys():\n raise exceptions.PlotlyError(\n "If 'colormap' is a string, it must be the name "\n "of a Plotly Colorscale. 
The available colorscale "\n "names are {}".format(clrs.PLOTLY_SCALES.keys())\n )\n else:\n # TODO change below to allow the correct Plotly colorscale\n colormap = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES[colormap])\n # keep only first and last item - fix later\n colormap = [colormap[0]] + [colormap[-1]]\n colormap = clrs.validate_colors(colormap, "rgb")\n else:\n colormap = clrs.validate_colors(colormap, "rgb")\n\n if not index:\n for name in df:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n # Check for same data-type in df columns\n utils.validate_dataframe(dataframe)\n figure = scatterplot(\n dataframe, headers, diag, size, height, width, title, **kwargs\n )\n return figure\n else:\n # Validate index selection\n if index not in df:\n raise exceptions.PlotlyError(\n "Make sure you set the index "\n "input variable to one of the "\n "column names of your "\n "dataframe."\n )\n index_vals = df[index].values.tolist()\n for name in df:\n if name != index:\n headers.append(name)\n for name in headers:\n dataframe.append(df[name].values.tolist())\n\n # check for same data-type in each df column\n utils.validate_dataframe(dataframe)\n utils.validate_index(index_vals)\n\n # check if all colormap keys are in the index\n # if colormap is a dictionary\n if isinstance(colormap, dict):\n for key in colormap:\n if not all(index in colormap for index in index_vals):\n raise exceptions.PlotlyError(\n "If colormap is a "\n "dictionary, all the "\n "names in the index "\n "must be keys."\n )\n figure = scatterplot_dict(\n dataframe,\n headers,\n diag,\n size,\n height,\n width,\n title,\n index,\n index_vals,\n endpts,\n colormap,\n colormap_type,\n **kwargs,\n )\n return figure\n\n else:\n figure = scatterplot_theme(\n dataframe,\n headers,\n diag,\n size,\n height,\n width,\n title,\n index,\n index_vals,\n endpts,\n colormap,\n colormap_type,\n **kwargs,\n )\n return figure\n
.venv\Lib\site-packages\plotly\figure_factory\_scatterplot.py
_scatterplot.py
Python
44,753
0.95
0.138326
0.076012
python-kit
321
2024-01-30T02:03:45.739388
BSD-3-Clause
false
d7864d3490f6fab9674df6e683f2409a
import math\n\nfrom plotly import exceptions, optional_imports\nfrom plotly.figure_factory import utils\nfrom plotly.graph_objs import graph_objs\n\nnp = optional_imports.get_module("numpy")\n\n\ndef validate_streamline(x, y):\n """\n Streamline-specific validations\n\n Specifically, this checks that x and y are both evenly spaced,\n and that the package numpy is available.\n\n See FigureFactory.create_streamline() for params\n\n :raises: (ImportError) If numpy is not available.\n :raises: (PlotlyError) If x is not evenly spaced.\n :raises: (PlotlyError) If y is not evenly spaced.\n """\n if np is False:\n raise ImportError("FigureFactory.create_streamline requires numpy")\n for index in range(len(x) - 1):\n if ((x[index + 1] - x[index]) - (x[1] - x[0])) > 0.0001:\n raise exceptions.PlotlyError(\n "x must be a 1 dimensional, evenly spaced array"\n )\n for index in range(len(y) - 1):\n if ((y[index + 1] - y[index]) - (y[1] - y[0])) > 0.0001:\n raise exceptions.PlotlyError(\n "y must be a 1 dimensional, evenly spaced array"\n )\n\n\ndef create_streamline(\n x, y, u, v, density=1, angle=math.pi / 9, arrow_scale=0.09, **kwargs\n):\n """\n Returns data for a streamline plot.\n\n :param (list|ndarray) x: 1 dimensional, evenly spaced list or array\n :param (list|ndarray) y: 1 dimensional, evenly spaced list or array\n :param (ndarray) u: 2 dimensional array\n :param (ndarray) v: 2 dimensional array\n :param (float|int) density: controls the density of streamlines in\n plot. This is multiplied by 30 to scale similiarly to other\n available streamline functions such as matplotlib.\n Default = 1\n :param (angle in radians) angle: angle of arrowhead. 
Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :param kwargs: kwargs passed through plotly.graph_objs.Scatter\n for more information on valid kwargs call\n help(plotly.graph_objs.Scatter)\n\n :rtype (dict): returns a representation of streamline figure.\n\n Example 1: Plot simple streamline and increase arrow size\n\n >>> from plotly.figure_factory import create_streamline\n >>> import plotly.graph_objects as go\n >>> import numpy as np\n >>> import math\n\n >>> # Add data\n >>> x = np.linspace(-3, 3, 100)\n >>> y = np.linspace(-3, 3, 100)\n >>> Y, X = np.meshgrid(x, y)\n >>> u = -1 - X**2 + Y\n >>> v = 1 + X - Y**2\n >>> u = u.T # Transpose\n >>> v = v.T # Transpose\n\n >>> # Create streamline\n >>> fig = create_streamline(x, y, u, v, arrow_scale=.1)\n >>> fig.show()\n\n Example 2: from nbviewer.ipython.org/github/barbagroup/AeroPython\n\n >>> from plotly.figure_factory import create_streamline\n >>> import numpy as np\n >>> import math\n\n >>> # Add data\n >>> N = 50\n >>> x_start, x_end = -2.0, 2.0\n >>> y_start, y_end = -1.0, 1.0\n >>> x = np.linspace(x_start, x_end, N)\n >>> y = np.linspace(y_start, y_end, N)\n >>> X, Y = np.meshgrid(x, y)\n >>> ss = 5.0\n >>> x_s, y_s = -1.0, 0.0\n\n >>> # Compute the velocity field on the mesh grid\n >>> u_s = ss/(2*np.pi) * (X-x_s)/((X-x_s)**2 + (Y-y_s)**2)\n >>> v_s = ss/(2*np.pi) * (Y-y_s)/((X-x_s)**2 + (Y-y_s)**2)\n\n >>> # Create streamline\n >>> fig = create_streamline(x, y, u_s, v_s, density=2, name='streamline')\n\n >>> # Add source point\n >>> point = go.Scatter(x=[x_s], y=[y_s], mode='markers',\n ... 
marker_size=14, name='source point')\n\n >>> fig.add_trace(point) # doctest: +SKIP\n >>> fig.show()\n """\n utils.validate_equal_length(x, y)\n utils.validate_equal_length(u, v)\n validate_streamline(x, y)\n utils.validate_positive_scalars(density=density, arrow_scale=arrow_scale)\n\n streamline_x, streamline_y = _Streamline(\n x, y, u, v, density, angle, arrow_scale\n ).sum_streamlines()\n arrow_x, arrow_y = _Streamline(\n x, y, u, v, density, angle, arrow_scale\n ).get_streamline_arrows()\n\n streamline = graph_objs.Scatter(\n x=streamline_x + arrow_x, y=streamline_y + arrow_y, mode="lines", **kwargs\n )\n\n data = [streamline]\n layout = graph_objs.Layout(hovermode="closest")\n\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Streamline(object):\n """\n Refer to FigureFactory.create_streamline() for docstring\n """\n\n def __init__(self, x, y, u, v, density, angle, arrow_scale, **kwargs):\n self.x = np.array(x)\n self.y = np.array(y)\n self.u = np.array(u)\n self.v = np.array(v)\n self.angle = angle\n self.arrow_scale = arrow_scale\n self.density = int(30 * density) # Scale similarly to other functions\n self.delta_x = self.x[1] - self.x[0]\n self.delta_y = self.y[1] - self.y[0]\n self.val_x = self.x\n self.val_y = self.y\n\n # Set up spacing\n self.blank = np.zeros((self.density, self.density))\n self.spacing_x = len(self.x) / float(self.density - 1)\n self.spacing_y = len(self.y) / float(self.density - 1)\n self.trajectories = []\n\n # Rescale speed onto axes-coordinates\n self.u = self.u / (self.x[-1] - self.x[0])\n self.v = self.v / (self.y[-1] - self.y[0])\n self.speed = np.sqrt(self.u**2 + self.v**2)\n\n # Rescale u and v for integrations.\n self.u *= len(self.x)\n self.v *= len(self.y)\n self.st_x = []\n self.st_y = []\n self.get_streamlines()\n streamline_x, streamline_y = self.sum_streamlines()\n arrows_x, arrows_y = self.get_streamline_arrows()\n\n def blank_pos(self, xi, yi):\n """\n Set up positions for trajectories to be used with 
rk4 function.\n """\n return (int((xi / self.spacing_x) + 0.5), int((yi / self.spacing_y) + 0.5))\n\n def value_at(self, a, xi, yi):\n """\n Set up for RK4 function, based on Bokeh's streamline code\n """\n if isinstance(xi, np.ndarray):\n self.x = xi.astype(int)\n self.y = yi.astype(int)\n else:\n self.val_x = int(xi)\n self.val_y = int(yi)\n a00 = a[self.val_y, self.val_x]\n a01 = a[self.val_y, self.val_x + 1]\n a10 = a[self.val_y + 1, self.val_x]\n a11 = a[self.val_y + 1, self.val_x + 1]\n xt = xi - self.val_x\n yt = yi - self.val_y\n a0 = a00 * (1 - xt) + a01 * xt\n a1 = a10 * (1 - xt) + a11 * xt\n return a0 * (1 - yt) + a1 * yt\n\n def rk4_integrate(self, x0, y0):\n """\n RK4 forward and back trajectories from the initial conditions.\n\n Adapted from Bokeh's streamline -uses Runge-Kutta method to fill\n x and y trajectories then checks length of traj (s in units of axes)\n """\n\n def f(xi, yi):\n dt_ds = 1.0 / self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return ui * dt_ds, vi * dt_ds\n\n def g(xi, yi):\n dt_ds = 1.0 / self.value_at(self.speed, xi, yi)\n ui = self.value_at(self.u, xi, yi)\n vi = self.value_at(self.v, xi, yi)\n return -ui * dt_ds, -vi * dt_ds\n\n def check(xi, yi):\n return (0 <= xi < len(self.x) - 1) and (0 <= yi < len(self.y) - 1)\n\n xb_changes = []\n yb_changes = []\n\n def rk4(x0, y0, f):\n ds = 0.01\n stotal = 0\n xi = x0\n yi = y0\n xb, yb = self.blank_pos(xi, yi)\n xf_traj = []\n yf_traj = []\n while check(xi, yi):\n xf_traj.append(xi)\n yf_traj.append(yi)\n try:\n k1x, k1y = f(xi, yi)\n k2x, k2y = f(xi + 0.5 * ds * k1x, yi + 0.5 * ds * k1y)\n k3x, k3y = f(xi + 0.5 * ds * k2x, yi + 0.5 * ds * k2y)\n k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)\n except IndexError:\n break\n xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.0\n yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.0\n if not check(xi, yi):\n break\n stotal += ds\n new_xb, new_yb = self.blank_pos(xi, yi)\n if new_xb != 
xb or new_yb != yb:\n if self.blank[new_yb, new_xb] == 0:\n self.blank[new_yb, new_xb] = 1\n xb_changes.append(new_xb)\n yb_changes.append(new_yb)\n xb = new_xb\n yb = new_yb\n else:\n break\n if stotal > 2:\n break\n return stotal, xf_traj, yf_traj\n\n sf, xf_traj, yf_traj = rk4(x0, y0, f)\n sb, xb_traj, yb_traj = rk4(x0, y0, g)\n stotal = sf + sb\n x_traj = xb_traj[::-1] + xf_traj[1:]\n y_traj = yb_traj[::-1] + yf_traj[1:]\n\n if len(x_traj) < 1:\n return None\n if stotal > 0.2:\n initxb, inityb = self.blank_pos(x0, y0)\n self.blank[inityb, initxb] = 1\n return x_traj, y_traj\n else:\n for xb, yb in zip(xb_changes, yb_changes):\n self.blank[yb, xb] = 0\n return None\n\n def traj(self, xb, yb):\n """\n Integrate trajectories\n\n :param (int) xb: results of passing xi through self.blank_pos\n :param (int) xy: results of passing yi through self.blank_pos\n\n Calculate each trajectory based on rk4 integrate method.\n """\n\n if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:\n return\n if self.blank[yb, xb] == 0:\n t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)\n if t is not None:\n self.trajectories.append(t)\n\n def get_streamlines(self):\n """\n Get streamlines by building trajectory set.\n """\n for indent in range(self.density // 2):\n for xi in range(self.density - 2 * indent):\n self.traj(xi + indent, indent)\n self.traj(xi + indent, self.density - 1 - indent)\n self.traj(indent, xi + indent)\n self.traj(self.density - 1 - indent, xi + indent)\n\n self.st_x = [\n np.array(t[0]) * self.delta_x + self.x[0] for t in self.trajectories\n ]\n self.st_y = [\n np.array(t[1]) * self.delta_y + self.y[0] for t in self.trajectories\n ]\n\n for index in range(len(self.st_x)):\n self.st_x[index] = self.st_x[index].tolist()\n self.st_x[index].append(np.nan)\n\n for index in range(len(self.st_y)):\n self.st_y[index] = self.st_y[index].tolist()\n self.st_y[index].append(np.nan)\n\n def get_streamline_arrows(self):\n """\n Makes an arrow for each 
streamline.\n\n Gets angle of streamline at 1/3 mark and creates arrow coordinates\n based off of user defined angle and arrow_scale.\n\n :param (array) st_x: x-values for all streamlines\n :param (array) st_y: y-values for all streamlines\n :param (angle in radians) angle: angle of arrowhead. Default = pi/9\n :param (float in [0,1]) arrow_scale: value to scale length of arrowhead\n Default = .09\n :rtype (list, list) arrows_x: x-values to create arrowhead and\n arrows_y: y-values to create arrowhead\n """\n arrow_end_x = np.empty((len(self.st_x)))\n arrow_end_y = np.empty((len(self.st_y)))\n arrow_start_x = np.empty((len(self.st_x)))\n arrow_start_y = np.empty((len(self.st_y)))\n for index in range(len(self.st_x)):\n arrow_end_x[index] = self.st_x[index][int(len(self.st_x[index]) / 3)]\n arrow_start_x[index] = self.st_x[index][\n (int(len(self.st_x[index]) / 3)) - 1\n ]\n arrow_end_y[index] = self.st_y[index][int(len(self.st_y[index]) / 3)]\n arrow_start_y[index] = self.st_y[index][\n (int(len(self.st_y[index]) / 3)) - 1\n ]\n\n dif_x = arrow_end_x - arrow_start_x\n dif_y = arrow_end_y - arrow_start_y\n\n orig_err = np.geterr()\n np.seterr(divide="ignore", invalid="ignore")\n streamline_ang = np.arctan(dif_y / dif_x)\n np.seterr(**orig_err)\n\n ang1 = streamline_ang + (self.angle)\n ang2 = streamline_ang - (self.angle)\n\n seg1_x = np.cos(ang1) * self.arrow_scale\n seg1_y = np.sin(ang1) * self.arrow_scale\n seg2_x = np.cos(ang2) * self.arrow_scale\n seg2_y = np.sin(ang2) * self.arrow_scale\n\n point1_x = np.empty((len(dif_x)))\n point1_y = np.empty((len(dif_y)))\n point2_x = np.empty((len(dif_x)))\n point2_y = np.empty((len(dif_y)))\n\n for index in range(len(dif_x)):\n if dif_x[index] >= 0:\n point1_x[index] = arrow_end_x[index] - seg1_x[index]\n point1_y[index] = arrow_end_y[index] - seg1_y[index]\n point2_x[index] = arrow_end_x[index] - seg2_x[index]\n point2_y[index] = arrow_end_y[index] - seg2_y[index]\n else:\n point1_x[index] = arrow_end_x[index] + 
seg1_x[index]\n point1_y[index] = arrow_end_y[index] + seg1_y[index]\n point2_x[index] = arrow_end_x[index] + seg2_x[index]\n point2_y[index] = arrow_end_y[index] + seg2_y[index]\n\n space = np.empty((len(point1_x)))\n space[:] = np.nan\n\n # Combine arrays into array\n arrows_x = np.array([point1_x, arrow_end_x, point2_x, space])\n arrows_x = arrows_x.flatten("F")\n arrows_x = arrows_x.tolist()\n\n # Combine arrays into array\n arrows_y = np.array([point1_y, arrow_end_y, point2_y, space])\n arrows_y = arrows_y.flatten("F")\n arrows_y = arrows_y.tolist()\n\n return arrows_x, arrows_y\n\n def sum_streamlines(self):\n """\n Makes all streamlines readable as a single trace.\n\n :rtype (list, list): streamline_x: all x values for each streamline\n combined into single list and streamline_y: all y values for each\n streamline combined into single list\n """\n streamline_x = sum(self.st_x, [])\n streamline_y = sum(self.st_y, [])\n return streamline_x, streamline_y\n
.venv\Lib\site-packages\plotly\figure_factory\_streamline.py
_streamline.py
Python
14,499
0.95
0.137931
0.014706
python-kit
832
2024-07-08T14:58:59.114412
BSD-3-Clause
false
980014cb508dc117e411547a2a6a5c63
from plotly import exceptions, optional_imports\nfrom plotly.graph_objs import graph_objs\n\npd = optional_imports.get_module("pandas")\n\n\ndef validate_table(table_text, font_colors):\n """\n Table-specific validations\n\n Check that font_colors is supplied correctly (1, 3, or len(text)\n colors).\n\n :raises: (PlotlyError) If font_colors is supplied incorretly.\n\n See FigureFactory.create_table() for params\n """\n font_colors_len_options = [1, 3, len(table_text)]\n if len(font_colors) not in font_colors_len_options:\n raise exceptions.PlotlyError(\n "Oops, font_colors should be a list of length 1, 3 or len(text)"\n )\n\n\ndef create_table(\n table_text,\n colorscale=None,\n font_colors=None,\n index=False,\n index_title="",\n annotation_offset=0.45,\n height_constant=30,\n hoverinfo="none",\n **kwargs,\n):\n """\n Function that creates data tables.\n\n See also the plotly.graph_objects trace\n :class:`plotly.graph_objects.Table`\n\n :param (pandas.Dataframe | list[list]) text: data for table.\n :param (str|list[list]) colorscale: Colorscale for table where the\n color at value 0 is the header color, .5 is the first table color\n and 1 is the second table color. (Set .5 and 1 to avoid the striped\n table effect). Default=[[0, '#66b2ff'], [.5, '#d9d9d9'],\n [1, '#ffffff']]\n :param (list) font_colors: Color for fonts in table. Can be a single\n color, three colors, or a color for each row in the table.\n Default=['#000000'] (black text for the entire table)\n :param (int) height_constant: Constant multiplied by # of rows to\n create table height. Default=30.\n :param (bool) index: Create (header-colored) index column index from\n Pandas dataframe or list[0] for each list in text. Default=False.\n :param (string) index_title: Title for index column. Default=''.\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. 
For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple Plotly Table\n\n >>> from plotly.figure_factory import create_table\n\n >>> text = [['Country', 'Year', 'Population'],\n ... ['US', 2000, 282200000],\n ... ['Canada', 2000, 27790000],\n ... ['US', 2010, 309000000],\n ... ['Canada', 2010, 34000000]]\n\n >>> table = create_table(text)\n >>> table.show()\n\n Example 2: Table with Custom Coloring\n\n >>> from plotly.figure_factory import create_table\n >>> text = [['Country', 'Year', 'Population'],\n ... ['US', 2000, 282200000],\n ... ['Canada', 2000, 27790000],\n ... ['US', 2010, 309000000],\n ... ['Canada', 2010, 34000000]]\n >>> table = create_table(text,\n ... colorscale=[[0, '#000000'],\n ... [.5, '#80beff'],\n ... [1, '#cce5ff']],\n ... font_colors=['#ffffff', '#000000',\n ... '#000000'])\n >>> table.show()\n\n Example 3: Simple Plotly Table with Pandas\n\n >>> from plotly.figure_factory import create_table\n >>> import pandas as pd\n >>> df = pd.read_csv('http://www.stat.ubc.ca/~jenny/notOcto/STAT545A/examples/gapminder/data/gapminderDataFiveYear.txt', sep='\t')\n >>> df_p = df[0:25]\n >>> table_simple = create_table(df_p)\n >>> table_simple.show()\n\n """\n\n # Avoiding mutables in the call signature\n colorscale = (\n colorscale\n if colorscale is not None\n else [[0, "#00083e"], [0.5, "#ededee"], [1, "#ffffff"]]\n )\n font_colors = (\n font_colors if font_colors is not None else ["#ffffff", "#000000", "#000000"]\n )\n\n validate_table(table_text, font_colors)\n table_matrix = _Table(\n table_text,\n colorscale,\n font_colors,\n index,\n index_title,\n annotation_offset,\n **kwargs,\n ).get_table_matrix()\n annotations = _Table(\n table_text,\n colorscale,\n font_colors,\n index,\n index_title,\n annotation_offset,\n **kwargs,\n ).make_table_annotations()\n\n trace = dict(\n type="heatmap",\n z=table_matrix,\n opacity=0.75,\n colorscale=colorscale,\n showscale=False,\n hoverinfo=hoverinfo,\n **kwargs,\n )\n\n data 
= [trace]\n layout = dict(\n annotations=annotations,\n height=len(table_matrix) * height_constant + 50,\n margin=dict(t=0, b=0, r=0, l=0),\n yaxis=dict(\n autorange="reversed",\n zeroline=False,\n gridwidth=2,\n ticks="",\n dtick=1,\n tick0=0.5,\n showticklabels=False,\n ),\n xaxis=dict(\n zeroline=False,\n gridwidth=2,\n ticks="",\n dtick=1,\n tick0=-0.5,\n showticklabels=False,\n ),\n )\n return graph_objs.Figure(data=data, layout=layout)\n\n\nclass _Table(object):\n """\n Refer to TraceFactory.create_table() for docstring\n """\n\n def __init__(\n self,\n table_text,\n colorscale,\n font_colors,\n index,\n index_title,\n annotation_offset,\n **kwargs,\n ):\n if pd and isinstance(table_text, pd.DataFrame):\n headers = table_text.columns.tolist()\n table_text_index = table_text.index.tolist()\n table_text = table_text.values.tolist()\n table_text.insert(0, headers)\n if index:\n table_text_index.insert(0, index_title)\n for i in range(len(table_text)):\n table_text[i].insert(0, table_text_index[i])\n self.table_text = table_text\n self.colorscale = colorscale\n self.font_colors = font_colors\n self.index = index\n self.annotation_offset = annotation_offset\n self.x = range(len(table_text[0]))\n self.y = range(len(table_text))\n\n def get_table_matrix(self):\n """\n Create z matrix to make heatmap with striped table coloring\n\n :rtype (list[list]) table_matrix: z matrix to make heatmap with striped\n table coloring.\n """\n header = [0] * len(self.table_text[0])\n odd_row = [0.5] * len(self.table_text[0])\n even_row = [1] * len(self.table_text[0])\n table_matrix = [None] * len(self.table_text)\n table_matrix[0] = header\n for i in range(1, len(self.table_text), 2):\n table_matrix[i] = odd_row\n for i in range(2, len(self.table_text), 2):\n table_matrix[i] = even_row\n if self.index:\n for array in table_matrix:\n array[0] = 0\n return table_matrix\n\n def get_table_font_color(self):\n """\n Fill font-color array.\n\n Table text color can vary by row so this 
extends a single color or\n creates an array to set a header color and two alternating colors to\n create the striped table pattern.\n\n :rtype (list[list]) all_font_colors: list of font colors for each row\n in table.\n """\n if len(self.font_colors) == 1:\n all_font_colors = self.font_colors * len(self.table_text)\n elif len(self.font_colors) == 3:\n all_font_colors = list(range(len(self.table_text)))\n all_font_colors[0] = self.font_colors[0]\n for i in range(1, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[1]\n for i in range(2, len(self.table_text), 2):\n all_font_colors[i] = self.font_colors[2]\n elif len(self.font_colors) == len(self.table_text):\n all_font_colors = self.font_colors\n else:\n all_font_colors = ["#000000"] * len(self.table_text)\n return all_font_colors\n\n def make_table_annotations(self):\n """\n Generate annotations to fill in table text\n\n :rtype (list) annotations: list of annotations for each cell of the\n table.\n """\n all_font_colors = _Table.get_table_font_color(self)\n annotations = []\n for n, row in enumerate(self.table_text):\n for m, val in enumerate(row):\n # Bold text in header and index\n format_text = (\n "<b>" + str(val) + "</b>"\n if n == 0 or self.index and m < 1\n else str(val)\n )\n # Match font color of index to font color of header\n font_color = (\n self.font_colors[0] if self.index and m == 0 else all_font_colors[n]\n )\n annotations.append(\n graph_objs.layout.Annotation(\n text=format_text,\n x=self.x[m] - self.annotation_offset,\n y=self.y[n],\n xref="x1",\n yref="y1",\n align="left",\n xanchor="left",\n font=dict(color=font_color),\n showarrow=False,\n )\n )\n return annotations\n
.venv\Lib\site-packages\plotly\figure_factory\_table.py
_table.py
Python
9,374
0.95
0.128571
0.032389
node-utils
812
2024-11-29T01:31:06.315799
Apache-2.0
false
3e45fd3c499974dad1fd4a921dc653db
import plotly.colors as clrs\nfrom plotly.graph_objs import graph_objs as go\nfrom plotly import exceptions\nfrom plotly import optional_imports\n\nfrom skimage import measure\n\nnp = optional_imports.get_module("numpy")\nscipy_interp = optional_imports.get_module("scipy.interpolate")\n\n# -------------------------- Layout ------------------------------\n\n\ndef _ternary_layout(\n title="Ternary contour plot", width=550, height=525, pole_labels=["a", "b", "c"]\n):\n """\n Layout of ternary contour plot, to be passed to ``go.FigureWidget``\n object.\n\n Parameters\n ==========\n title : str or None\n Title of ternary plot\n width : int\n Figure width.\n height : int\n Figure height.\n pole_labels : str, default ['a', 'b', 'c']\n Names of the three poles of the triangle.\n """\n return dict(\n title=title,\n width=width,\n height=height,\n ternary=dict(\n sum=1,\n aaxis=dict(\n title=dict(text=pole_labels[0]), min=0.01, linewidth=2, ticks="outside"\n ),\n baxis=dict(\n title=dict(text=pole_labels[1]), min=0.01, linewidth=2, ticks="outside"\n ),\n caxis=dict(\n title=dict(text=pole_labels[2]), min=0.01, linewidth=2, ticks="outside"\n ),\n ),\n showlegend=False,\n )\n\n\n# ------------- Transformations of coordinates -------------------\n\n\ndef _replace_zero_coords(ternary_data, delta=0.0005):\n """\n Replaces zero ternary coordinates with delta and normalize the new\n triplets (a, b, c).\n\n Parameters\n ----------\n\n ternary_data : ndarray of shape (N, 3)\n\n delta : float\n Small float to regularize logarithm.\n\n Notes\n -----\n Implements a method\n by J. A. Martin-Fernandez, C. Barcelo-Vidal, V. 
Pawlowsky-Glahn,\n Dealing with zeros and missing values in compositional data sets\n using nonparametric imputation, Mathematical Geology 35 (2003),\n pp 253-278.\n """\n zero_mask = ternary_data == 0\n is_any_coord_zero = np.any(zero_mask, axis=0)\n\n unity_complement = 1 - delta * is_any_coord_zero\n if np.any(unity_complement) < 0:\n raise ValueError(\n "The provided value of delta led to negative"\n "ternary coords.Set a smaller delta"\n )\n ternary_data = np.where(zero_mask, delta, unity_complement * ternary_data)\n return ternary_data\n\n\ndef _ilr_transform(barycentric):\n """\n Perform Isometric Log-Ratio on barycentric (compositional) data.\n\n Parameters\n ----------\n barycentric: ndarray of shape (3, N)\n Barycentric coordinates.\n\n References\n ----------\n "An algebraic method to compute isometric logratio transformation and\n back transformation of compositional data", Jarauta-Bragulat, E.,\n Buenestado, P.; Hervada-Sala, C., in Proc. of the Annual Conf. of the\n Intl Assoc for Math Geology, 2003, pp 31-30.\n """\n barycentric = np.asarray(barycentric)\n x_0 = np.log(barycentric[0] / barycentric[1]) / np.sqrt(2)\n x_1 = (\n 1.0 / np.sqrt(6) * np.log(barycentric[0] * barycentric[1] / barycentric[2] ** 2)\n )\n ilr_tdata = np.stack((x_0, x_1))\n return ilr_tdata\n\n\ndef _ilr_inverse(x):\n """\n Perform inverse Isometric Log-Ratio (ILR) transform to retrieve\n barycentric (compositional) data.\n\n Parameters\n ----------\n x : array of shape (2, N)\n Coordinates in ILR space.\n\n References\n ----------\n "An algebraic method to compute isometric logratio transformation and\n back transformation of compositional data", Jarauta-Bragulat, E.,\n Buenestado, P.; Hervada-Sala, C., in Proc. of the Annual Conf. 
of the\n Intl Assoc for Math Geology, 2003, pp 31-30.\n """\n x = np.array(x)\n matrix = np.array([[0.5, 1, 1.0], [-0.5, 1, 1.0], [0.0, 0.0, 1.0]])\n s = np.sqrt(2) / 2\n t = np.sqrt(3 / 2)\n Sk = np.einsum("ik, kj -> ij", np.array([[s, t], [-s, t]]), x)\n Z = -np.log(1 + np.exp(Sk).sum(axis=0))\n log_barycentric = np.einsum(\n "ik, kj -> ij", matrix, np.stack((2 * s * x[0], t * x[1], Z))\n )\n iilr_tdata = np.exp(log_barycentric)\n return iilr_tdata\n\n\ndef _transform_barycentric_cartesian():\n """\n Returns the transformation matrix from barycentric to Cartesian\n coordinates and conversely.\n """\n # reference triangle\n tri_verts = np.array([[0.5, np.sqrt(3) / 2], [0, 0], [1, 0]])\n M = np.array([tri_verts[:, 0], tri_verts[:, 1], np.ones(3)])\n return M, np.linalg.inv(M)\n\n\ndef _prepare_barycentric_coord(b_coords):\n """\n Check ternary coordinates and return the right barycentric coordinates.\n """\n if not isinstance(b_coords, (list, np.ndarray)):\n raise ValueError(\n "Data should be either an array of shape (n,m),"\n "or a list of n m-lists, m=2 or 3"\n )\n b_coords = np.asarray(b_coords)\n if b_coords.shape[0] not in (2, 3):\n raise ValueError(\n "A point should have 2 (a, b) or 3 (a, b, c)barycentric coordinates"\n )\n if (\n (len(b_coords) == 3)\n and not np.allclose(b_coords.sum(axis=0), 1, rtol=0.01)\n and not np.allclose(b_coords.sum(axis=0), 100, rtol=0.01)\n ):\n msg = "The sum of coordinates should be 1 or 100 for all data points"\n raise ValueError(msg)\n\n if len(b_coords) == 2:\n A, B = b_coords\n C = 1 - (A + B)\n else:\n A, B, C = b_coords / b_coords.sum(axis=0)\n if np.any(np.stack((A, B, C)) < 0):\n raise ValueError("Barycentric coordinates should be positive.")\n return np.stack((A, B, C))\n\n\ndef _compute_grid(coordinates, values, interp_mode="ilr"):\n """\n Transform data points with Cartesian or ILR mapping, then Compute\n interpolation on a regular grid.\n\n Parameters\n ==========\n\n coordinates : array-like\n Barycentric 
coordinates of data points.\n values : 1-d array-like\n Data points, field to be represented as contours.\n interp_mode : 'ilr' (default) or 'cartesian'\n Defines how data are interpolated to compute contours.\n """\n if interp_mode == "cartesian":\n M, invM = _transform_barycentric_cartesian()\n coord_points = np.einsum("ik, kj -> ij", M, coordinates)\n elif interp_mode == "ilr":\n coordinates = _replace_zero_coords(coordinates)\n coord_points = _ilr_transform(coordinates)\n else:\n raise ValueError("interp_mode should be cartesian or ilr")\n xx, yy = coord_points[:2]\n x_min, x_max = xx.min(), xx.max()\n y_min, y_max = yy.min(), yy.max()\n n_interp = max(200, int(np.sqrt(len(values))))\n gr_x = np.linspace(x_min, x_max, n_interp)\n gr_y = np.linspace(y_min, y_max, n_interp)\n grid_x, grid_y = np.meshgrid(gr_x, gr_y)\n # We use cubic interpolation, except outside of the convex hull\n # of data points where we use nearest neighbor values.\n grid_z = scipy_interp.griddata(\n coord_points[:2].T, values, (grid_x, grid_y), method="cubic"\n )\n return grid_z, gr_x, gr_y\n\n\n# ----------------------- Contour traces ----------------------\n\n\ndef _polygon_area(x, y):\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n\ndef _colors(ncontours, colormap=None):\n """\n Return a list of ``ncontours`` colors from the ``colormap`` colorscale.\n """\n if colormap in clrs.PLOTLY_SCALES.keys():\n cmap = clrs.PLOTLY_SCALES[colormap]\n else:\n raise exceptions.PlotlyError(\n "Colorscale must be a valid Plotly Colorscale."\n "The available colorscale names are {}".format(clrs.PLOTLY_SCALES.keys())\n )\n values = np.linspace(0, 1, ncontours)\n vals_cmap = np.array([pair[0] for pair in cmap])\n cols = np.array([pair[1] for pair in cmap])\n inds = np.searchsorted(vals_cmap, values)\n if "#" in cols[0]: # for Viridis\n cols = [clrs.label_rgb(clrs.hex_to_rgb(col)) for col in cols]\n\n colors = [cols[0]]\n for ind, val in zip(inds[1:], values[1:]):\n val1, val2 
= vals_cmap[ind - 1], vals_cmap[ind]\n interm = (val - val1) / (val2 - val1)\n col = clrs.find_intermediate_color(\n cols[ind - 1], cols[ind], interm, colortype="rgb"\n )\n colors.append(col)\n return colors\n\n\ndef _is_invalid_contour(x, y):\n """\n Utility function for _contour_trace\n\n Contours with an area of the order as 1 pixel are considered spurious.\n """\n too_small = np.all(np.abs(x - x[0]) < 2) and np.all(np.abs(y - y[0]) < 2)\n return too_small\n\n\ndef _extract_contours(im, values, colors):\n """\n Utility function for _contour_trace.\n\n In ``im`` only one part of the domain has valid values (corresponding\n to a subdomain where barycentric coordinates are well defined). When\n computing contours, we need to assign values outside of this domain.\n We can choose a value either smaller than all the values inside the\n valid domain, or larger. This value must be chose with caution so that\n no spurious contours are added. For example, if the boundary of the valid\n domain has large values and the outer value is set to a small one, all\n intermediate contours will be added at the boundary.\n\n Therefore, we compute the two sets of contours (with an outer value\n smaller of larger than all values in the valid domain), and choose\n the value resulting in a smaller total number of contours. 
There might\n be a faster way to do this, but it works...\n """\n mask_nan = np.isnan(im)\n im_min, im_max = (\n im[np.logical_not(mask_nan)].min(),\n im[np.logical_not(mask_nan)].max(),\n )\n zz_min = np.copy(im)\n zz_min[mask_nan] = 2 * im_min\n zz_max = np.copy(im)\n zz_max[mask_nan] = 2 * im_max\n all_contours1, all_values1, all_areas1, all_colors1 = [], [], [], []\n all_contours2, all_values2, all_areas2, all_colors2 = [], [], [], []\n for i, val in enumerate(values):\n contour_level1 = measure.find_contours(zz_min, val)\n contour_level2 = measure.find_contours(zz_max, val)\n all_contours1.extend(contour_level1)\n all_contours2.extend(contour_level2)\n all_values1.extend([val] * len(contour_level1))\n all_values2.extend([val] * len(contour_level2))\n all_areas1.extend(\n [_polygon_area(contour.T[1], contour.T[0]) for contour in contour_level1]\n )\n all_areas2.extend(\n [_polygon_area(contour.T[1], contour.T[0]) for contour in contour_level2]\n )\n all_colors1.extend([colors[i]] * len(contour_level1))\n all_colors2.extend([colors[i]] * len(contour_level2))\n if len(all_contours1) <= len(all_contours2):\n return all_contours1, all_values1, all_areas1, all_colors1\n else:\n return all_contours2, all_values2, all_areas2, all_colors2\n\n\ndef _add_outer_contour(\n all_contours,\n all_values,\n all_areas,\n all_colors,\n values,\n val_outer,\n v_min,\n v_max,\n colors,\n color_min,\n color_max,\n):\n """\n Utility function for _contour_trace\n\n Adds the background color to fill gaps outside of computed contours.\n\n To compute the background color, the color of the contour with largest\n area (``val_outer``) is used. 
As background color, we choose the next\n color value in the direction of the extrema of the colormap.\n\n Then we add information for the outer contour for the different lists\n provided as arguments.\n\n A discrete colormap with all used colors is also returned (to be used\n by colorscale trace).\n """\n # The exact value of outer contour is not used when defining the trace\n outer_contour = 20 * np.array([[0, 0, 1], [0, 1, 0.5]]).T\n all_contours = [outer_contour] + all_contours\n delta_values = np.diff(values)[0]\n values = np.concatenate(\n ([values[0] - delta_values], values, [values[-1] + delta_values])\n )\n colors = np.concatenate(([color_min], colors, [color_max]))\n index = np.nonzero(values == val_outer)[0][0]\n if index < len(values) / 2:\n index -= 1\n else:\n index += 1\n all_colors = [colors[index]] + all_colors\n all_values = [values[index]] + all_values\n all_areas = [0] + all_areas\n used_colors = [color for color in colors if color in all_colors]\n # Define discrete colorscale\n color_number = len(used_colors)\n scale = np.linspace(0, 1, color_number + 1)\n discrete_cm = []\n for i, color in enumerate(used_colors):\n discrete_cm.append([scale[i], used_colors[i]])\n discrete_cm.append([scale[i + 1], used_colors[i]])\n discrete_cm.append([scale[color_number], used_colors[color_number - 1]])\n\n return all_contours, all_values, all_areas, all_colors, discrete_cm\n\n\ndef _contour_trace(\n x,\n y,\n z,\n ncontours=None,\n colorscale="Electric",\n linecolor="rgb(150,150,150)",\n interp_mode="llr",\n coloring=None,\n v_min=0,\n v_max=1,\n):\n """\n Contour trace in Cartesian coordinates.\n\n Parameters\n ==========\n\n x, y : array-like\n Cartesian coordinates\n z : array-like\n Field to be represented as contours.\n ncontours : int or None\n Number of contours to display (determined automatically if None).\n colorscale : None or str (Plotly colormap)\n colorscale of the contours.\n linecolor : rgb color\n Color used for lines. 
If ``colorscale`` is not None, line colors are\n determined from ``colorscale`` instead.\n interp_mode : 'ilr' (default) or 'cartesian'\n Defines how data are interpolated to compute contours. If 'irl',\n ILR (Isometric Log-Ratio) of compositional data is performed. If\n 'cartesian', contours are determined in Cartesian space.\n coloring : None or 'lines'\n How to display contour. Filled contours if None, lines if ``lines``.\n vmin, vmax : float\n Bounds of interval of values used for the colorspace\n\n Notes\n =====\n """\n # Prepare colors\n # We do not take extrema, for example for one single contour\n # the color will be the middle point of the colormap\n colors = _colors(ncontours + 2, colorscale)\n # Values used for contours, extrema are not used\n # For example for a binary array [0, 1], the value of\n # the contour for ncontours=1 is 0.5.\n values = np.linspace(v_min, v_max, ncontours + 2)\n color_min, color_max = colors[0], colors[-1]\n colors = colors[1:-1]\n values = values[1:-1]\n\n # Color of line contours\n if linecolor is None:\n linecolor = "rgb(150, 150, 150)"\n else:\n colors = [linecolor] * ncontours\n\n # Retrieve all contours\n all_contours, all_values, all_areas, all_colors = _extract_contours(\n z, values, colors\n )\n\n # Now sort contours by decreasing area\n order = np.argsort(all_areas)[::-1]\n\n # Add outer contour\n all_contours, all_values, all_areas, all_colors, discrete_cm = _add_outer_contour(\n all_contours,\n all_values,\n all_areas,\n all_colors,\n values,\n all_values[order[0]],\n v_min,\n v_max,\n colors,\n color_min,\n color_max,\n )\n order = np.concatenate(([0], order + 1))\n\n # Compute traces, in the order of decreasing area\n traces = []\n M, invM = _transform_barycentric_cartesian()\n dx = (x.max() - x.min()) / x.size\n dy = (y.max() - y.min()) / y.size\n for index in order:\n y_contour, x_contour = all_contours[index].T\n val = all_values[index]\n if interp_mode == "cartesian":\n bar_coords = np.dot(\n invM,\n 
np.stack((dx * x_contour, dy * y_contour, np.ones(x_contour.shape))),\n )\n elif interp_mode == "ilr":\n bar_coords = _ilr_inverse(\n np.stack((dx * x_contour + x.min(), dy * y_contour + y.min()))\n )\n if index == 0: # outer triangle\n a = np.array([1, 0, 0])\n b = np.array([0, 1, 0])\n c = np.array([0, 0, 1])\n else:\n a, b, c = bar_coords\n if _is_invalid_contour(x_contour, y_contour):\n continue\n\n _col = all_colors[index] if coloring == "lines" else linecolor\n trace = dict(\n type="scatterternary",\n a=a,\n b=b,\n c=c,\n mode="lines",\n line=dict(color=_col, shape="spline", width=1),\n fill="toself",\n fillcolor=all_colors[index],\n showlegend=True,\n hoverinfo="skip",\n name="%.3f" % val,\n )\n if coloring == "lines":\n trace["fill"] = None\n traces.append(trace)\n\n return traces, discrete_cm\n\n\n# -------------------- Figure Factory for ternary contour -------------\n\n\ndef create_ternary_contour(\n coordinates,\n values,\n pole_labels=["a", "b", "c"],\n width=500,\n height=500,\n ncontours=None,\n showscale=False,\n coloring=None,\n colorscale="Bluered",\n linecolor=None,\n title=None,\n interp_mode="ilr",\n showmarkers=False,\n):\n """\n Ternary contour plot.\n\n Parameters\n ----------\n\n coordinates : list or ndarray\n Barycentric coordinates of shape (2, N) or (3, N) where N is the\n number of data points. The sum of the 3 coordinates is expected\n to be 1 for all data points.\n values : array-like\n Data points of field to be represented as contours.\n pole_labels : str, default ['a', 'b', 'c']\n Names of the three poles of the triangle.\n width : int\n Figure width.\n height : int\n Figure height.\n ncontours : int or None\n Number of contours to display (determined automatically if None).\n showscale : bool, default False\n If True, a colorbar showing the color scale is displayed.\n coloring : None or 'lines'\n How to display contour. 
Filled contours if None, lines if ``lines``.\n colorscale : None or str (Plotly colormap)\n colorscale of the contours.\n linecolor : None or rgb color\n Color used for lines. ``colorscale`` has to be set to None, otherwise\n line colors are determined from ``colorscale``.\n title : str or None\n Title of ternary plot\n interp_mode : 'ilr' (default) or 'cartesian'\n Defines how data are interpolated to compute contours. If 'irl',\n ILR (Isometric Log-Ratio) of compositional data is performed. If\n 'cartesian', contours are determined in Cartesian space.\n showmarkers : bool, default False\n If True, markers corresponding to input compositional points are\n superimposed on contours, using the same colorscale.\n\n Examples\n ========\n\n Example 1: ternary contour plot with filled contours\n\n >>> import plotly.figure_factory as ff\n >>> import numpy as np\n >>> # Define coordinates\n >>> a, b = np.mgrid[0:1:20j, 0:1:20j]\n >>> mask = a + b <= 1\n >>> a = a[mask].ravel()\n >>> b = b[mask].ravel()\n >>> c = 1 - a - b\n >>> # Values to be displayed as contours\n >>> z = a * b * c\n >>> fig = ff.create_ternary_contour(np.stack((a, b, c)), z)\n >>> fig.show()\n\n It is also possible to give only two barycentric coordinates for each\n point, since the sum of the three coordinates is one:\n\n >>> fig = ff.create_ternary_contour(np.stack((a, b)), z)\n\n\n Example 2: ternary contour plot with line contours\n\n >>> fig = ff.create_ternary_contour(np.stack((a, b, c)), z, coloring='lines')\n\n Example 3: customize number of contours\n\n >>> fig = ff.create_ternary_contour(np.stack((a, b, c)), z, ncontours=8)\n\n Example 4: superimpose contour plot and original data as markers\n\n >>> fig = ff.create_ternary_contour(np.stack((a, b, c)), z, coloring='lines',\n ... showmarkers=True)\n\n Example 5: customize title and pole labels\n\n >>> fig = ff.create_ternary_contour(np.stack((a, b, c)), z,\n ... title='Ternary plot',\n ... 
pole_labels=['clay', 'quartz', 'fledspar'])\n """\n if scipy_interp is None:\n raise ImportError(\n """\\n The create_ternary_contour figure factory requires the scipy package"""\n )\n sk_measure = optional_imports.get_module("skimage")\n if sk_measure is None:\n raise ImportError(\n """\\n The create_ternary_contour figure factory requires the scikit-image\n package"""\n )\n if colorscale is None:\n showscale = False\n if ncontours is None:\n ncontours = 5\n coordinates = _prepare_barycentric_coord(coordinates)\n v_min, v_max = values.min(), values.max()\n grid_z, gr_x, gr_y = _compute_grid(coordinates, values, interp_mode=interp_mode)\n\n layout = _ternary_layout(\n pole_labels=pole_labels, width=width, height=height, title=title\n )\n\n contour_trace, discrete_cm = _contour_trace(\n gr_x,\n gr_y,\n grid_z,\n ncontours=ncontours,\n colorscale=colorscale,\n linecolor=linecolor,\n interp_mode=interp_mode,\n coloring=coloring,\n v_min=v_min,\n v_max=v_max,\n )\n\n fig = go.Figure(data=contour_trace, layout=layout)\n\n opacity = 1 if showmarkers else 0\n a, b, c = coordinates\n hovertemplate = (\n pole_labels[0]\n + ": %{a:.3f}<br>"\n + pole_labels[1]\n + ": %{b:.3f}<br>"\n + pole_labels[2]\n + ": %{c:.3f}<br>"\n "z: %{marker.color:.3f}<extra></extra>"\n )\n\n fig.add_scatterternary(\n a=a,\n b=b,\n c=c,\n mode="markers",\n marker={\n "color": values,\n "colorscale": colorscale,\n "line": {"color": "rgb(120, 120, 120)", "width": int(coloring != "lines")},\n },\n opacity=opacity,\n hovertemplate=hovertemplate,\n )\n if showscale:\n if not showmarkers:\n colorscale = discrete_cm\n colorbar = dict(\n {\n "type": "scatterternary",\n "a": [None],\n "b": [None],\n "c": [None],\n "marker": {\n "cmin": values.min(),\n "cmax": values.max(),\n "colorscale": colorscale,\n "showscale": True,\n },\n "mode": "markers",\n }\n )\n fig.add_trace(colorbar)\n\n return fig\n
.venv\Lib\site-packages\plotly\figure_factory\_ternary_contour.py
_ternary_contour.py
Python
22,374
0.95
0.114162
0.033278
awesome-app
317
2024-03-02T14:12:56.150465
GPL-3.0
false
b425dd3bca70b2bfcbdab714a3142985
from plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.graph_objs import graph_objs\n\nnp = optional_imports.get_module("numpy")\n\n\ndef map_face2color(face, colormap, scale, vmin, vmax):\n """\n Normalize facecolor values by vmin/vmax and return rgb-color strings\n\n This function takes a tuple color along with a colormap and a minimum\n (vmin) and maximum (vmax) range of possible mean distances for the\n given parametrized surface. It returns an rgb color based on the mean\n distance between vmin and vmax\n\n """\n if vmin >= vmax:\n raise exceptions.PlotlyError(\n "Incorrect relation between vmin "\n "and vmax. The vmin value cannot be "\n "bigger than or equal to the value "\n "of vmax."\n )\n if len(colormap) == 1:\n # color each triangle face with the same color in colormap\n face_color = colormap[0]\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color\n if face == vmax:\n # pick last color in colormap\n face_color = colormap[-1]\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color\n else:\n if scale is None:\n # find the normalized distance t of a triangle face between\n # vmin and vmax where the distance is between 0 and 1\n t = (face - vmin) / float((vmax - vmin))\n low_color_index = int(t / (1.0 / (len(colormap) - 1)))\n\n face_color = clrs.find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n t * (len(colormap) - 1) - low_color_index,\n )\n\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n else:\n # find the face color for a non-linearly interpolated scale\n t = (face - vmin) / float((vmax - vmin))\n\n low_color_index = 0\n for k in range(len(scale) - 1):\n if scale[k] <= t < scale[k + 1]:\n break\n low_color_index += 1\n\n low_scale_val = scale[low_color_index]\n high_scale_val = scale[low_color_index + 1]\n\n face_color 
= clrs.find_intermediate_color(\n colormap[low_color_index],\n colormap[low_color_index + 1],\n (t - low_scale_val) / (high_scale_val - low_scale_val),\n )\n\n face_color = clrs.convert_to_RGB_255(face_color)\n face_color = clrs.label_rgb(face_color)\n return face_color\n\n\ndef trisurf(\n x,\n y,\n z,\n simplices,\n show_colorbar,\n edges_color,\n scale,\n colormap=None,\n color_func=None,\n plot_edges=False,\n x_edge=None,\n y_edge=None,\n z_edge=None,\n facecolor=None,\n):\n """\n Refer to FigureFactory.create_trisurf() for docstring\n """\n # numpy import check\n if not np:\n raise ImportError("FigureFactory._trisurf() requires numpy imported.")\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n # Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\n "If color_func is a list/array, it must "\n "be the same length as simplices."\n )\n\n # convert all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if "#" in color_func[index]:\n foo = clrs.hex_to_rgb(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = clrs.convert_to_RGB_255(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if 
facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = map_face2color(\n mean_dists[index], colormap, scale, min_mean_dists, max_mean_dists\n )\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = np.asarray(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(\n x=x, y=y, z=z, facecolor=facecolor, i=ii, j=jj, k=kk, name=""\n )\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = clrs.make_colorscale(colormap, scale)\n colorscale = clrs.convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[:1],\n y=y[:1],\n z=z[:1],\n mode="markers",\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True,\n ),\n hoverinfo="none",\n showlegend=False,\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, colorbar]\n else:\n return [triangles]\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\n "If any (x_edge, y_edge, z_edge) is None, all must be None"\n )\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack(\n [pull_edges[:, :, 0], np.tile(None, 
[pull_edges.shape[0], 1])]\n )\n y_edge_pull = np.hstack(\n [pull_edges[:, :, 1], np.tile(None, [pull_edges.shape[0], 1])]\n )\n z_edge_pull = np.hstack(\n [pull_edges[:, :, 2], np.tile(None, [pull_edges.shape[0], 1])]\n )\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\n "The lengths of x_edge, y_edge and z_edge are not the same."\n )\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge,\n y=y_edge,\n z=z_edge,\n mode="lines",\n line=graph_objs.scatter3d.Line(color=edges_color, width=1.5),\n showlegend=False,\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, lines, colorbar]\n else:\n return [triangles, lines]\n\n\ndef create_trisurf(\n x,\n y,\n z,\n simplices,\n colormap=None,\n show_colorbar=True,\n scale=None,\n color_func=None,\n title="Trisurf Plot",\n plot_edges=True,\n showbackground=True,\n backgroundcolor="rgb(230, 230, 230)",\n gridcolor="rgb(255, 255, 255)",\n zerolinecolor="rgb(255, 255, 255)",\n edges_color="rgb(50, 50, 50)",\n height=800,\n width=800,\n aspectratio=None,\n):\n """\n Returns figure for a triangulated surface plot\n\n :param (array) x: data values of x in a 1D array\n :param (array) y: data values of y in a 1D array\n :param (array) z: data values of z in a 1D array\n :param (array) simplices: an array of shape (ntri, 3) where ntri is\n the number of triangles in the triangularization. Each row of the\n array contains the indicies of the verticies of each triangle\n :param (str|tuple|list) colormap: either a plotly scale name, an rgb\n or hex color, a color tuple or a list of colors. 
An rgb color is\n of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colormap is a list, it must\n contain the valid color types aforementioned as its members\n :param (bool) show_colorbar: determines if colorbar is visible\n :param (list|array) scale: sets the scale values to be used if a non-\n linearly interpolated colormap is desired. If left as None, a\n linear interpolation between the colors will be excecuted\n :param (function|list) color_func: The parameter that determines the\n coloring of the surface. Takes either a function with 3 arguments\n x, y, z or a list/array of color values the same length as\n simplices. If None, coloring will only depend on the z axis\n :param (str) title: title of the plot\n :param (bool) plot_edges: determines if the triangles on the trisurf\n are visible\n :param (bool) showbackground: makes background in plot visible\n :param (str) backgroundcolor: color of background. Takes a string of\n the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) gridcolor: color of the gridlines besides the axes. Takes\n a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255\n inclusive\n :param (str) zerolinecolor: color of the axes. Takes a string of the\n form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive\n :param (str) edges_color: color of the edges, if plot_edges is True\n :param (int|float) height: the height of the plot (in pixels)\n :param (int|float) width: the width of the plot (in pixels)\n :param (dict) aspectratio: a dictionary of the aspect ratio values for\n the x, y and z axes. 
'x', 'y' and 'z' take (int|float) values\n\n Example 1: Sphere\n\n >>> # Necessary Imports for Trisurf\n >>> import numpy as np\n >>> from scipy.spatial import Delaunay\n\n >>> from plotly.figure_factory import create_trisurf\n >>> from plotly.graph_objs import graph_objs\n\n >>> # Make data for plot\n >>> u = np.linspace(0, 2*np.pi, 20)\n >>> v = np.linspace(0, np.pi, 20)\n >>> u,v = np.meshgrid(u,v)\n >>> u = u.flatten()\n >>> v = v.flatten()\n\n >>> x = np.sin(v)*np.cos(u)\n >>> y = np.sin(v)*np.sin(u)\n >>> z = np.cos(v)\n\n >>> points2D = np.vstack([u,v]).T\n >>> tri = Delaunay(points2D)\n >>> simplices = tri.simplices\n\n >>> # Create a figure\n >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Rainbow",\n ... simplices=simplices)\n\n Example 2: Torus\n\n >>> # Necessary Imports for Trisurf\n >>> import numpy as np\n >>> from scipy.spatial import Delaunay\n\n >>> from plotly.figure_factory import create_trisurf\n >>> from plotly.graph_objs import graph_objs\n\n >>> # Make data for plot\n >>> u = np.linspace(0, 2*np.pi, 20)\n >>> v = np.linspace(0, 2*np.pi, 20)\n >>> u,v = np.meshgrid(u,v)\n >>> u = u.flatten()\n >>> v = v.flatten()\n\n >>> x = (3 + (np.cos(v)))*np.cos(u)\n >>> y = (3 + (np.cos(v)))*np.sin(u)\n >>> z = np.sin(v)\n\n >>> points2D = np.vstack([u,v]).T\n >>> tri = Delaunay(points2D)\n >>> simplices = tri.simplices\n\n >>> # Create a figure\n >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Viridis",\n ... 
simplices=simplices)\n\n Example 3: Mobius Band\n\n >>> # Necessary Imports for Trisurf\n >>> import numpy as np\n >>> from scipy.spatial import Delaunay\n\n >>> from plotly.figure_factory import create_trisurf\n >>> from plotly.graph_objs import graph_objs\n\n >>> # Make data for plot\n >>> u = np.linspace(0, 2*np.pi, 24)\n >>> v = np.linspace(-1, 1, 8)\n >>> u,v = np.meshgrid(u,v)\n >>> u = u.flatten()\n >>> v = v.flatten()\n\n >>> tp = 1 + 0.5*v*np.cos(u/2.)\n >>> x = tp*np.cos(u)\n >>> y = tp*np.sin(u)\n >>> z = 0.5*v*np.sin(u/2.)\n\n >>> points2D = np.vstack([u,v]).T\n >>> tri = Delaunay(points2D)\n >>> simplices = tri.simplices\n\n >>> # Create a figure\n >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],\n ... simplices=simplices)\n\n Example 4: Using a Custom Colormap Function with Light Cone\n\n >>> # Necessary Imports for Trisurf\n >>> import numpy as np\n >>> from scipy.spatial import Delaunay\n\n >>> from plotly.figure_factory import create_trisurf\n >>> from plotly.graph_objs import graph_objs\n\n >>> # Make data for plot\n >>> u=np.linspace(-np.pi, np.pi, 30)\n >>> v=np.linspace(-np.pi, np.pi, 30)\n >>> u,v=np.meshgrid(u,v)\n >>> u=u.flatten()\n >>> v=v.flatten()\n\n >>> x = u\n >>> y = u*np.cos(v)\n >>> z = u*np.sin(v)\n\n >>> points2D = np.vstack([u,v]).T\n >>> tri = Delaunay(points2D)\n >>> simplices = tri.simplices\n\n >>> # Define distance function\n >>> def dist_origin(x, y, z):\n ... return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)\n\n >>> # Create a figure\n >>> fig1 = create_trisurf(x=x, y=y, z=z,\n ... colormap=['#FFFFFF', '#E4FFFE',\n ... '#A4F6F9', '#FF99FE',\n ... '#BA52ED'],\n ... scale=[0, 0.6, 0.71, 0.89, 1],\n ... simplices=simplices,\n ... 
color_func=dist_origin)\n\n Example 5: Enter color_func as a list of colors\n\n >>> # Necessary Imports for Trisurf\n >>> import numpy as np\n >>> from scipy.spatial import Delaunay\n >>> import random\n\n >>> from plotly.figure_factory import create_trisurf\n >>> from plotly.graph_objs import graph_objs\n\n >>> # Make data for plot\n >>> u=np.linspace(-np.pi, np.pi, 30)\n >>> v=np.linspace(-np.pi, np.pi, 30)\n >>> u,v=np.meshgrid(u,v)\n >>> u=u.flatten()\n >>> v=v.flatten()\n\n >>> x = u\n >>> y = u*np.cos(v)\n >>> z = u*np.sin(v)\n\n >>> points2D = np.vstack([u,v]).T\n >>> tri = Delaunay(points2D)\n >>> simplices = tri.simplices\n\n\n >>> colors = []\n >>> color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']\n\n >>> for index in range(len(simplices)):\n ... colors.append(random.choice(color_choices))\n\n >>> fig = create_trisurf(\n ... x, y, z, simplices,\n ... color_func=colors,\n ... show_colorbar=True,\n ... edges_color='rgb(2, 85, 180)',\n ... title=' Modern Art'\n ... )\n """\n if aspectratio is None:\n aspectratio = {"x": 1, "y": 1, "z": 1}\n\n # Validate colormap\n clrs.validate_colors(colormap)\n colormap, scale = clrs.convert_colors_to_same_type(\n colormap, colortype="tuple", return_default_colors=True, scale=scale\n )\n\n data1 = trisurf(\n x,\n y,\n z,\n simplices,\n show_colorbar=show_colorbar,\n color_func=color_func,\n colormap=colormap,\n scale=scale,\n edges_color=edges_color,\n plot_edges=plot_edges,\n )\n\n axis = dict(\n showbackground=showbackground,\n backgroundcolor=backgroundcolor,\n gridcolor=gridcolor,\n zerolinecolor=zerolinecolor,\n )\n layout = graph_objs.Layout(\n title=title,\n width=width,\n height=height,\n scene=graph_objs.layout.Scene(\n xaxis=graph_objs.layout.scene.XAxis(**axis),\n yaxis=graph_objs.layout.scene.YAxis(**axis),\n zaxis=graph_objs.layout.scene.ZAxis(**axis),\n aspectratio=dict(\n x=aspectratio["x"], y=aspectratio["y"], z=aspectratio["z"]\n ),\n ),\n )\n\n return graph_objs.Figure(data=data1, layout=layout)\n
.venv\Lib\site-packages\plotly\figure_factory\_trisurf.py
_trisurf.py
Python
16,880
0.95
0.121807
0.055684
awesome-app
446
2023-12-18T12:19:39.691990
MIT
false
2e92f4ef260670033d8870688e7bae1e
from numbers import Number\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.graph_objs import graph_objs\nfrom plotly.subplots import make_subplots\n\npd = optional_imports.get_module("pandas")\nnp = optional_imports.get_module("numpy")\nscipy_stats = optional_imports.get_module("scipy.stats")\n\n\ndef calc_stats(data):\n """\n Calculate statistics for use in violin plot.\n """\n x = np.asarray(data, float)\n vals_min = np.min(x)\n vals_max = np.max(x)\n q2 = np.percentile(x, 50, interpolation="linear")\n q1 = np.percentile(x, 25, interpolation="lower")\n q3 = np.percentile(x, 75, interpolation="higher")\n iqr = q3 - q1\n whisker_dist = 1.5 * iqr\n\n # in order to prevent drawing whiskers outside the interval\n # of data one defines the whisker positions as:\n d1 = np.min(x[x >= (q1 - whisker_dist)])\n d2 = np.max(x[x <= (q3 + whisker_dist)])\n return {\n "min": vals_min,\n "max": vals_max,\n "q1": q1,\n "q2": q2,\n "q3": q3,\n "d1": d1,\n "d2": d2,\n }\n\n\ndef make_half_violin(x, y, fillcolor="#1f77b4", linecolor="rgb(0, 0, 0)"):\n """\n Produces a sideways probability distribution fig violin plot.\n """\n text = [\n "(pdf(y), y)=(" + "{:0.2f}".format(x[i]) + ", " + "{:0.2f}".format(y[i]) + ")"\n for i in range(len(x))\n ]\n\n return graph_objs.Scatter(\n x=x,\n y=y,\n mode="lines",\n name="",\n text=text,\n fill="tonextx",\n fillcolor=fillcolor,\n line=graph_objs.scatter.Line(width=0.5, color=linecolor, shape="spline"),\n hoverinfo="text",\n opacity=0.5,\n )\n\n\ndef make_violin_rugplot(vals, pdf_max, distance, color="#1f77b4"):\n """\n Returns a rugplot fig for a violin plot.\n """\n return graph_objs.Scatter(\n y=vals,\n x=[-pdf_max - distance] * len(vals),\n marker=graph_objs.scatter.Marker(color=color, symbol="line-ew-open"),\n mode="markers",\n name="",\n showlegend=False,\n hoverinfo="y",\n )\n\n\ndef make_non_outlier_interval(d1, d2):\n """\n Returns the scatterplot fig of most of a violin plot.\n """\n return 
graph_objs.Scatter(\n x=[0, 0],\n y=[d1, d2],\n name="",\n mode="lines",\n line=graph_objs.scatter.Line(width=1.5, color="rgb(0,0,0)"),\n )\n\n\ndef make_quartiles(q1, q3):\n """\n Makes the upper and lower quartiles for a violin plot.\n """\n return graph_objs.Scatter(\n x=[0, 0],\n y=[q1, q3],\n text=[\n "lower-quartile: " + "{:0.2f}".format(q1),\n "upper-quartile: " + "{:0.2f}".format(q3),\n ],\n mode="lines",\n line=graph_objs.scatter.Line(width=4, color="rgb(0,0,0)"),\n hoverinfo="text",\n )\n\n\ndef make_median(q2):\n """\n Formats the 'median' hovertext for a violin plot.\n """\n return graph_objs.Scatter(\n x=[0],\n y=[q2],\n text=["median: " + "{:0.2f}".format(q2)],\n mode="markers",\n marker=dict(symbol="square", color="rgb(255,255,255)"),\n hoverinfo="text",\n )\n\n\ndef make_XAxis(xaxis_title, xaxis_range):\n """\n Makes the x-axis for a violin plot.\n """\n xaxis = graph_objs.layout.XAxis(\n title=xaxis_title,\n range=xaxis_range,\n showgrid=False,\n zeroline=False,\n showline=False,\n mirror=False,\n ticks="",\n showticklabels=False,\n )\n return xaxis\n\n\ndef make_YAxis(yaxis_title):\n """\n Makes the y-axis for a violin plot.\n """\n yaxis = graph_objs.layout.YAxis(\n title=yaxis_title,\n showticklabels=True,\n autorange=True,\n ticklen=4,\n showline=True,\n zeroline=False,\n showgrid=False,\n mirror=False,\n )\n return yaxis\n\n\ndef violinplot(vals, fillcolor="#1f77b4", rugplot=True):\n """\n Refer to FigureFactory.create_violin() for docstring.\n """\n vals = np.asarray(vals, float)\n # summary statistics\n vals_min = calc_stats(vals)["min"]\n vals_max = calc_stats(vals)["max"]\n q1 = calc_stats(vals)["q1"]\n q2 = calc_stats(vals)["q2"]\n q3 = calc_stats(vals)["q3"]\n d1 = calc_stats(vals)["d1"]\n d2 = calc_stats(vals)["d2"]\n\n # kernel density estimation of pdf\n pdf = scipy_stats.gaussian_kde(vals)\n # grid over the data interval\n xx = np.linspace(vals_min, vals_max, 100)\n # evaluate the pdf at the grid xx\n yy = pdf(xx)\n max_pdf = 
np.max(yy)\n # distance from the violin plot to rugplot\n distance = (2.0 * max_pdf) / 10 if rugplot else 0\n # range for x values in the plot\n plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]\n plot_data = [\n make_half_violin(-yy, xx, fillcolor=fillcolor),\n make_half_violin(yy, xx, fillcolor=fillcolor),\n make_non_outlier_interval(d1, d2),\n make_quartiles(q1, q3),\n make_median(q2),\n ]\n if rugplot:\n plot_data.append(\n make_violin_rugplot(vals, max_pdf, distance=distance, color=fillcolor)\n )\n return plot_data, plot_xrange\n\n\ndef violin_no_colorscale(\n data,\n data_header,\n group_header,\n colors,\n use_colorscale,\n group_stats,\n rugplot,\n sort,\n height,\n width,\n title,\n):\n """\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n """\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n if sort:\n group_name.sort()\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(\n rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False\n )\n color_index = 0\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], float)\n if color_index >= len(colors):\n color_index = 0\n plot_data, plot_xrange = violinplot(\n vals, fillcolor=colors[color_index], rugplot=rugplot\n )\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n color_index += 1\n\n # add violin plot labels\n fig["layout"].update(\n {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}\n )\n\n # set the sharey axis style\n fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})\n fig["layout"].update(\n title=title,\n showlegend=False,\n hovermode="closest",\n autosize=False,\n height=height,\n width=width,\n )\n\n return fig\n\n\ndef violin_colorscale(\n data,\n data_header,\n group_header,\n colors,\n use_colorscale,\n group_stats,\n 
rugplot,\n sort,\n height,\n width,\n title,\n):\n """\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot with colorscale.\n\n """\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n if sort:\n group_name.sort()\n\n # make sure all group names are keys in group_stats\n for group in group_name:\n if group not in group_stats:\n raise exceptions.PlotlyError(\n "All values/groups in the index "\n "column must be represented "\n "as a key in group_stats."\n )\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(\n rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False\n )\n\n # prepare low and high color for colorscale\n lowcolor = clrs.color_parser(colors[0], clrs.unlabel_rgb)\n highcolor = clrs.color_parser(colors[1], clrs.unlabel_rgb)\n\n # find min and max values in group_stats\n group_stats_values = []\n for key in group_stats:\n group_stats_values.append(group_stats[key])\n\n max_value = max(group_stats_values)\n min_value = min(group_stats_values)\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], float)\n\n # find intermediate color from colorscale\n intermed = (group_stats[gr] - min_value) / (max_value - min_value)\n intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)\n\n plot_data, plot_xrange = violinplot(\n vals, fillcolor="rgb{}".format(intermed_color), rugplot=rugplot\n )\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n fig["layout"].update(\n {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}\n )\n # add colorbar to plot\n trace_dummy = graph_objs.Scatter(\n x=[0],\n y=[0],\n mode="markers",\n marker=dict(\n size=2,\n cmin=min_value,\n cmax=max_value,\n colorscale=[[0, colors[0]], [1, colors[1]]],\n showscale=True,\n ),\n showlegend=False,\n )\n fig.append_trace(trace_dummy, 1, L)\n\n # 
set the sharey axis style\n fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})\n fig["layout"].update(\n title=title,\n showlegend=False,\n hovermode="closest",\n autosize=False,\n height=height,\n width=width,\n )\n\n return fig\n\n\ndef violin_dict(\n data,\n data_header,\n group_header,\n colors,\n use_colorscale,\n group_stats,\n rugplot,\n sort,\n height,\n width,\n title,\n):\n """\n Refer to FigureFactory.create_violin() for docstring.\n\n Returns fig for violin plot without colorscale.\n\n """\n\n # collect all group names\n group_name = []\n for name in data[group_header]:\n if name not in group_name:\n group_name.append(name)\n\n if sort:\n group_name.sort()\n\n # check if all group names appear in colors dict\n for group in group_name:\n if group not in colors:\n raise exceptions.PlotlyError(\n "If colors is a dictionary, all "\n "the group names must appear as "\n "keys in colors."\n )\n\n gb = data.groupby([group_header])\n L = len(group_name)\n\n fig = make_subplots(\n rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False\n )\n\n for k, gr in enumerate(group_name):\n vals = np.asarray(gb.get_group(gr)[data_header], float)\n plot_data, plot_xrange = violinplot(vals, fillcolor=colors[gr], rugplot=rugplot)\n for item in plot_data:\n fig.append_trace(item, 1, k + 1)\n\n # add violin plot labels\n fig["layout"].update(\n {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}\n )\n\n # set the sharey axis style\n fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})\n fig["layout"].update(\n title=title,\n showlegend=False,\n hovermode="closest",\n autosize=False,\n height=height,\n width=width,\n )\n\n return fig\n\n\ndef create_violin(\n data,\n data_header=None,\n group_header=None,\n colors=None,\n use_colorscale=False,\n group_stats=None,\n rugplot=True,\n sort=False,\n height=450,\n width=600,\n title="Violin and Rug Plot",\n):\n """\n **deprecated**, use instead the plotly.graph_objects trace\n 
:class:`plotly.graph_objects.Violin`.\n\n :param (list|array) data: accepts either a list of numerical values,\n a list of dictionaries all with identical keys and at least one\n column of numeric values, or a pandas dataframe with at least one\n column of numbers.\n :param (str) data_header: the header of the data column to be used\n from an inputted pandas dataframe. Not applicable if 'data' is\n a list of numeric values.\n :param (str) group_header: applicable if grouping data by a variable.\n 'group_header' must be set to the name of the grouping variable.\n :param (str|tuple|list|dict) colors: either a plotly scale name,\n an rgb or hex color, a color tuple, a list of colors or a\n dictionary. An rgb color is of the form 'rgb(x, y, z)' where\n x, y and z belong to the interval [0, 255] and a color tuple is a\n tuple of the form (a, b, c) where a, b and c belong to [0, 1].\n If colors is a list, it must contain valid color types as its\n members.\n :param (bool) use_colorscale: only applicable if grouping by another\n variable. Will implement a colorscale based on the first 2 colors\n of param colors. This means colors must be a list with at least 2\n colors in it (Plotly colorscales are accepted since they map to a\n list of two rgb colors). Default = False\n :param (dict) group_stats: a dictionary where each key is a unique\n value from the group_header column in data. Each value must be a\n number and will be used to color the violin plots if a colorscale\n is being used.\n :param (bool) rugplot: determines if a rugplot is draw on violin plot.\n Default = True\n :param (bool) sort: determines if violins are sorted\n alphabetically (True) or by input order (False). 
Default = False\n :param (float) height: the height of the violin plot.\n :param (float) width: the width of the violin plot.\n :param (str) title: the title of the violin plot.\n\n Example 1: Single Violin Plot\n\n >>> from plotly.figure_factory import create_violin\n >>> import plotly.graph_objs as graph_objects\n\n >>> import numpy as np\n >>> from scipy import stats\n\n >>> # create list of random values\n >>> data_list = np.random.randn(100)\n\n >>> # create violin fig\n >>> fig = create_violin(data_list, colors='#604d9e')\n\n >>> # plot\n >>> fig.show()\n\n Example 2: Multiple Violin Plots with Qualitative Coloring\n\n >>> from plotly.figure_factory import create_violin\n >>> import plotly.graph_objs as graph_objects\n\n >>> import numpy as np\n >>> import pandas as pd\n >>> from scipy import stats\n\n >>> # create dataframe\n >>> np.random.seed(619517)\n >>> Nr=250\n >>> y = np.random.randn(Nr)\n >>> gr = np.random.choice(list("ABCDE"), Nr)\n >>> norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n >>> for i, letter in enumerate("ABCDE"):\n ... y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n >>> df = pd.DataFrame(dict(Score=y, Group=gr))\n\n >>> # create violin fig\n >>> fig = create_violin(df, data_header='Score', group_header='Group',\n ... sort=True, height=600, width=1000)\n\n >>> # plot\n >>> fig.show()\n\n Example 3: Violin Plots with Colorscale\n\n >>> from plotly.figure_factory import create_violin\n >>> import plotly.graph_objs as graph_objects\n\n >>> import numpy as np\n >>> import pandas as pd\n >>> from scipy import stats\n\n >>> # create dataframe\n >>> np.random.seed(619517)\n >>> Nr=250\n >>> y = np.random.randn(Nr)\n >>> gr = np.random.choice(list("ABCDE"), Nr)\n >>> norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]\n\n >>> for i, letter in enumerate("ABCDE"):\n ... 
y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]\n >>> df = pd.DataFrame(dict(Score=y, Group=gr))\n\n >>> # define header params\n >>> data_header = 'Score'\n >>> group_header = 'Group'\n\n >>> # make groupby object with pandas\n >>> group_stats = {}\n >>> groupby_data = df.groupby([group_header])\n\n >>> for group in "ABCDE":\n ... data_from_group = groupby_data.get_group(group)[data_header]\n ... # take a stat of the grouped data\n ... stat = np.median(data_from_group)\n ... # add to dictionary\n ... group_stats[group] = stat\n\n >>> # create violin fig\n >>> fig = create_violin(df, data_header='Score', group_header='Group',\n ... height=600, width=1000, use_colorscale=True,\n ... group_stats=group_stats)\n\n >>> # plot\n >>> fig.show()\n """\n\n # Validate colors\n if isinstance(colors, dict):\n valid_colors = clrs.validate_colors_dict(colors, "rgb")\n else:\n valid_colors = clrs.validate_colors(colors, "rgb")\n\n # validate data and choose plot type\n if group_header is None:\n if isinstance(data, list):\n if len(data) <= 0:\n raise exceptions.PlotlyError(\n "If data is a list, it must be "\n "nonempty and contain either "\n "numbers or dictionaries."\n )\n\n if not all(isinstance(element, Number) for element in data):\n raise exceptions.PlotlyError(\n "If data is a list, it must contain only numbers."\n )\n\n if pd and isinstance(data, pd.core.frame.DataFrame):\n if data_header is None:\n raise exceptions.PlotlyError(\n "data_header must be the "\n "column name with the "\n "desired numeric data for "\n "the violin plot."\n )\n\n data = data[data_header].values.tolist()\n\n # call the plotting functions\n plot_data, plot_xrange = violinplot(\n data, fillcolor=valid_colors[0], rugplot=rugplot\n )\n\n layout = graph_objs.Layout(\n title=title,\n autosize=False,\n font=graph_objs.layout.Font(size=11),\n height=height,\n showlegend=False,\n width=width,\n xaxis=make_XAxis("", plot_xrange),\n yaxis=make_YAxis(""),\n hovermode="closest",\n )\n 
layout["yaxis"].update(dict(showline=False, showticklabels=False, ticks=""))\n\n fig = graph_objs.Figure(data=plot_data, layout=layout)\n\n return fig\n\n else:\n if not isinstance(data, pd.core.frame.DataFrame):\n raise exceptions.PlotlyError(\n "Error. You must use a pandas "\n "DataFrame if you are using a "\n "group header."\n )\n\n if data_header is None:\n raise exceptions.PlotlyError(\n "data_header must be the column "\n "name with the desired numeric "\n "data for the violin plot."\n )\n\n if use_colorscale is False:\n if isinstance(valid_colors, dict):\n # validate colors dict choice below\n fig = violin_dict(\n data,\n data_header,\n group_header,\n valid_colors,\n use_colorscale,\n group_stats,\n rugplot,\n sort,\n height,\n width,\n title,\n )\n return fig\n else:\n fig = violin_no_colorscale(\n data,\n data_header,\n group_header,\n valid_colors,\n use_colorscale,\n group_stats,\n rugplot,\n sort,\n height,\n width,\n title,\n )\n return fig\n else:\n if isinstance(valid_colors, dict):\n raise exceptions.PlotlyError(\n "The colors param cannot be "\n "a dictionary if you are "\n "using a colorscale."\n )\n\n if len(valid_colors) < 2:\n raise exceptions.PlotlyError(\n "colors must be a list with "\n "at least 2 colors. A "\n "Plotly scale is allowed."\n )\n\n if not isinstance(group_stats, dict):\n raise exceptions.PlotlyError(\n "Your group_stats param must be a dictionary."\n )\n\n fig = violin_colorscale(\n data,\n data_header,\n group_header,\n valid_colors,\n use_colorscale,\n group_stats,\n rugplot,\n sort,\n height,\n width,\n title,\n )\n return fig\n
.venv\Lib\site-packages\plotly\figure_factory\_violin.py
_violin.py
Python
20,386
0.95
0.116477
0.044925
python-kit
585
2023-10-26T16:21:52.143584
BSD-3-Clause
false
46beb80ee1f146a1d3eb6a8c758a859f
# ruff: noqa: E402\n\nfrom plotly import optional_imports\n\n# Require that numpy exists for figure_factory\nnp = optional_imports.get_module("numpy")\nif np is None:\n raise ImportError(\n """\\nThe figure factory module requires the numpy package"""\n )\n\n\nfrom plotly.figure_factory._2d_density import create_2d_density\nfrom plotly.figure_factory._annotated_heatmap import create_annotated_heatmap\nfrom plotly.figure_factory._bullet import create_bullet\nfrom plotly.figure_factory._candlestick import create_candlestick\nfrom plotly.figure_factory._dendrogram import create_dendrogram\nfrom plotly.figure_factory._distplot import create_distplot\nfrom plotly.figure_factory._facet_grid import create_facet_grid\nfrom plotly.figure_factory._gantt import create_gantt\nfrom plotly.figure_factory._ohlc import create_ohlc\nfrom plotly.figure_factory._quiver import create_quiver\nfrom plotly.figure_factory._scatterplot import create_scatterplotmatrix\nfrom plotly.figure_factory._streamline import create_streamline\nfrom plotly.figure_factory._table import create_table\nfrom plotly.figure_factory._trisurf import create_trisurf\nfrom plotly.figure_factory._violin import create_violin\n\nif optional_imports.get_module("pandas") is not None:\n from plotly.figure_factory._county_choropleth import create_choropleth\n from plotly.figure_factory._hexbin_mapbox import create_hexbin_mapbox\nelse:\n\n def create_choropleth(*args, **kwargs):\n raise ImportError("Please install pandas to use `create_choropleth`")\n\n def create_hexbin_mapbox(*args, **kwargs):\n raise ImportError("Please install pandas to use `create_hexbin_mapbox`")\n\n\nif optional_imports.get_module("skimage") is not None:\n from plotly.figure_factory._ternary_contour import create_ternary_contour\nelse:\n\n def create_ternary_contour(*args, **kwargs):\n raise ImportError("Please install scikit-image to use `create_ternary_contour`")\n\n\n__all__ = [\n "create_2d_density",\n "create_annotated_heatmap",\n 
"create_bullet",\n "create_candlestick",\n "create_choropleth",\n "create_dendrogram",\n "create_distplot",\n "create_facet_grid",\n "create_gantt",\n "create_hexbin_mapbox",\n "create_ohlc",\n "create_quiver",\n "create_scatterplotmatrix",\n "create_streamline",\n "create_table",\n "create_ternary_contour",\n "create_trisurf",\n "create_violin",\n]\n
.venv\Lib\site-packages\plotly\figure_factory\__init__.py
__init__.py
Python
2,377
0.95
0.101449
0.035088
python-kit
808
2024-03-16T20:48:14.698483
GPL-3.0
false
607313fdf7755e624e5caa9c8804dc94
\n\n
.venv\Lib\site-packages\plotly\figure_factory\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
9,490
0.8
0.058824
0
node-utils
606
2025-03-25T06:37:32.127244
BSD-3-Clause
false
d230f7ec181dc05b422ec6ff5f5bc8e1
\n\n
.venv\Lib\site-packages\plotly\figure_factory\__pycache__\_2d_density.cpython-313.pyc
_2d_density.cpython-313.pyc
Other
5,425
0.95
0.022222
0.027027
node-utils
799
2025-03-03T21:49:27.159962
MIT
false
0537b675b3fc5f45b581aac96b2fceef
\n\n
.venv\Lib\site-packages\plotly\figure_factory\__pycache__\_annotated_heatmap.cpython-313.pyc
_annotated_heatmap.cpython-313.pyc
Other
11,128
0.95
0.089744
0.007042
vue-tools
14
2024-11-08T04:25:17.081522
MIT
false
7bfd28f6c64a7e878e40198c5039d76c
\n\n
.venv\Lib\site-packages\plotly\figure_factory\__pycache__\_bullet.cpython-313.pyc
_bullet.cpython-313.pyc
Other
13,415
0.95
0.068783
0.005618
awesome-app
227
2024-09-03T18:21:11.829139
GPL-3.0
false
c4a64cd50d5286abdff92c87dbea4385
\n\n
.venv\Lib\site-packages\plotly\figure_factory\__pycache__\_candlestick.cpython-313.pyc
_candlestick.cpython-313.pyc
Other
11,061
0.95
0.029851
0.00578
react-lib
595
2024-03-15T02:50:30.741932
BSD-3-Clause
false
86a3c232c61869dbbf00c8dd9a2ad864